text stringlengths 4 1.02M | meta dict |
|---|---|
from __future__ import absolute_import
from .datasets import (
load_reward_data,
load_neuropixels_data,
load_reaching_data,
)
__all__ = [
'load_reward_data',
'load_neuropixels_data',
'load_reaching_data',
]
| {
"content_hash": "b74a889cac7fb9ca11942a556a389a7c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 38,
"avg_line_length": 17.923076923076923,
"alnum_prop": 0.6394849785407726,
"repo_name": "codekansas/spykes",
"id": "cf699428a2e0e034e1fad0f4a98cabf877220eee",
"size": "233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spykes/io/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121450"
}
],
"symlink_target": ""
} |
class Config(object):
    """Base Flask config shared by all environments."""
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # NOTE(review): hard-coded secret key committed to source control --
    # this should be loaded from an environment variable instead.
    SECRET_KEY = 'www.jiar.vip|www.jiar.vip'
class ProdConfig(Config):
    """Production config; currently inherits everything from Config."""
    pass
class DevConfig(Config):
    """Development config class."""
    # Enable Flask's interactive debugger and auto-reload.
    DEBUG = True
    # MySQL connection.
    # NOTE(review): database credentials are embedded in source -- move them
    # to environment configuration before publishing this repository.
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:664198@127.0.0.1:3306/Flask-Blog'
| {
"content_hash": "fd4a89aaa68092ae69c87b992a550fa1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 85,
"avg_line_length": 27.866666666666667,
"alnum_prop": 0.6674641148325359,
"repo_name": "Jiar/Flask-Blog",
"id": "586585ec903fc184c5eab705f6c7d28de5715a74",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FlaskBlog/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9995"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "21197"
}
],
"symlink_target": ""
} |
from test import cassette, sleep, get_client_credentials_session
from test.resources.documents import *
from test.resources.files import assert_basket_file, assert_weaving_file
def test_should_list_files():
    """A file attached to a document shows up in the session-wide file list."""
    # get_user_session / delete_all_documents / create_document presumably come
    # from the star import of test.resources.documents -- TODO confirm.
    session = get_user_session()
    delete_all_documents()
    with cassette('fixtures/resources/files/list_files/list_files.yaml'):
        doc = create_document(session)
        doc.attach_file('fixtures/resources/files/basket.txt')
        page = session.files.list()
        assert len(page.items) == 1
        assert page.count == 1
        assert_basket_file(page.items[0])
def test_should_get_download_url_for_file():
    """A listed file exposes a download URL containing its content hash."""
    session = get_user_session()
    delete_all_documents()
    with cassette('fixtures/resources/files/list_files/get_download_url.yaml'):
        doc = create_document(session)
        doc.attach_file('fixtures/resources/files/basket.txt')
        url = session.files.list().items[0].download_url
        # SHA-1 of the attached fixture, as recorded in the cassette.
        assert '92c7a71b371eb439579be559b5eac9c09a743c42' in url
def test_should_page_through_files():
    """Three attached files paginate correctly with a page size of two."""
    session = get_user_session()
    delete_all_documents()
    with cassette('fixtures/resources/files/list_files/page_through_files.yaml'):
        doc1 = create_document(session)
        doc2 = create_document(session)
        doc1.attach_file('fixtures/resources/files/basket.txt')
        doc1.attach_file('fixtures/resources/files/weaving.txt')
        doc2.attach_file('fixtures/resources/files/basket.txt')
        first_page = session.files.list(page_size=2)
        assert len(first_page.items) == 2
        assert first_page.count == 3
        assert_basket_file(first_page.items[0])
        assert_weaving_file(first_page.items[1])
        second_page = first_page.next_page
        assert len(second_page.items) == 1
        assert second_page.count == 3
        assert_basket_file(second_page.items[0])
def test_should_list_files_by_document():
    """Files can be listed scoped to the document they are attached to."""
    session = get_user_session()
    delete_all_documents()
    with cassette('fixtures/resources/files/list_files/list_files_by_document.yaml'):
        doc = create_document(session)
        file = doc.attach_file('fixtures/resources/files/basket.txt')
        page = doc.files.list()
        assert len(page.items) == 1
        assert page.count == 1
        assert_basket_file(file)
def test_should_list_files_by_catalog_document():
    """Catalog documents expose their files via a client-credentials session."""
    session = get_client_credentials_session()
    with cassette('fixtures/resources/files/list_files/list_files_by_catalog_document.yaml'):
        doc = session.catalog.get('5cd8328e-febe-3299-8e26-cf6ab2c07f0f')
        page = doc.files.list()
        assert len(page.items) == 1
        assert page.count == 1
        assert page.items[0].filehash == '933b1d7ad3793aaf5de2090b4199cab8f8c6489a'
def test_should_list_files_by_group():
    """Files attached to a group document appear in that group's file list."""
    session = get_user_session()
    delete_all_group_documents()
    with cassette('fixtures/resources/files/list_files/list_files_by_group.yaml'):
        doc = create_group_document(session)
        file = doc.attach_file('fixtures/resources/files/basket.txt')
        page = session.groups.get('164d48fb-2343-332d-b566-1a4884a992e4').files.list()
        assert len(page.items) == 1
        assert page.count == 1
        assert_basket_file(file)
def test_should_list_files_added_since():
    """The added_since filter only returns files attached after the cutoff."""
    session = get_user_session()
    delete_all_documents()
    with cassette('fixtures/resources/files/list_files/added_since.yaml'):
        doc1 = create_document(session)
        doc1.attach_file('fixtures/resources/files/basket.txt')
        # Ensure doc2's files have a strictly later timestamp than doc1's.
        sleep(2)
        doc2 = create_document(session)
        doc2.attach_file('fixtures/resources/files/basket.txt')
        doc2.attach_file('fixtures/resources/files/weaving.txt')
        page = session.files.list(added_since=doc2.created.replace(seconds=-1))
        assert len(page.items) == 2
        assert page.count == 2
        assert_basket_file(page.items[0])
        assert_weaving_file(page.items[1])
def test_should_list_files_deleted_since():
    """The deleted_since filter returns files removed after the cutoff."""
    session = get_user_session()
    delete_all_documents()
    with cassette('fixtures/resources/files/list_files/deleted_since.yaml'):
        doc = create_document(session)
        file1 = doc.attach_file('fixtures/resources/files/basket.txt')
        file2 = doc.attach_file('fixtures/resources/files/weaving.txt')
        # Make the deletions strictly later than the document's creation time.
        sleep(1)
        file1.delete()
        file2.delete()
        page = session.files.list(deleted_since=doc.created)
        assert len(page.items) == 2
        assert page.count == 2
        assert page.items[0].id == file1.id
        assert page.items[1].id == file2.id
def test_should_get_document_for_file():
    """A listed file resolves back to its owning document via document()."""
    session = get_user_session()
    delete_all_documents()
    with cassette('fixtures/resources/files/list_files/get_document_for_file.yaml'):
        doc = create_document(session)
        doc.attach_file('fixtures/resources/files/basket.txt')
        page = session.files.list()
        assert len(page.items) == 1
        assert page.count == 1
        assert page.items[0].document().title == 'Underwater basket weaving'
def test_should_get_catalog_document_for_file():
    """A catalog file resolves back to its catalog document via document()."""
    session = get_client_credentials_session()
    with cassette('fixtures/resources/files/list_files/get_catalog_document_for_file.yaml'):
        doc = session.catalog.get('5cd8328e-febe-3299-8e26-cf6ab2c07f0f')
        page = doc.files.list()
        assert len(page.items) == 1
        assert page.count == 1
        assert page.items[0].document().title == 'Changes in tree reproductive traits reduce functional diversity ' \
                                                'in a fragmented Atlantic forest landscape'
| {
"content_hash": "9988421fc84883c7f86266446e828158",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 117,
"avg_line_length": 33.07602339181287,
"alnum_prop": 0.6614214992927864,
"repo_name": "Mendeley/mendeley-python-sdk",
"id": "95906efad1e4115a6df116e2dab617c31fe3e3f3",
"size": "5656",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/manual/files/test_list_files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "169000"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from .models import TweetGroup
from datetime import datetime, timedelta
from django.utils import timezone
import pandas as pd
import scrape
import json
from django.db.models import Max
def index(request):
    """Render the sentiment landing page with an empty template context."""
    return render(request, 'sentiment/index.html', {})
def update(request, name):
    """Return the latest sentiment stats for *name* as a JSON HttpResponse.

    If there is no stored ``TweetGroup`` row for *name*, or the newest one is
    more than 100 minutes old, fresh tweets are scraped and stored first.

    Args:
        request: The incoming Django request (only used for routing).
        name: The group/topic name to report on.

    Returns:
        HttpResponse whose body is a JSON object with the sentiment fields.
    """
    entry = TweetGroup.objects.filter(name=name).aggregate(Max('date_added'))
    now = timezone.now()
    last_added = entry['date_added__max']
    # `is None` instead of `== None`: identity test for the missing-row case.
    # Refresh when nothing is cached yet or the newest row is stale.
    if last_added is None or last_added < (now - timedelta(minutes=100)):
        (positive, negative, neutral,
         best_text, worst_text, total_score) = scrape.likeability(name)
        t = TweetGroup()
        t.name = name
        t.positive = positive
        t.negative = negative
        t.neutral = neutral
        t.date_added = now
        t.best_text = best_text
        t.worst_text = worst_text
        t.total_score = total_score
        t.save()
    # Re-read so the response always reflects the newest stored row.
    t = TweetGroup.objects.filter(name=name).order_by('-date_added')[0]
    return HttpResponse(json.dumps({
        'name': t.name,
        'positive': t.positive,
        'negative': t.negative,
        'neutral': t.neutral,
        'best-text': t.best_text,
        'worst-text': t.worst_text,
        'total-score': round(float(t.total_score), 4),
    }))
| {
"content_hash": "6c4f2654dd55edc12130654e466c0d55",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 210,
"avg_line_length": 38.39393939393939,
"alnum_prop": 0.6882399368587214,
"repo_name": "mattcoley/oxbridge-sentiment",
"id": "174c37322ff3d4e93bc1e5d1fc214650f441ce52",
"size": "1267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oxbridgesentiment/sentiment/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77361"
},
{
"name": "HTML",
"bytes": "23747"
},
{
"name": "JavaScript",
"bytes": "159291"
},
{
"name": "Python",
"bytes": "16040"
}
],
"symlink_target": ""
} |
from rich.console import Console
from rich.theme import Theme
# Shared Rich theme: semantic style names used throughout Breeze output.
custom_theme = Theme({"info": "blue", "warning": "magenta", "error": "red"})
# Force terminal rendering at a fixed 180-column width so output looks the
# same in CI logs as locally; "standard" colors for maximum portability.
console = Console(force_terminal=True, color_system="standard", width=180, theme=custom_theme)
| {
"content_hash": "c81a47f6090766febf0be3211095a274",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 94,
"avg_line_length": 47,
"alnum_prop": 0.7319148936170212,
"repo_name": "bolkedebruin/airflow",
"id": "175fb7014ea832c226159b3be675ce2346678f55",
"size": "1020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dev/breeze/src/airflow_breeze/console.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
} |
from redis_shard.shard import RedisShardAPI
from config import servers
import time
# Example usage of RedisShardAPI (NOTE: Python 2 print-statement syntax).
client = RedisShardAPI(servers)
# Basic get/set round-trip on a single key.
client.set('test', 1)
print client.get('test')
# Sorted-set operations route to whichever shard owns 'testset'.
client.zadd('testset', 'first', 1)
client.zadd('testset', 'second', 2)
print client.zrange('testset', 0, -1)
print client.zrank('testset', 'second')
print client.zrank('testset2', 'second')
# Keys containing the same {tag} hash to the same shard regardless of the
# text around the braces.
client.set('foo', 2)
client.set('a{foo}', 5)
client.set('b{foo}', 5)
client.set('c{foo}', 5)
client.set('{foo}d', 6)
client.set('{foo}e', 7)
client.set('e{foo}f', 8)
print client.get_server_name('foo')
print client.get_server_name('c{foo}')
print client.get_server_name('{foo}d')
print client.get_server_name('{foo}e')
print client.get_server_name('e{foo}f')
# Compare tag_keys (single-shard lookup) against keys (scans all shards).
t0 = time.time()
print client.tag_keys('*{foo}*')
t1 = time.time()
print t1 - t0
print client.keys('*{foo}*')
t2 = time.time()
print t2 - t1
# tag_mget accepts either varargs or a single list of keys.
print client.tag_mget('a{foo}', 'b{foo}', '{foo}d')
print client.tag_mget(['a{foo}', 'b{foo}', '{foo}d'])
| {
"content_hash": "2e57df8a374908c83144a9ffb359e83a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 53,
"avg_line_length": 27.457142857142856,
"alnum_prop": 0.6722164412070759,
"repo_name": "zhihu/redis-shard",
"id": "7642ff6dd7617e54ca98ec321f9feb95f077a4c8",
"size": "1007",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/myapp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "35088"
}
],
"symlink_target": ""
} |
import sys
import os
import xml.dom.minidom
from collections import defaultdict
from xml.sax.saxutils import escape as xml_escape, quoteattr
# Fail fast with a helpful message when the third-party requirements are
# missing.  NOTE: "except Exception, e" is Python 2-only syntax; this file
# targets Python 2.
try:
    from docopt import docopt
    from xmlbuilder import XMLBuilder
except Exception, e:
    print e
    print 'Required modules: docopt, xmlbuilder'
    sys.exit(1)
class DiagnosticsSet:
    """The full Faux Pas diagnostics document, parsed from a JSON stream."""

    def __init__(self, input_json_file):
        """Read UTF-8 JSON from *input_json_file* and wrap each diagnostic."""
        import json
        import codecs
        raw = codecs.getreader('utf-8')(input_json_file).read()
        self.obj = json.loads(raw)
        self.diagnostics = [Diagnostic(entry)
                            for entry in self.obj.get('diagnostics', [])]

    @property
    def pbxproj_path(self):
        """Path to the project.pbxproj file inside the checked project."""
        return os.path.join(self.obj.get('projectPath'), 'project.pbxproj')

    def grouped_diagnostics(self, by_property='file'):
        """Group diagnostics into a dict keyed by the given attribute."""
        grouped = defaultdict(list)
        for diagnostic in self.diagnostics:
            grouped[getattr(diagnostic, by_property)].append(diagnostic)
        return grouped
class Diagnostic:
    """One Faux Pas diagnostic entry with attribute access to its fields."""

    def __init__(self, dictobj):
        self.obj = dictobj
        # Nested structures get their own wrappers with suitable defaults.
        self.html = DictWrapper(self.obj.get('html'), defaultvalue='')
        self.extent = DiagnosticExtent(self.obj.get('extent'))

    def __getattr__(self, attrname):
        """Resolve unknown attributes from the underlying dict.

        Numeric fields (severity, confidence) fall back to 0; everything
        else falls back to the empty string.
        """
        fallback = 0 if attrname in ('severity', 'confidence') else ''
        return self.obj.get(attrname) or fallback
class DiagnosticExtent:
    """Start/end source positions of a diagnostic.

    Missing position fields default to 0 via DictWrapper.
    """
    def __init__(self, dictobj):
        self.start = DictWrapper(dictobj.get('start'), defaultvalue=0)
        self.end = DictWrapper(dictobj.get('end'), defaultvalue=0)
class DictWrapper:
    """Expose a dict's keys as attributes; missing keys yield a default."""

    def __init__(self, dictobj, defaultvalue):
        self.obj, self.defaultvalue = dictobj, defaultvalue

    def __getattr__(self, attrname):
        # Only invoked for names not found as real instance attributes.
        return self.obj.get(attrname, self.defaultvalue)
# Registry mapping output-format names to their formatter callables.
FORMATTER_FUNCTIONS = {}

def formatter_function(func):
    """Register *func* in FORMATTER_FUNCTIONS under its own name.

    Returns the original function unchanged.  The previous version returned a
    pass-through wrapper that added no behavior and discarded the function's
    __name__/__doc__ metadata; registering and returning func directly is
    equivalent for all callers (lookups go through FORMATTER_FUNCTIONS).
    """
    FORMATTER_FUNCTIONS[func.__name__] = func
    return func
@formatter_function
def checkstyle_xml(diags_set):
    """Render *diags_set* as a Checkstyle-compatible XML report string."""
    def converted_severity(fauxpas_severity):
        # checkstyle severities: ignore, info, warning, error
        if fauxpas_severity >= 9:
            return 'error'
        if fauxpas_severity >= 5:
            return 'warning'
        return 'info'

    x = XMLBuilder('checkstyle')
    for filepath, diags in diags_set.grouped_diagnostics(by_property='file').items():
        # Diagnostics without a file are attributed to the project file.
        with x.file(name=filepath or diags_set.pbxproj_path):
            for diag in diags:
                message = diag.info
                if message:
                    message += ' '
                message += '(%s - %s)' % (diag.ruleName, diag.ruleDescription)
                x.error(severity=converted_severity(diag.severity),
                        source=diag.ruleShortName,
                        message=message,
                        line=str(diag.extent.start.line),
                        column=str(diag.extent.start.utf16Column))
    return str(x)
@formatter_function
def xcode(diags_set):
    # Print each diagnostic in the "file:line:col: severity: ..." format that
    # Xcode's build log parser recognizes.  (Python 2 print statements.)
    for diag in diags_set.diagnostics:
        file_position = ''
        if 0 < len(diag.file):
            file_position = ('%s:%d:%d: '
                             % (diag.file,
                                diag.extent.start.line,
                                diag.extent.start.utf16Column))
        # Prefer the specific message; fall back to the rule description.
        description = (diag.info if 0 < len(diag.info)
                       else diag.ruleDescription).replace('\n', ' ')
        # Xcode only distinguishes errors from warnings; 9+ maps to error.
        severity = 'error' if 9 <= diag.severity else 'warning'
        print ('%s%s: %s - %s (%s)'
               % (file_position,
                  severity,
                  diag.ruleName,
                  description,
                  diag.ruleShortName)).encode('utf-8')
def main():
    """Usage: convert.py <format>

    Faux Pas diagnostics JSON should be provided through standard input.
    """
    # NOTE: this docstring is runtime data -- docopt parses it for the CLI
    # grammar -- so its wording must not be changed casually.
    args = docopt(main.__doc__)
    format_name = args.get('<format>')
    # NOTE(review): this local deliberately shadows the module-level
    # formatter_function decorator; consider renaming for clarity.
    formatter_function = FORMATTER_FUNCTIONS.get(format_name)
    if formatter_function is None:
        sys.stderr.write('<format> must be one of: %s\n'
                         % ', '.join(FORMATTER_FUNCTIONS.keys()))
        sys.exit(1)
    print formatter_function(DiagnosticsSet(sys.stdin))
# Entry point when executed directly as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "7331ae33d00dbdd6fd5504ca01a62b59",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 30.082758620689656,
"alnum_prop": 0.5786336542870243,
"repo_name": "FauxPasApp/fauxpas-converter",
"id": "4274a91a57e4da4db92f1d771173b2db2bab74c9",
"size": "4408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fauxpas_convert.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4408"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
from ray.function_manager import FunctionDescriptor
import ray.signature
# Default parameters for remote functions.
DEFAULT_REMOTE_FUNCTION_CPUS = 1
DEFAULT_REMOTE_FUNCTION_NUM_RETURN_VALS = 1
# NOTE(review): presumably 0 means "no limit" on calls per worker -- confirm
# against the Ray documentation.
DEFAULT_REMOTE_FUNCTION_MAX_CALLS = 0

logger = logging.getLogger(__name__)
class RemoteFunction(object):
    """A remote function.

    This is a decorated function. It can be used to spawn tasks.

    Attributes:
        _function: The original function.
        _function_descriptor: The function descriptor.
        _function_name: The module and function name.
        _num_cpus: The default number of CPUs to use for invocations of this
            remote function.
        _num_gpus: The default number of GPUs to use for invocations of this
            remote function.
        _resources: The default custom resource requirements for invocations of
            this remote function.
        _num_return_vals: The default number of return values for invocations
            of this remote function.
        _max_calls: The number of times a worker can execute this function
            before exiting.
        _decorator: An optional decorator that should be applied to the remote
            function invocation (as opposed to the function execution) before
            invoking the function. The decorator must return a function that
            takes in two arguments ("args" and "kwargs"). In most cases, it
            should call the function that was passed into the decorator and
            return the resulting ObjectIDs. For an example, see
            "test_decorated_function" in "python/ray/tests/test_basic.py".
        _function_signature: The function signature.
        _last_driver_id_exported_for: The ID of the driver ID of the last Ray
            session during which this remote function definition was exported.
            This is an imperfect mechanism used to determine if we need to
            export the remote function again. It is imperfect in the sense that
            the actor class definition could be exported multiple times by
            different workers.
    """

    def __init__(self, function, num_cpus, num_gpus, resources,
                 num_return_vals, max_calls):
        self._function = function
        self._function_descriptor = FunctionDescriptor.from_function(function)
        self._function_name = (
            self._function.__module__ + "." + self._function.__name__)
        # None for any of the resource arguments means "use the default".
        self._num_cpus = (DEFAULT_REMOTE_FUNCTION_CPUS
                          if num_cpus is None else num_cpus)
        self._num_gpus = num_gpus
        self._resources = resources
        self._num_return_vals = (DEFAULT_REMOTE_FUNCTION_NUM_RETURN_VALS if
                                 num_return_vals is None else num_return_vals)
        self._max_calls = (DEFAULT_REMOTE_FUNCTION_MAX_CALLS
                           if max_calls is None else max_calls)
        # Optional invocation-time decorator attached by ray internals.
        self._decorator = getattr(function, "__ray_invocation_decorator__",
                                  None)
        ray.signature.check_signature_supported(self._function)
        self._function_signature = ray.signature.extract_signature(
            self._function)
        self._last_driver_id_exported_for = None

    def __call__(self, *args, **kwargs):
        # Direct calls are disallowed; point users at .remote() instead.
        raise Exception("Remote functions cannot be called directly. Instead "
                        "of running '{}()', try '{}.remote()'.".format(
                            self._function_name, self._function_name))

    def remote(self, *args, **kwargs):
        """This runs immediately when a remote function is called."""
        return self._remote(args=args, kwargs=kwargs)

    def _submit(self,
                args=None,
                kwargs=None,
                num_return_vals=None,
                num_cpus=None,
                num_gpus=None,
                resources=None):
        # Deprecated alias for _remote(); kept for backward compatibility.
        logger.warning(
            "WARNING: _submit() is being deprecated. Please use _remote().")
        return self._remote(
            args=args,
            kwargs=kwargs,
            num_return_vals=num_return_vals,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources)

    def _remote(self,
                args=None,
                kwargs=None,
                num_return_vals=None,
                num_cpus=None,
                num_gpus=None,
                resources=None):
        """An experimental alternate way to submit remote functions."""
        worker = ray.worker.get_global_worker()
        worker.check_connected()
        if (self._last_driver_id_exported_for is None
                or self._last_driver_id_exported_for != worker.task_driver_id):
            # If this function was exported in a previous session, we need to
            # export this function again, because current GCS doesn't have it.
            self._last_driver_id_exported_for = worker.task_driver_id
            worker.function_actor_manager.export(self)
        kwargs = {} if kwargs is None else kwargs
        args = [] if args is None else args
        if num_return_vals is None:
            num_return_vals = self._num_return_vals
        # Merge per-call resource overrides with the function's defaults.
        resources = ray.utils.resources_from_resource_arguments(
            self._num_cpus, self._num_gpus, self._resources, num_cpus,
            num_gpus, resources)

        def invocation(args, kwargs):
            # The actual submission logic; may be wrapped by self._decorator.
            args = ray.signature.extend_args(self._function_signature, args,
                                             kwargs)
            if worker.mode == ray.worker.LOCAL_MODE:
                # In LOCAL_MODE, remote calls simply execute the function.
                # We copy the arguments to prevent the function call from
                # mutating them and to match the usual behavior of
                # immutable remote objects.
                result = self._function(*copy.deepcopy(args))
                return result
            object_ids = worker.submit_task(
                self._function_descriptor,
                args,
                num_return_vals=num_return_vals,
                resources=resources)
            # Single return value is unwrapped; zero returns yield None.
            if len(object_ids) == 1:
                return object_ids[0]
            elif len(object_ids) > 1:
                return object_ids

        if self._decorator is not None:
            invocation = self._decorator(invocation)

        return invocation(args, kwargs)
| {
"content_hash": "db00bf50a26738b68e419f38f34557b9",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 79,
"avg_line_length": 42.05161290322581,
"alnum_prop": 0.5986498926050936,
"repo_name": "atumanov/ray",
"id": "44d2777a290000630f38741ae9e344e1b9a52b49",
"size": "6518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/remote_function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20715"
},
{
"name": "C++",
"bytes": "1036803"
},
{
"name": "CSS",
"bytes": "9262"
},
{
"name": "Dockerfile",
"bytes": "3411"
},
{
"name": "HTML",
"bytes": "32704"
},
{
"name": "Java",
"bytes": "517715"
},
{
"name": "JavaScript",
"bytes": "8178"
},
{
"name": "Jupyter Notebook",
"bytes": "1610"
},
{
"name": "Python",
"bytes": "3081422"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "76928"
},
{
"name": "Smarty",
"bytes": "955"
}
],
"symlink_target": ""
} |
"""Contains the Display base class, which is a logical display in the mpf-mc."""
from typing import List, Union, Optional
from math import floor
from kivy.uix.floatlayout import FloatLayout
from kivy.clock import Clock
from kivy.uix.screenmanager import (ScreenManager, NoTransition,
SlideTransition, SwapTransition,
FadeTransition, WipeTransition,
FallOutTransition, RiseInTransition,
ScreenManagerException)
from kivy.uix.widget import WidgetException as KivyWidgetException
from kivy.uix.scatter import Scatter
from kivy.graphics import (
Translate, Fbo, ClearColor, ClearBuffers, Scale)
from kivy.properties import ObjectProperty
from mpfmc.uix.widget import Widget
from mpfmc.uix.slide import Slide
MYPY = False
if MYPY: # pragma: no cover
from mpfmc.core.mc import MpfMc # pylint: disable-msg=cyclic-import,unused-import
from kivy.uix.widget import \
Widget as KivyWidget # pylint: disable-msg=cyclic-import,unused-import,ungrouped-imports
from mpfmc.uix.widget import WidgetContainer # pylint: disable-msg=cyclic-import,unused-import,ungrouped-imports
# Maps config-friendly transition names to the Kivy transition classes used
# when switching between slides.
transition_map = dict(none=NoTransition,
                      slide=SlideTransition,
                      swap=SwapTransition,
                      fade=FadeTransition,
                      wipe=WipeTransition,
                      fall_out=FallOutTransition,
                      rise_in=RiseInTransition)
# pylint: disable-msg=too-many-instance-attributes
class Display(ScreenManager):
"""A display which can be used to show slides."""
displays_to_initialize = 0
texture = ObjectProperty(None, allownone=True)
    @staticmethod
    def create_default_display(mc: "MpfMc") -> None:
        """Create the built-in 800x600 enabled display named 'default'."""
        Display(mc, 'default', width=800, height=600, enabled=True)
    def __init__(self, mc: "MpfMc", name: str, **kwargs) -> None:
        """Initialise Display.

        Args:
            mc: The MpfMc instance.
            name: Name of this display.
            **kwargs: Display config; 'width', 'height' and 'enabled' are
                read here.
        """
        self.mc = mc
        self.name = name
        self.config = kwargs
        self._ready = False
        self.tags = []
        self.display = self
        self.parents = []
        self.mc.track_leak_reference(self)
        Display.displays_to_initialize += 1
        self.native_size = (self.config['width'], self.config['height'])
        self.size_hint = (None, None)
        self.size = self.native_size
        self.enabled = self.config['enabled']
        self.transition = NoTransition()
        self._blank_slide_name = '{}_blank'.format(self.name)
        super().__init__()
        # It is possible that the current slide changes more than one time during a single clock
        # frame. Sending multiple slide active events is not desired in this situation. This can
        # easily be solved using Kivy clock events. The clock event will only be triggered once
        # per clock frame no matter how many times it is called.
        self._current_slide_changed = Clock.create_trigger(self._post_active_slide_event, -1)
        # Need to create a widget that will be the parent of the slide manager. This is
        # necessary to allow widgets with negative z values to remain in the display while
        # slides are changed.
        self.container = FloatLayout(size_hint=(None, None), size=self.size)
        self.container.z = 0
        self.container.add_widget(self)
        self._display_created()
    def __repr__(self):
        """Debug representation including size and slide bookkeeping."""
        return '<Display name={}{}, current slide={}, total slides={}>'.format(
            self.name, self.size, self.current_slide_name, len(self.slides))
    def get_frame_data(self, *args):
        """Return the content of this display as buffer.

        @see: widget.export_to_png
        """
        del args
        # NOTE(review): self._slide_manager_parent is not assigned anywhere in
        # the visible class body -- presumably set externally; confirm,
        # otherwise this raises AttributeError.
        fbo = Fbo(size=self._slide_manager_parent.size, with_stencilbuffer=True)
        with fbo:
            ClearColor(0, 0, 0, 1)
            ClearBuffers()
            # Flip vertically: FBO pixel rows run bottom-up relative to the
            # widget coordinate system.
            Scale(1, -1, 1)
            Translate(-self.x,
                      -self.y - self.height, 0)
        fbo.add(self.canvas)
        fbo.draw()
        data = fbo.texture.pixels
        # Detach the canvas again so the display keeps rendering normally.
        fbo.remove(self.canvas)
        return data
    @property
    def ready(self):
        """Return True once _finalize_setup has completed for this display."""
        return self._ready

    @property
    def parent_widgets(self) -> List["WidgetContainer"]:
        """The list of all widgets owned by the display parent (excluding self)."""
        return [x for x in self.container.children if x != self]
def has_parent(self) -> bool:
"""Returns whether or not the display has a parent."""
return bool(self.container.parent is not None)
    def _display_created(self, *args) -> None:
        """Callback after this display is created.

        Reschedules itself until the display has reached its native size,
        then registers the display and continues initialization.
        """
        del args
        # There's a race condition since mpf-mc will continue while the display
        # gets setup. So we need to wait to know that the display is done.
        # Easiest way to do that is to check to see if the display is the right
        # size, and when it is, we move on.
        if (self.size[0] != self.native_size[0] or
                self.size[1] != self.native_size[1]):
            self.size = self.native_size
            Clock.schedule_once(self._display_created, 0)
            return
        # Add this display to the list of all available displays
        self.mc.displays[self.name] = self
        # If this display is configured as the default, set it.
        # If this display would overwrite an existing default, raise an AssertionError.
        try:
            if self.config['default']:
                if 'default' not in self.mc.targets:
                    self.mc.targets['default'] = self
                else:
                    raise AssertionError('Multiple displays have been set as the default. Please choose a single \
display to default to (\"{}\" is currently set as the default).'.format(
                        self.mc.targets['default'].name))
        except KeyError:
            # No 'default' key in the config; nothing to do.
            pass
        # Initialization is just about done, schedule callback to finish
        Clock.schedule_once(self._init_done, 0)
    def _init_done(self, *args) -> None:
        """Callback after this display has been initialized.

        Posts the per-display initialized event and, once every display has
        reached this point, schedules the all-displays-initialized step.
        """
        del args
        self.mc.post_mc_native_event('display_{}_initialized'.format(self.name))
        '''event: display_(name)_initialized
        config_section: displays
        class_label: display
        desc: The display called (name) has been initialized. This event is
        generated in the MC, so it won't be sent to MPF if the MC is started up
        and ready first.
        This event is part of the MPF-MC boot process and is not particularly
        useful for game developers. If you want to show a "boot" slide as
        early as possible, use the *mc_ready* event.
        '''
        Display.displays_to_initialize -= 1
        # Callback function to set this display to ready state once all displays
        # have been initialized
        self.mc.events.add_handler('displays_initialized', self._finalize_setup, priority=10000)
        if not Display.displays_to_initialize:
            Clock.schedule_once(self._displays_initialized)
    def _displays_initialized(self, *args) -> None:
        """Callback after all displays have been initialized.

        Chooses the 'default' target if none was configured: a single display
        wins outright, then 'window'/'dmd' by name, then the alphabetically
        first display.
        """
        del args
        # Determine the 'default' display
        if len(self.mc.displays) == 1:
            self.mc.targets['default'] = next(iter(self.mc.displays.values()))
        elif 'default' not in self.mc.targets:
            for target in ('window', 'dmd'):
                if target in self.mc.displays:
                    self.mc.targets['default'] = self.mc.displays[target]
                    break
            if 'default' not in self.mc.targets:
                self.mc.targets['default'] = self.mc.displays[
                    (sorted(self.mc.displays.keys()))[0]]
        self.mc.log.info("Display: Setting '%s' as default display", self.mc.targets['default'].name)
        self.mc.displays_initialized()
    def _finalize_setup(self, **kwargs) -> None:
        """Callback function after all displays have been initialized. This
        method finalizes the display setup and gets it ready to use as a
        target. The 'display_{}_ready' event is posted once it is ready.
        """
        del kwargs
        # This display is now a valid target so add it to the list
        self.mc.targets[self.name] = self
        # Create a blank slide for this display. Why?
        # 1. sometimes people try to play widgets with no slide. This makes
        # that work.
        # 2. the first slide that's created and added to this frame will be
        # automatically shown, which we don't want. Also we want to ensure that
        # our slide event will be called which only happens when this slide is
        # switched to, rather than automatically added.
        self.create_blank_slide()
        self._ready = True
        self.mc.post_mc_native_event('display_{}_ready'.format(self.name))
        '''event: display_(name)_ready
        config_section: displays
        class_label: display
        desc: The display target called (name) is now ready and available to
        show slides.
        This event is useful with display widgets where you want to add
        a display to an existing slide which shows some content, but you
        need to make sure the display exists before showing a slide.
        So if you have a display called "overlay", then you can add it to
        a slide however you want, and when it's added, the event
        "display_overlay_ready" will be posted, and then you can use that event
        in your slide_player to trigger the first slide you want to show.
        Note that this event is posted by MPF-MC and will not exist on the MPF
        side. So you can use this event for slide_player, widget_player, etc.,
        but not to start shows or other things controlled by MPF.'''
    @property
    def current_slide(self) -> "Slide":
        """Returns the Slide object of the current slide."""
        return self.current_screen

    @current_slide.setter
    def current_slide(self, value: Union[str, "Slide"]):
        """Set the current slide.

        You can set it to a Slide object or a string of the slide name.
        Values of any other type are silently ignored.
        """
        if isinstance(value, Slide):
            self._set_current_slide(value)
        elif isinstance(value, str):
            self._set_current_slide_name(value)
    @property
    def current_slide_name(self) -> str:
        """Returns the string name of the current slide."""
        # Delegates to ScreenManager's `current` property.
        return self.current

    @current_slide_name.setter
    def current_slide_name(self, value: str):
        """Sets the current slide based on the string name of the slide you
        want to be shown."""
        self._set_current_slide_name(value)

    @property
    def slides(self) -> List["Slide"]:
        """Return list of slide objects of all the active slides for this slide frame."""
        # Slides are stored as ScreenManager screens.
        return self.screens
    def create_blank_slide(self) -> "Slide":
        """Create (or return the existing) blank slide for this display."""
        return self.add_slide(self._blank_slide_name)

    def get_slide(self, name: str) -> "Slide":
        """Return the Slide associated with the name or raise a
        :class:`ScreenManagerException` if not found."""
        return self.get_screen(name)
    # pylint: disable-msg=too-many-arguments
    def add_slide(self, name: str, config: Optional[dict] = None, priority: int = 0,
                  key: Optional[str] = None, play_kwargs: Optional[dict] = None) -> "Slide":
        """Add a slide to this display.

        Add a slide to the list of slides managed by the display (or returns the existing
        slide with the specified name if it already exists). This method just adds the
        slide. It does not display it.

        Args:
            name: The slide name.
            config: The slide config.
            priority: The slide priority.
            key: Optional key.
            play_kwargs: Additional play kwargs.

        Returns:
            The Slide object.
        """
        # See if slide already exists. If so, return it
        if self.has_screen(name):
            return self.get_screen(name)
        # Slide() creates a new slide and adds it to this screen manager (display)
        # as a side effect of its constructor, so the instance is not added here.
        return Slide(mc=self.mc, name=name, target=self.name,
                     config=config, key=key, priority=priority,
                     play_kwargs=play_kwargs)
    # pylint: disable-msg=too-many-arguments
    def show_slide(self, slide_name: str, transition: Optional[str] = None,
                   key: Optional[str] = None, force: bool = False, priority: int = 0,
                   show: Optional[bool] = True, expire: Optional[float] = None,
                   play_kwargs: Optional[dict] = None, **kwargs) -> bool:
        """
        Request to show the specified slide. Many of the slide parameters may be overridden
        using the arguments for this function.

        Args:
            slide_name: The name of the slide.
            transition: The slide transition (overrides any stored in the slide).
            key: The slide key.
            force: When true, the slide will be displayed regardless of the priority of the
                current slide.
            priority: The priority of the slide to show.
            show: Whether or not to actually show the slide.
            expire: Expiration time (in seconds) after which the slide will be automatically
                removed (overrides value stored in the slide).
            play_kwargs: Kwargs related to playing/displaying the slide.
            **kwargs: Additional kwargs (will override settings in the play_kwargs parameter).

        Returns:
            True if the slide will be shown, False otherwise.

        Raises:
            AssertionError: If the slide is neither registered on this
                display nor present in the machine-wide slide configs.
        """
        # TODO: Is the show parameter really needed? Why call show_slide and not show the slide?
        # Merge **kwargs over play_kwargs; note this mutates the caller's
        # play_kwargs dict (or aliases kwargs) rather than copying.
        if not play_kwargs:
            play_kwargs = kwargs
        else:
            play_kwargs.update(kwargs)

        # Reuse an already-registered slide, otherwise build one from the
        # machine-wide slide config.
        if self.has_screen(slide_name):
            slide = self.get_screen(slide_name)
        else:
            try:
                slide_config = self.mc.slides[slide_name]
            except KeyError:
                raise AssertionError("Slide {} not found".format(slide_name))

            slide = self.add_slide(name=slide_name,
                                   config=slide_config,
                                   priority=priority,
                                   key=key,
                                   play_kwargs=play_kwargs)

        # update the widgets with whatever kwargs came through here;
        # widgets without an update_kwargs method are skipped.
        if play_kwargs:
            for widget in slide.walk():
                try:
                    widget.update_kwargs(**play_kwargs)
                except AttributeError:
                    pass

        if not transition:
            try:  # anon slides are in the collection
                transition = self.mc.slides[slide_name]['transition']
            except KeyError:
                pass

        # If there's an expire kwarg, that takes priority over slide's expire
        if expire:
            slide.schedule_removal(expire)
        elif slide.expire:
            slide.schedule_removal(slide.expire)

        if (slide.priority >= self.current_slide.priority and show) or force:
            # We need to show this slide

            # Have to set a transition even if there's not one because we have
            # to remove whatever transition was last used
            self.transition.stop()
            self.transition = self.mc.transition_manager.get_transition(transition)
            self._set_current_slide(slide)
            return True
        else:
            # Not showing this slide
            return False
# pylint: disable-msg=too-many-arguments
def add_and_show_slide(self, widgets: Optional[dict] = None,
slide_name: Optional[str] = None,
transition: Optional[str] = None, priority: int = 0,
key: Optional[str] = None, force: bool = False,
background_color=None,
expire: Optional[float] = None, play_kwargs=None,
**kwargs) -> bool:
"""Create and show the slide.
If a slide with this name already exists, it will be replaced.
Args:
widgets: An optional dictionary of widgets to add to the slide.
slide_name: The name of the slide.
transition: The slide transition (overrides any stored in the slide).
force: When true, the slide will be displayed regardless of the priority of the
current slide.
key: The slide key.
priority: The priority of the slide to show.
expire: Expiration time (in seconds) after which the slide will be automatically
removed (overrides value stored in the slide).
play_kwargs: Kwargs related to playing/displaying the slide.
**kwargs: Additional kwargs (will override settings in the play_kwargs parameter).
Returns:
True is the slide will be shown, False otherwise.
"""
if not play_kwargs:
play_kwargs = kwargs
else:
play_kwargs.update(kwargs)
slide_obj = self.add_slide(name=slide_name,
config=dict(widgets=widgets, background_color=background_color),
priority=priority, key=key)
return self.show_slide(slide_name=slide_obj.name, transition=transition,
priority=priority, force=force, key=key,
expire=expire, play_kwargs=play_kwargs)
    def remove_slide(self, slide: Union["Slide", str],
                     transition_config: Optional[dict] = None) -> bool:
        """Remove a slide from the display.

        Args:
            slide: The slide to remove (can be name string or Slide object)
            transition_config: Optional dictionary containing the transition
                configuration to use while removing the slide (overrides
                the slide's own setting).

        Returns:
            True if the slide is scheduled to be removed, False otherwise

        Notes:
            The automatically generated blank slide cannot be removed;
            attempting to do so simply returns False.
        """
        # TODO:
        # Warning, if you just created a slide, you have to wait at least on
        # tick before removing it. Can we prevent that? What if someone tilts
        # at the exact perfect instant when a mode was starting or something?

        # maybe we make sure to run a Kivy tick between bcp reads or something?

        # Resolve a name string to its Slide; an unknown name that is not a
        # Slide object means there is nothing to remove.
        try:
            slide = self.get_slide(slide)
        except ScreenManagerException:  # no slide by that name
            if not isinstance(slide, Slide):
                return False

        # Do not allow the blank slide to be removed
        if slide.name == self._blank_slide_name:
            return False

        slide.prepare_for_removal()

        self.mc.active_slides.pop(slide.name, None)

        # If the current slide is the active one, find the next highest
        # priority one to show instead.
        if self.current_slide == slide:
            new_slide = self._get_next_highest_priority_slide(slide)

            if self.transition:
                self.transition.stop()

            # Transition preference order: explicit config, the outgoing
            # slide's transition_out, then no transition at all.
            if transition_config:
                self.transition = self.mc.transition_manager.get_transition(
                    transition_config)
            elif self.current_slide.transition_out:
                self.transition = self.mc.transition_manager.get_transition(
                    self.current_slide.transition_out)
            else:
                self.transition = NoTransition()
            self.transition.bind(on_complete=self._remove_transition)
        else:
            new_slide = None

        # Set the new slide first, so we can transition out of the old before removing
        if new_slide:
            self._set_current_slide(new_slide)

        try:
            self.remove_widget(slide)
        except ScreenManagerException:
            return False

        return True
def _remove_transition(self, transition):
"""Remove transition if done."""
if self.transition == transition:
self.transition = NoTransition()
    def _set_current_slide(self, slide: "Slide"):
        """Make *slide* the screen manager's current screen.

        Falls back to a freshly-created blank slide when *slide* is falsy.
        """
        # slide frame requires at least one slide, so if you try to set current
        # to None, it will create a new slide called '<display name>_blank' at
        # priority 0 and show that one

        # I think there's a bug in Kivy 1.9.1. According to the docs, you
        # should be able to set self.current to a screen name. But if that
        # screen is already managed by this screen manager, it will raise
        # an exception, and the source is way deep in their code and not
        # easy to fix by subclassing. So this is sort of a hack that looks
        # for that exception, and if it sees it, it just removes and
        # re-adds the screen.

        if not slide:
            slide = self.create_blank_slide()

        if self.current == slide.name:
            # Already the current slide; nothing to do.
            return

        try:
            self.current = slide.name
        except KivyWidgetException:
            self.remove_widget(slide)
            self.add_widget(slide)
            self.current = slide.name

        # Post the event via callback at the end of the frame in case more than
        # one slide was set in this frame, so we only want to post the event
        # for the slide that actually became active. The Kivy clock event will
        # only call the associated callback once per frame when triggered no
        # matter how many times it is called.
        self._current_slide_changed()
def _set_current_slide_name(self, slide_name):
try:
self._set_current_slide(self.get_screen(slide_name))
except ScreenManagerException:
raise ValueError('Cannot set current slide to "{}" as there is '
'no slide in this slide_frame with that '
'name'.format(slide_name))
def _get_next_highest_priority_slide(self, slide: "Slide") -> "Slide":
"""Return the slide with the next highest priority."""
new_slide = None
for s in self.slides:
if s == slide:
continue
if not new_slide:
new_slide = s
elif s.priority > new_slide.priority:
new_slide = s
elif (s.priority == new_slide.priority and
s.creation_order > new_slide.creation_order):
new_slide = s
return new_slide
    def add_widget_to_current_slide(self, widget: "KivyWidget"):
        """Adds the widget to the current slide (delegates to the slide's
        own ``add_widget``)."""
        self.current_slide.add_widget(widget)
    def add_widgets_to_current_slide(self, widgets: List["KivyWidget"]):
        """Adds a list of widgets to the current slide, in order."""
        for w in widgets:
            self.add_widget_to_current_slide(w)
def remove_widgets_by_key(self, key: str) -> None:
"""Removes all widgets with the specified key."""
for widget in self.find_widgets_by_key(key):
widget.prepare_for_removal()
if isinstance(widget, Widget) and widget.container and widget.container.parent:
widget.container.parent.remove_widget(widget.container)
elif widget.parent:
widget.parent.remove_widget(widget)
def find_widgets_by_key(self, key: str) -> List["KivyWidget"]:
"""Retrieves a list of all widgets with the specified key value."""
widgets = []
# First find all matching widgets owned by the slide parent
for child in self.parent_widgets:
widgets.extend([x for x in child.walk(restrict=True, loopback=False)
if hasattr(x, "key") and x.key == key])
# Finally find all matching widgets owned by each slide
for slide in self.slides:
widgets.extend(slide.find_widgets_by_key(key))
return widgets
    def _post_active_slide_event(self, dt) -> None:
        """Posts an event that a new slide is now active."""
        del dt  # Kivy clock timestamp; unused.
        self.mc.post_mc_native_event('slide_{}_active'.format(self.current_slide_name))

        '''event: slide_(name)_active
        config_section: slides
        class_label: slide

        desc: A slide called (name) has just become active, meaning that
        it's now showing as the current slide.

        This is useful for things like the widget_player where you want to
        target a widget for a specific slide, but you can only do so if
        that slide exists.

        Slide names do not take into account what display they're playing on,
        so be sure to create machine-wide unique names when you're naming
        your slides.
        '''
class DisplayOutput(Scatter):

    """Widget that shows a Display, scaled and centered to fit its parent.

    Instead of re-parenting the display widget, only the display's canvas
    is shared (see ``add_display_source``), which lets one display feed
    multiple outputs.
    """

    def __init__(self, parent: "KivyWidget", display: "Display", **kwargs):
        # Disable all Scatter interactions unless the caller asks for them.
        kwargs.setdefault('do_scale', False)
        kwargs.setdefault('do_translation', False)
        kwargs.setdefault('do_rotation', False)

        super().__init__(**kwargs)

        self.key = None

        # It is important that the content of this display output does not contain any
        # circular references to the same display (cannot do a recursive
        # picture-in-picture or Kivy will crash). Detect and prevent that situation.

        # Rather than adding the display as a child of this widget, we will simply
        # add the display's canvas to this widget's canvas. This allows the display
        # to essentially have multiple parents. The canvas contains all the
        # instructions needed to draw the widgets.
        self.display = display
        parent.bind(size=self.on_parent_resize)
        self._fit_to_parent()

    #
    # Tree management
    #

    def add_display_source(self, widget):
        """Add a new widget as a child of this widget.

        :Parameters:
            `widget`: :class:`Widget`
                Widget to add to our list of children.
        """
        if not isinstance(widget, Display):
            # Fixed error message: it previously referenced a nonexistent
            # add_widget_multi_parent() method.
            raise KivyWidgetException(
                'add_display_source() can be used only with instances'
                ' of the Display class.')

        widget = widget.__self__
        if widget is self:
            raise KivyWidgetException(
                'Widget instances cannot be added to themselves.')

        # Record this output as one of the display's (multiple) parents and
        # share its canvas so it gets drawn here too.
        widget.parent = self
        widget.parents.append(self)
        canvas = self.canvas
        canvas.add(widget.container.canvas)

    def remove_display_source(self, widget):
        """Remove a display previously added via ``add_display_source``."""
        if not isinstance(widget, Display):
            raise KivyWidgetException(
                'remove_display_source() can be used only with instances'
                ' of the Display class.')

        widget.parents.remove(self)
        widget.parent = None
        self.canvas.remove(widget.container.canvas)

    def __repr__(self) -> str:  # pragma: no cover
        try:
            return '<DisplayOutput size={}, pos={}, source={}>'.format(
                self.size, self.pos, self.display.name)
        except AttributeError:
            return '<DisplayOutput size={}, source=(none)>'.format(self.size)

    def on_parent_resize(self, *args):
        """Fit to parent on resize."""
        del args
        self._fit_to_parent()

    def _fit_to_parent(self, *args):
        """Center and scale display output in parent display widget."""
        del args
        if self.parent:
            # Uniform scale so the display fits entirely inside the parent.
            self.scale = min(self.parent.width / float(self.display.width),
                             self.parent.height / float(self.display.height))

            self.width = floor(self.scale * self.display.width)
            self.height = floor(self.scale * self.display.height)
            self.x = (self.parent.width - self.width) // 2
            self.y = (self.parent.height - self.height) // 2
| {
"content_hash": "a47a9dd282fd1258f0958fb0f89ec6a6",
"timestamp": "",
"source": "github",
"line_count": 704,
"max_line_length": 119,
"avg_line_length": 40.40340909090909,
"alnum_prop": 0.5995992124876951,
"repo_name": "missionpinball/mpf-mc",
"id": "a59dbc510e7e0c24750a2047ceb12e83407b2a5b",
"size": "28444",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mpfmc/uix/display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3434"
},
{
"name": "Cython",
"bytes": "44814"
},
{
"name": "Dockerfile",
"bytes": "1441"
},
{
"name": "Makefile",
"bytes": "262"
},
{
"name": "Python",
"bytes": "1198826"
},
{
"name": "Shell",
"bytes": "829"
}
],
"symlink_target": ""
} |
from .settings import *

# Production overrides: disable the Figwheel live-reload server and serve
# the 'min' (minified) ClojureScript build instead.
CLJS_LOADER['FIGWHEEL'] = False
# NOTE(review): `Keyword` is presumably re-exported by .settings via the
# star import above — confirm.
CLJS_LOADER['CLJS_BUILD'] = Keyword('min')
| {
"content_hash": "cd452f5befa5532648fb78b4d0d972eb",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 25,
"alnum_prop": 0.7,
"repo_name": "jstaffans/django-cljs-loader",
"id": "eab8f964b33a8dc89f415bac102239b0dc802d30",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/simple/app/settings-prod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "1650"
},
{
"name": "Makefile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "16738"
}
],
"symlink_target": ""
} |
from gevent import monkey; monkey.patch_all()
from werkzeug.serving import run_with_reloader
from gevent import wsgi
from rowboat import ENV
from rowboat.web import rowboat
from rowboat.sql import init_db
from yaml import load
import os
import copy
import click
import signal
import logging
import gevent
import subprocess
class BotSupervisor(object):
def __init__(self, env={}):
self.proc = None
self.env = env
self.bind_signals()
self.start()
def bind_signals(self):
signal.signal(signal.SIGUSR1, self.handle_sigusr1)
def handle_sigusr1(self, signum, frame):
print 'SIGUSR1 - RESTARTING'
gevent.spawn(self.restart)
def start(self):
env = copy.deepcopy(os.environ)
env.update(self.env)
self.proc = subprocess.Popen(['python', '-m', 'disco.cli', '--config', 'config.yaml'], env=env)
def stop(self):
self.proc.terminate()
def restart(self):
try:
self.stop()
except:
pass
self.start()
def run_forever(self):
while True:
self.proc.wait()
gevent.sleep(5)
@click.group()
def cli():
    # Default all loggers to INFO so subcommand output is visible.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
@cli.command()
@click.option('--reloader/--no-reloader', '-r', default=False)
def serve(reloader):
    # Serve the web app on all interfaces, optionally under werkzeug's
    # auto-reloader for development.
    def run():
        wsgi.WSGIServer(('0.0.0.0', 8686), rowboat.app).serve_forever()

    if not reloader:
        run()
    else:
        run_with_reloader(run)
@cli.command()
@click.option('--env', '-e', default='local')
def bot(env):
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; config.yaml is a local file here, but consider
    # switching to yaml.safe_load.
    with open('config.yaml', 'r') as f:
        config = load(f)

    bot_env = {
        'ENV': env,
        'DSN': config['DSN'],
    }
    supervisor = BotSupervisor(env=bot_env)
    supervisor.run_forever()
@cli.command('add-global-admin')
@click.argument('user-id')
def add_global_admin(user_id):
from rowboat.redis import rdb
from rowboat.models.user import User
init_db(ENV)
rdb.sadd('global_admins', user_id)
User.update(admin=True).where(User.user_id == user_id).execute()
print 'Ok, added {} as a global admin'.format(user_id)
@cli.command('wh-add')
@click.argument('guild-id')
@click.argument('flag')
def add_whitelist(guild_id, flag):
from rowboat.models.guild import Guild
init_db(ENV)
flag = Guild.WhitelistFlags.get(flag)
if not flag:
print 'Invalid flag'
return
try:
guild = Guild.get(guild_id=guild_id)
except Guild.DoesNotExist:
print 'No guild exists with that id'
return
guild.whitelist.append(int(flag))
guild.save()
guild.emit_update()
print 'added flag'
@cli.command('wh-rmv')
@click.argument('guild-id')
@click.argument('flag')
def rmv_whitelist(guild_id, flag):
from rowboat.models.guild import Guild
init_db(ENV)
flag = Guild.WhitelistFlags.get(flag)
if not flag:
print 'Invalid flag'
return
try:
guild = Guild.get(guild_id=guild_id)
except Guild.DoesNotExist:
print 'No guild exists with that id'
return
guild.whitelist.remove(int(flag))
guild.save()
guild.emit_update()
print 'removed flag'
# Entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
| {
"content_hash": "b6e346d3d3f01c63929c7eb83c05c7bf",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 103,
"avg_line_length": 22.089655172413792,
"alnum_prop": 0.621604745551046,
"repo_name": "justdotJS/rowboat",
"id": "eaa4e6a2799c7a6a37692311f3e6880fdd36211d",
"size": "3225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from maskrcnn_benchmark.structures.bounding_box import BoxList
# TODO check if want to return a single BoxList or a composite
# object
class MaskPostProcessor(nn.Module):
    """
    From the results of the CNN, post process the masks
    by taking the mask corresponding to the class with max
    probability (which are of fixed size and directly output
    by the CNN) and return the masks in the mask field of the BoxList.

    If a masker object is passed, it will additionally
    project the masks in the image according to the locations in boxes.
    """

    def __init__(self, masker=None):
        super(MaskPostProcessor, self).__init__()
        # Optional Masker that pastes masks into full-image coordinates.
        self.masker = masker

    def forward(self, x, boxes):
        """
        Arguments:
            x (Tensor): the mask logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field mask
        """
        mask_prob = x.sigmoid()

        # select masks corresponding to the predicted classes
        num_masks = x.shape[0]
        labels = [bbox.get_field("labels") for bbox in boxes]
        labels = torch.cat(labels)
        index = torch.arange(num_masks, device=labels.device)
        # Keep a singleton channel dim after the per-class selection.
        mask_prob = mask_prob[index, labels][:, None]

        # Split the flat batch back into per-image chunks.
        boxes_per_image = [len(box) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)

        if self.masker:
            mask_prob = self.masker(mask_prob, boxes)

        results = []
        for prob, box in zip(mask_prob, boxes):
            # Copy the input BoxList (and its fields) and attach the masks
            # as the extra "mask" field.
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox.add_field("mask", prob)
            results.append(bbox)

        return results
class MaskPostProcessorCOCOFormat(MaskPostProcessor):
    """
    From the results of the CNN, post process the results
    so that the masks are pasted in the image, and
    additionally convert the results to COCO format.
    """

    def forward(self, x, boxes):
        """Run the base post-processing, then RLE-encode each mask.

        The "mask" field of every returned BoxList is replaced with a list
        of COCO-style RLE dicts (with utf-8 decoded counts).
        """
        # pycocotools is an optional dependency, so import it lazily.
        # (The redundant local `import numpy as np` was removed — np is
        # already imported at module level.)
        import pycocotools.mask as mask_util

        results = super(MaskPostProcessorCOCOFormat, self).forward(x, boxes)
        for result in results:
            masks = result.get_field("mask").cpu()
            # encode() expects Fortran-ordered HxWx1 uint8 arrays.
            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
                for mask in masks
            ]
            for rle in rles:
                rle["counts"] = rle["counts"].decode("utf-8")
            result.add_field("mask", rles)
        return results
# the next two functions should be merged inside Masker
# but are kept here for the moment while we need them
# temporarily for paste_mask_in_image
def expand_boxes(boxes, scale):
    """Scale each box about its own center by *scale*.

    Args:
        boxes: Tensor of shape (N, 4) in (x1, y1, x2, y2) format.
        scale: Multiplicative factor applied to each box's half-extents.

    Returns:
        A new (N, 4) tensor of the scaled boxes.
    """
    x_center = (boxes[:, 2] + boxes[:, 0]) * .5
    y_center = (boxes[:, 3] + boxes[:, 1]) * .5
    half_w = (boxes[:, 2] - boxes[:, 0]) * .5 * scale
    half_h = (boxes[:, 3] - boxes[:, 1]) * .5 * scale

    expanded = torch.zeros_like(boxes)
    expanded[:, 0] = x_center - half_w
    expanded[:, 1] = y_center - half_h
    expanded[:, 2] = x_center + half_w
    expanded[:, 3] = y_center + half_h
    return expanded
def expand_masks(mask, padding):
    """Zero-pad square masks by *padding* pixels on every side.

    Args:
        mask: Tensor whose first dim is N and last dim is the mask size M.
        padding: Number of zero pixels to add on each side (must be > 0).

    Returns:
        Tuple of (padded masks of shape (N, 1, M+2p, M+2p), the
        size ratio (M+2p)/M as a float).
    """
    n = mask.shape[0]
    m = mask.shape[-1]
    padded_size = m + 2 * padding
    scale = float(padded_size) / m
    padded_mask = mask.new_zeros((n, 1, padded_size, padded_size))
    padded_mask[:, :, padding:-padding, padding:-padding] = mask
    return padded_mask, scale
def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):
    """Paste one fixed-size mask into a full-size (im_h, im_w) canvas.

    The mask is padded, resized to the (expanded) box, optionally
    thresholded to a binary mask, and written into an all-zeros uint8
    image at the box location (clipped to the image bounds).
    """
    # fp16 isn't supported on the CPU, so promote to float first.
    mask = mask.float()
    box = box.float()

    padded_mask, scale = expand_masks(mask[None], padding=padding)
    mask = padded_mask[0, 0]
    box = expand_boxes(box[None], scale)[0].to(dtype=torch.int32)

    TO_REMOVE = 1
    w = max(int(box[2] - box[0] + TO_REMOVE), 1)
    h = max(int(box[3] - box[1] + TO_REMOVE), 1)

    # Resize the (1, 1, M, M) mask to the box size.
    mask = mask.expand((1, 1, -1, -1)).to(torch.float32)
    mask = F.interpolate(mask, size=(h, w), mode='bilinear', align_corners=False)
    mask = mask[0][0]

    if thresh >= 0:
        mask = mask > thresh
    else:
        # For visualization and debugging, keep a soft 0-255 mask instead.
        mask = (mask * 255).to(torch.uint8)

    im_mask = torch.zeros((im_h, im_w), dtype=torch.uint8)
    x_0, x_1 = max(box[0], 0), min(box[2] + 1, im_w)
    y_0, y_1 = max(box[1], 0), min(box[3] + 1, im_h)

    im_mask[y_0:y_1, x_0:x_1] = mask[
        (y_0 - box[1]):(y_1 - box[1]), (x_0 - box[0]):(x_1 - box[0])
    ]
    return im_mask
class Masker(object):
    """
    Projects a set of masks in an image on the locations
    specified by the bounding boxes
    """

    def __init__(self, threshold=0.5, padding=1):
        self.threshold = threshold
        self.padding = padding

    def forward_single_image(self, masks, boxes):
        """Paste every mask of one image; returns an (N, 1, H, W) tensor."""
        boxes = boxes.convert("xyxy")
        im_w, im_h = boxes.size
        pasted = [
            paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding)
            for mask, box in zip(masks, boxes.bbox)
        ]
        if not pasted:
            # No detections: preserve the mask spatial dims in the result.
            return masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1]))
        return torch.stack(pasted, dim=0)[:, None]

    def __call__(self, masks, boxes):
        # Accept a single BoxList as well as a per-image list.
        if isinstance(boxes, BoxList):
            boxes = [boxes]

        # Make some sanity check
        assert len(boxes) == len(masks), "Masks and boxes should have the same length."

        results = []
        for mask, box in zip(masks, boxes):
            assert mask.shape[0] == len(box), "Number of objects should be the same."
            results.append(self.forward_single_image(mask, box))
        return results
def make_roi_mask_post_processor(cfg):
    """Build the MaskPostProcessor configured by *cfg*.

    A Masker (which pastes masks into image coordinates) is attached only
    when POSTPROCESS_MASKS is enabled in the config.
    """
    masker = None
    if cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS:
        threshold = cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD
        masker = Masker(threshold=threshold, padding=1)
    return MaskPostProcessor(masker)
| {
"content_hash": "196dfe50b59bcee75466115a7ee7fd01",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 87,
"avg_line_length": 31.759615384615383,
"alnum_prop": 0.587344838026037,
"repo_name": "mlperf/training_results_v0.7",
"id": "fbe248973919db782a008e687f6067a23eef6f64",
"size": "6745",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/maskrcnn/implementations/implementation_closed/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.postgres.operations import UnaccentExtension
class Migration(migrations.Migration):
    """Enable PostgreSQL's ``unaccent`` extension (accent-insensitive text
    lookups) on the database."""

    dependencies = [
        ('users', '0003_auto_20170919_1335'),
    ]

    operations = [
        UnaccentExtension()
    ]
| {
"content_hash": "bedf60b8e344e2ca55981e45775f321c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 64,
"avg_line_length": 20.533333333333335,
"alnum_prop": 0.6948051948051948,
"repo_name": "Angoreher/xcero",
"id": "e6c9f81f252cc47ce800b7cab7f3064658e5285e",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/migrations/0004_auto_20170919_1336.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "505"
},
{
"name": "HTML",
"bytes": "31590"
},
{
"name": "JavaScript",
"bytes": "78"
},
{
"name": "Python",
"bytes": "166869"
},
{
"name": "Shell",
"bytes": "8487"
}
],
"symlink_target": ""
} |
from urllib.request import Request, urlopen
import xml.dom.minidom
# Fetch map info from the CMX (context-aware) API and pretty-print the XML.
# NOTE(review): this targets a lab controller over HTTPS; urlopen will fail
# on a self-signed certificate unless the CA is trusted.
req = Request('https://64.103.26.61/api/contextaware/v1/maps/info/DevNetCampus/DevNetBuilding/DevNetZone')
req.add_header('Authorization', 'Basic bGVhcm5pbmc6bGVhcm5pbmc==')
response = urlopen(req)
responseString = response.read().decode("utf-8")
dom = xml.dom.minidom.parseString(responseString)
# Renamed from `xml` so the imported `xml` package is not shadowed.
pretty_xml = dom.toprettyxml()
print(pretty_xml)
response.close()
"content_hash": "7a428569552fb4d2f8bd38f5752a43bb",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 106,
"avg_line_length": 41.6,
"alnum_prop": 0.7860576923076923,
"repo_name": "SivagnanamCiena/coding-skills-sample-code",
"id": "7aa08ee22285e2609ea8078849abaa0ca54124a9",
"size": "416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "coding201-parsing-xml/get-ap-xml-2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "157"
},
{
"name": "C++",
"bytes": "9084"
},
{
"name": "CMake",
"bytes": "1491"
},
{
"name": "Python",
"bytes": "63879"
}
],
"symlink_target": ""
} |
class WorkerResult(object):
    """String constants naming worker result states."""

    RUNNING = 'RUNNING'  # the worker has not finished yet
    SUCCESS = 'SUCCESS'  # the worker finished successfully
| {
"content_hash": "aec5ac857cdd0e5a3bab0405f3df795f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 27,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6710526315789473,
"repo_name": "sinistance/PokemonGo-Bot",
"id": "f38ceb9704e7255638e8d67b02c443e3914453ba",
"size": "76",
"binary": false,
"copies": "11",
"ref": "refs/heads/dev",
"path": "pokemongo_bot/worker_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Protocol Buffer",
"bytes": "43769"
},
{
"name": "Python",
"bytes": "159785"
},
{
"name": "Shell",
"bytes": "1013"
}
],
"symlink_target": ""
} |
import pandas as pd
tips = pd.read_csv('tips.csv')
# Tip expressed as a fraction of the total bill.
tips['tip_pct'] = tips['tip'] / tips['total_bill']

# Group by the (sex, smoker) pairs and aggregate just the tip percentage.
grouped = tips.groupby(['sex', 'smoker'])
grouped_pct = grouped['tip_pct']
print grouped_pct.agg('mean') # == print grouped_pct.mean()
# Passing a list of aggregations yields one column per function.
print grouped_pct.agg(['mean', 'std'])
| {
"content_hash": "06242d2423336c07f7594c6968210c3a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 25.454545454545453,
"alnum_prop": 0.65,
"repo_name": "m1key/data-science-sandbox",
"id": "0b541bb8d0b64962580a49f5a07166adc995f01e",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tips.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "78806"
},
{
"name": "Python",
"bytes": "16223"
}
],
"symlink_target": ""
} |
"""Helpers for monkey-patching Python code."""
__all__ = [
'MonkeyPatcher',
'patch',
]
class MonkeyPatcher(object):
    """A set of monkey-patches that can be applied and removed all together.

    Use this to cover up attributes with new objects. Particularly useful for
    testing difficult code.
    """

    # Marker used to indicate that the patched attribute did not exist on the
    # object before we patched it.
    _NO_SUCH_ATTRIBUTE = object()

    def __init__(self, *patches):
        """Construct a `MonkeyPatcher`.

        :param patches: The patches to apply, each should be (obj, name,
            new_value). Providing patches here is equivalent to calling
            `add_patch`.
        """
        # List of patches to apply in (obj, name, value).
        self._patches_to_apply = []
        # List of the original values for things that have been patched.
        # (obj, name, value) format.
        self._originals = []
        # Renamed from `patch` to avoid shadowing the module-level patch()
        # function.
        for patch_details in patches:
            self.add_patch(*patch_details)

    def add_patch(self, obj, name, value):
        """Add a patch to overwrite 'name' on 'obj' with 'value'.

        The attribute C{name} on C{obj} will be assigned to C{value} when
        C{patch} is called or during C{run_with_patches}.

        You can restore the original values with a call to restore().
        """
        self._patches_to_apply.append((obj, name, value))

    def patch(self):
        """Apply all of the patches that have been specified with `add_patch`.

        Reverse this operation using L{restore}.
        """
        for obj, name, value in self._patches_to_apply:
            # Remember what was there before (or that nothing was).
            original_value = getattr(obj, name, self._NO_SUCH_ATTRIBUTE)
            self._originals.append((obj, name, original_value))
            setattr(obj, name, value)

    def restore(self):
        """Restore all original values to any patched objects.

        If the patched attribute did not exist on an object before it was
        patched, `restore` will delete the attribute so as to return the
        object to its original state.
        """
        # Undo in reverse order so stacked patches unwind correctly.
        while self._originals:
            obj, name, value = self._originals.pop()
            if value is self._NO_SUCH_ATTRIBUTE:
                delattr(obj, name)
            else:
                setattr(obj, name, value)

    def run_with_patches(self, f, *args, **kw):
        """Run 'f' with the given args and kwargs with all patches applied.

        Restores all objects to their original state when finished.
        """
        self.patch()
        try:
            return f(*args, **kw)
        finally:
            self.restore()
def patch(obj, attribute, value):
    """Set 'obj.attribute' to 'value' and return a callable to restore 'obj'.

    If 'attribute' is not set on 'obj' already, then the returned callable
    will delete the attribute when called.

    :param obj: An object to monkey-patch.
    :param attribute: The name of the attribute to patch.
    :param value: The value to set 'obj.attribute' to.
    :return: A nullary callable that, when run, will restore 'obj' to its
        original state.
    """
    monkey = MonkeyPatcher()
    monkey.add_patch(obj, attribute, value)
    monkey.patch()
    return monkey.restore
| {
"content_hash": "91c62c86c82345541d4f6f77b86bb06b",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 34.02105263157895,
"alnum_prop": 0.6116955445544554,
"repo_name": "xujun10110/pth-toolkit",
"id": "ba0ac8fd8bfcde02ca6b09a9c27b30724c4f3e98",
"size": "3301",
"binary": false,
"copies": "64",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/samba/external/testtools/monkey.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Perl",
"bytes": "5113"
},
{
"name": "Python",
"bytes": "1294855"
},
{
"name": "Shell",
"bytes": "1105"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, re_path
from django.views.i18n import JavaScriptCatalog
from django.views.static import serve
from cms.test_utils.project.placeholderapp.views import example_view
from cms.utils.conf import get_cms_setting
# Register all ModelAdmin classes from the installed apps.
admin.autodiscover()

# Single flat pattern list; ordering is identical to the original
# incremental construction (media/jsi18n, staticfiles, admin/example/cms).
urlpatterns = [
    re_path(r'^media/(?P<path>.*)$', serve,
            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    re_path(r'^media/cms/(?P<path>.*)$', serve,
            {'document_root': get_cms_setting('MEDIA_ROOT'), 'show_indexes': True}),
    re_path(r'^jsi18n/(?P<packages>\S+?)/$', JavaScriptCatalog.as_view()),
    *staticfiles_urlpatterns(),
    re_path(r'^admin/', admin.site.urls),
    re_path(r'^example/$', example_view),
    re_path(r'^', include('cms.urls')),
]
| {
"content_hash": "762a6fcd9479121cbfd59ad9c8a1fbd9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 84,
"avg_line_length": 34.370370370370374,
"alnum_prop": 0.6961206896551724,
"repo_name": "datakortet/django-cms",
"id": "1aeb44d2e96e2f5a1dd06b9ee055f5e8a810b742",
"size": "928",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cms/test_utils/project/urls_no18n.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "203975"
},
{
"name": "JavaScript",
"bytes": "1249081"
},
{
"name": "Python",
"bytes": "2374270"
},
{
"name": "SCSS",
"bytes": "137720"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
} |
"""
Tests of the new image services, both as a service layer,
and as a WSGI layer
"""
import urlparse
from lxml import etree
import webob
from nova.api.openstack.compute import images
from nova.api.openstack.compute.views import images as images_view
from nova.api.openstack import xmlutil
from nova import exception
from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
FLAGS = flags.FLAGS
# XML namespaces used to look up elements in serialized responses.
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
# Timestamp every fake image reports for its created/updated fields.
NOW_API_FORMAT = "2010-10-11T10:30:22Z"
class ImagesControllerTest(test.TestCase):
"""
Test of the OpenStack API /images application controller w/Glance.
"""
    def setUp(self):
        """Run before each test.

        Stub out networking, rate limiting, key pairs, the compute API
        snapshot/backup calls and the Glance image service so the
        controller can be exercised without any external services.
        """
        super(ImagesControllerTest, self).setUp()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fakes.stub_out_compute_api_snapshot(self.stubs)
        fakes.stub_out_compute_api_backup(self.stubs)
        fakes.stub_out_glance(self.stubs)
        # Controller under test, backed entirely by the stubs above.
        self.controller = images.Controller()
    def test_get_image(self):
        """show() returns full detail plus self/bookmark/alternate links."""
        # NOTE(review): the request path says image 123 but show() is asked
        # for 124 — the path is not used for the lookup here; confirm this
        # mismatch is intentional.
        fake_req = fakes.HTTPRequest.blank('/v2/fake/images/123')
        actual_image = self.controller.show(fake_req, '124')
        href = "http://localhost/v2/fake/images/124"
        bookmark = "http://localhost/fake/images/124"
        # The alternate link points at the Glance service itself.
        alternate = "%s/fake/images/124" % utils.generate_glance_url()
        server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
        server_href = "http://localhost/v2/fake/servers/" + server_uuid
        server_bookmark = "http://localhost/fake/servers/" + server_uuid
        expected_image = {
            "image": {
                "id": "124",
                "name": "queued snapshot",
                "updated": NOW_API_FORMAT,
                "created": NOW_API_FORMAT,
                "status": "SAVING",
                "progress": 25,
                "minDisk": 0,
                "minRam": 0,
                'server': {
                    'id': server_uuid,
                    "links": [{
                        "rel": "self",
                        "href": server_href,
                    },
                    {
                        "rel": "bookmark",
                        "href": server_bookmark,
                    }],
                },
                "metadata": {
                    "instance_uuid": server_uuid,
                    "user_id": "fake",
                },
                "links": [{
                    "rel": "self",
                    "href": href,
                },
                {
                    "rel": "bookmark",
                    "href": bookmark,
                },
                {
                    "rel": "alternate",
                    "type": "application/vnd.openstack.image",
                    "href": alternate
                }],
            },
        }
        self.assertDictMatch(expected_image, actual_image)
def test_get_image_with_custom_prefix(self):
self.flags(osapi_compute_link_prefix='https://zoo.com:42',
osapi_glance_link_prefix='http://circus.com:34')
fake_req = fakes.HTTPRequest.blank('/v2/fake/images/123')
actual_image = self.controller.show(fake_req, '124')
href = "https://zoo.com:42/v2/fake/images/124"
bookmark = "https://zoo.com:42/fake/images/124"
alternate = "http://circus.com:34/fake/images/124"
server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
server_href = "https://zoo.com:42/v2/fake/servers/" + server_uuid
server_bookmark = "https://zoo.com:42/fake/servers/" + server_uuid
expected_image = {
"image": {
"id": "124",
"name": "queued snapshot",
"updated": NOW_API_FORMAT,
"created": NOW_API_FORMAT,
"status": "SAVING",
"progress": 25,
"minDisk": 0,
"minRam": 0,
'server': {
'id': server_uuid,
"links": [{
"rel": "self",
"href": server_href,
},
{
"rel": "bookmark",
"href": server_bookmark,
}],
},
"metadata": {
"instance_uuid": server_uuid,
"user_id": "fake",
},
"links": [{
"rel": "self",
"href": href,
},
{
"rel": "bookmark",
"href": bookmark,
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": alternate
}],
},
}
self.assertDictMatch(expected_image, actual_image)
def test_get_image_404(self):
fake_req = fakes.HTTPRequest.blank('/v2/fake/images/unknown')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, fake_req, 'unknown')
def test_get_image_index(self):
fake_req = fakes.HTTPRequest.blank('/v2/fake/images')
response_list = self.controller.index(fake_req)['images']
expected_images = [
{
"id": "123",
"name": "public image",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/123",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/123",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/123" %
utils.generate_glance_url()
},
],
},
{
"id": "124",
"name": "queued snapshot",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/124",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/124",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/124" %
utils.generate_glance_url()
},
],
},
{
"id": "125",
"name": "saving snapshot",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/125",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/125",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/125" %
utils.generate_glance_url()
},
],
},
{
"id": "126",
"name": "active snapshot",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/126",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/126",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/126" %
utils.generate_glance_url()
},
],
},
{
"id": "127",
"name": "killed snapshot",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/127",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/127",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/127" %
utils.generate_glance_url()
},
],
},
{
"id": "128",
"name": "deleted snapshot",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/128",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/128",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/128" %
utils.generate_glance_url()
},
],
},
{
"id": "129",
"name": "pending_delete snapshot",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/129",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/129",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/129" %
utils.generate_glance_url()
},
],
},
{
"id": "130",
"name": None,
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/130",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/130",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/130" %
utils.generate_glance_url()
},
],
},
]
self.assertDictListMatch(response_list, expected_images)
def test_get_image_index_with_limit(self):
request = fakes.HTTPRequest.blank('/v2/fake/images?limit=3')
response = self.controller.index(request)
response_list = response["images"]
response_links = response["images_links"]
alternate = "%s/fake/images/%s"
expected_images = [
{
"id": "123",
"name": "public image",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/123",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/123",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": alternate % (utils.generate_glance_url(), 123),
},
],
},
{
"id": "124",
"name": "queued snapshot",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/124",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/124",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": alternate % (utils.generate_glance_url(), 124),
},
],
},
{
"id": "125",
"name": "saving snapshot",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/images/125",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/125",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": alternate % (utils.generate_glance_url(), 125),
},
],
},
]
self.assertDictListMatch(response_list, expected_images)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/images', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertDictMatch({'limit': ['3'], 'marker': ['125']}, params)
def test_get_image_index_with_limit_and_extra_params(self):
request = fakes.HTTPRequest.blank('/v2/fake/images?limit=3&extra=bo')
response = self.controller.index(request)
response_links = response["images_links"]
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/images', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertDictMatch(
{'limit': ['3'], 'marker': ['125'], 'extra': ['bo']},
params)
def test_get_image_index_with_big_limit(self):
"""
Make sure we don't get images_links if limit is set
and the number of images returned is < limit
"""
request = fakes.HTTPRequest.blank('/v2/fake/images?limit=30')
response = self.controller.index(request)
self.assertEqual(response.keys(), ['images'])
self.assertEqual(len(response['images']), 8)
def test_get_image_details(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/detail')
response = self.controller.detail(request)
response_list = response["images"]
server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
server_href = "http://localhost/v2/fake/servers/" + server_uuid
server_bookmark = "http://localhost/fake/servers/" + server_uuid
alternate = "%s/fake/images/%s"
expected = [{
'id': '123',
'name': 'public image',
'metadata': {'key1': 'value1'},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'ACTIVE',
'progress': 100,
'minDisk': 10,
'minRam': 128,
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/123",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/123",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": alternate % (utils.generate_glance_url(), 123),
}],
},
{
'id': '124',
'name': 'queued snapshot',
'metadata': {
u'instance_uuid': server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'SAVING',
'progress': 25,
'minDisk': 0,
'minRam': 0,
'server': {
'id': server_uuid,
"links": [{
"rel": "self",
"href": server_href,
},
{
"rel": "bookmark",
"href": server_bookmark,
}],
},
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/124",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/124",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": alternate % (utils.generate_glance_url(), 124),
}],
},
{
'id': '125',
'name': 'saving snapshot',
'metadata': {
u'instance_uuid': server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'SAVING',
'progress': 50,
'minDisk': 0,
'minRam': 0,
'server': {
'id': server_uuid,
"links": [{
"rel": "self",
"href": server_href,
},
{
"rel": "bookmark",
"href": server_bookmark,
}],
},
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/125",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/125",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/125" % utils.generate_glance_url()
}],
},
{
'id': '126',
'name': 'active snapshot',
'metadata': {
u'instance_uuid': server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'ACTIVE',
'progress': 100,
'minDisk': 0,
'minRam': 0,
'server': {
'id': server_uuid,
"links": [{
"rel": "self",
"href": server_href,
},
{
"rel": "bookmark",
"href": server_bookmark,
}],
},
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/126",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/126",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/126" % utils.generate_glance_url()
}],
},
{
'id': '127',
'name': 'killed snapshot',
'metadata': {
u'instance_uuid': server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'ERROR',
'progress': 0,
'minDisk': 0,
'minRam': 0,
'server': {
'id': server_uuid,
"links": [{
"rel": "self",
"href": server_href,
},
{
"rel": "bookmark",
"href": server_bookmark,
}],
},
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/127",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/127",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/127" % utils.generate_glance_url()
}],
},
{
'id': '128',
'name': 'deleted snapshot',
'metadata': {
u'instance_uuid': server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'DELETED',
'progress': 0,
'minDisk': 0,
'minRam': 0,
'server': {
'id': server_uuid,
"links": [{
"rel": "self",
"href": server_href,
},
{
"rel": "bookmark",
"href": server_bookmark,
}],
},
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/128",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/128",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/128" % utils.generate_glance_url()
}],
},
{
'id': '129',
'name': 'pending_delete snapshot',
'metadata': {
u'instance_uuid': server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'DELETED',
'progress': 0,
'minDisk': 0,
'minRam': 0,
'server': {
'id': server_uuid,
"links": [{
"rel": "self",
"href": server_href,
},
{
"rel": "bookmark",
"href": server_bookmark,
}],
},
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/129",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/129",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/129" % utils.generate_glance_url()
}],
},
{
'id': '130',
'name': None,
'metadata': {},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'ACTIVE',
'progress': 100,
'minDisk': 0,
'minRam': 0,
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/130",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/130",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": "%s/fake/images/130" % utils.generate_glance_url()
}],
},
]
self.assertDictListMatch(expected, response_list)
def test_get_image_details_with_limit(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?limit=2')
response = self.controller.detail(request)
response_list = response["images"]
response_links = response["images_links"]
server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
server_href = "http://localhost/v2/fake/servers/" + server_uuid
server_bookmark = "http://localhost/fake/servers/" + server_uuid
alternate = "%s/fake/images/%s"
expected = [{
'id': '123',
'name': 'public image',
'metadata': {'key1': 'value1'},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'ACTIVE',
'minDisk': 10,
'progress': 100,
'minRam': 128,
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/123",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/123",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": alternate % (utils.generate_glance_url(), 123),
}],
},
{
'id': '124',
'name': 'queued snapshot',
'metadata': {
u'instance_uuid': server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'SAVING',
'minDisk': 0,
'progress': 25,
'minRam': 0,
'server': {
'id': server_uuid,
"links": [{
"rel": "self",
"href": server_href,
},
{
"rel": "bookmark",
"href": server_bookmark,
}],
},
"links": [{
"rel": "self",
"href": "http://localhost/v2/fake/images/124",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/images/124",
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": alternate % (utils.generate_glance_url(), 124),
}],
}]
self.assertDictListMatch(expected, response_list)
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/images', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertDictMatch({'limit': ['2'], 'marker': ['124']}, params)
def test_image_filter_with_name(self):
image_service = self.mox.CreateMockAnything()
filters = {'name': 'testname'}
request = fakes.HTTPRequest.blank('/v2/images?name=testname')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_filter_with_min_ram(self):
image_service = self.mox.CreateMockAnything()
filters = {'min_ram': '0'}
request = fakes.HTTPRequest.blank('/v2/images?minRam=0')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_filter_with_min_disk(self):
image_service = self.mox.CreateMockAnything()
filters = {'min_disk': '7'}
request = fakes.HTTPRequest.blank('/v2/images?minDisk=7')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_filter_with_status(self):
image_service = self.mox.CreateMockAnything()
filters = {'status': 'active'}
request = fakes.HTTPRequest.blank('/v2/images?status=ACTIVE')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_filter_with_property(self):
image_service = self.mox.CreateMockAnything()
filters = {'property-test': '3'}
request = fakes.HTTPRequest.blank('/v2/images?property-test=3')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_filter_server(self):
image_service = self.mox.CreateMockAnything()
uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
ref = 'http://localhost:8774/servers/' + uuid
filters = {'property-instance_uuid': uuid}
request = fakes.HTTPRequest.blank('/v2/images?server=' + ref)
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_filter_changes_since(self):
image_service = self.mox.CreateMockAnything()
filters = {'changes-since': '2011-01-24T17:08Z'}
request = fakes.HTTPRequest.blank('/v2/images?changes-since='
'2011-01-24T17:08Z')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_filter_with_type(self):
image_service = self.mox.CreateMockAnything()
filters = {'property-image_type': 'BASE'}
request = fakes.HTTPRequest.blank('/v2/images?type=BASE')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_filter_not_supported(self):
image_service = self.mox.CreateMockAnything()
filters = {'status': 'active'}
request = fakes.HTTPRequest.blank('/v2/images?status=ACTIVE&'
'UNSUPPORTEDFILTER=testname')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_no_filters(self):
image_service = self.mox.CreateMockAnything()
filters = {}
request = fakes.HTTPRequest.blank('/v2/images')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_invalid_marker(self):
class InvalidImageService(object):
def index(self, *args, **kwargs):
raise exception.Invalid('meow')
request = fakes.HTTPRequest.blank('/v2/images?marker=invalid')
controller = images.Controller(image_service=InvalidImageService())
self.assertRaises(webob.exc.HTTPBadRequest, controller.index, request)
def test_image_detail_filter_with_name(self):
image_service = self.mox.CreateMockAnything()
filters = {'name': 'testname'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?name=testname')
context = request.environ['nova.context']
image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.detail(request)
def test_image_detail_filter_with_status(self):
image_service = self.mox.CreateMockAnything()
filters = {'status': 'active'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?status=ACTIVE')
context = request.environ['nova.context']
image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.detail(request)
def test_image_detail_filter_with_property(self):
image_service = self.mox.CreateMockAnything()
filters = {'property-test': '3'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?property-test=3')
context = request.environ['nova.context']
image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.detail(request)
def test_image_detail_filter_server_href(self):
image_service = self.mox.CreateMockAnything()
uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
ref = 'http://localhost:8774/servers/' + uuid
url = '/v2/fake/images/detail?server=' + ref
filters = {'property-instance_uuid': uuid}
request = fakes.HTTPRequest.blank(url)
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_detail_filter_server_uuid(self):
image_service = self.mox.CreateMockAnything()
uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
url = '/v2/fake/images/detail?server=' + uuid
filters = {'property-instance_uuid': uuid}
request = fakes.HTTPRequest.blank(url)
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_detail_filter_changes_since(self):
image_service = self.mox.CreateMockAnything()
filters = {'changes-since': '2011-01-24T17:08Z'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail'
'?changes-since=2011-01-24T17:08Z')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_detail_filter_with_type(self):
image_service = self.mox.CreateMockAnything()
filters = {'property-image_type': 'BASE'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?type=BASE')
context = request.environ['nova.context']
image_service.index(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.index(request)
def test_image_detail_filter_not_supported(self):
image_service = self.mox.CreateMockAnything()
filters = {'status': 'active'}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?status='
'ACTIVE&UNSUPPORTEDFILTER=testname')
context = request.environ['nova.context']
image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.detail(request)
def test_image_detail_no_filters(self):
image_service = self.mox.CreateMockAnything()
filters = {}
request = fakes.HTTPRequest.blank('/v2/fake/images/detail')
context = request.environ['nova.context']
image_service.detail(context, filters=filters).AndReturn([])
self.mox.ReplayAll()
controller = images.Controller(image_service=image_service)
controller.detail(request)
def test_image_detail_invalid_marker(self):
class InvalidImageService(object):
def detail(self, *args, **kwargs):
raise exception.Invalid('meow')
request = fakes.HTTPRequest.blank('/v2/images?marker=invalid')
controller = images.Controller(image_service=InvalidImageService())
self.assertRaises(webob.exc.HTTPBadRequest, controller.detail, request)
def test_generate_alternate_link(self):
view = images_view.ViewBuilder()
request = fakes.HTTPRequest.blank('/v2/fake/images/1')
generated_url = view._get_alternate_link(request, 1)
actual_url = "%s/fake/images/1" % utils.generate_glance_url()
self.assertEqual(generated_url, actual_url)
def test_delete_image(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/124')
request.method = 'DELETE'
response = self.controller.delete(request, '124')
self.assertEqual(response.status_int, 204)
def test_delete_image_not_found(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/300')
request.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, request, '300')
class ImageXMLSerializationTest(test.TestCase):
TIMESTAMP = "2010-10-11T10:30:22Z"
SERVER_UUID = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
SERVER_HREF = 'http://localhost/v2/fake/servers/' + SERVER_UUID
SERVER_BOOKMARK = 'http://localhost/fake/servers/' + SERVER_UUID
IMAGE_HREF = 'http://localhost/v2/fake/images/%s'
IMAGE_NEXT = 'http://localhost/v2/fake/images?limit=%s&marker=%s'
IMAGE_BOOKMARK = 'http://localhost/fake/images/%s'
    def test_xml_declaration(self):
        """Serialized output must begin with an explicit XML declaration."""
        serializer = images.ImageTemplate()
        fixture = {
            'image': {
                'id': 1,
                'name': 'Image1',
                'created': self.TIMESTAMP,
                'updated': self.TIMESTAMP,
                'status': 'ACTIVE',
                'progress': 80,
                'server': {
                    'id': self.SERVER_UUID,
                    'links': [
                        {
                            'href': self.SERVER_HREF,
                            'rel': 'self',
                        },
                        {
                            'href': self.SERVER_BOOKMARK,
                            'rel': 'bookmark',
                        },
                    ],
                },
                'metadata': {
                    'key1': 'value1',
                },
                'links': [
                    {
                        'href': self.IMAGE_HREF % 1,
                        'rel': 'self',
                    },
                    {
                        'href': self.IMAGE_BOOKMARK % 1,
                        'rel': 'bookmark',
                    },
                ],
            },
        }
        output = serializer.serialize(fixture)
        # Only the declaration prefix matters here; content is covered
        # by the other serialization tests.
        has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
        self.assertTrue(has_dec)
def test_show(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'progress': 80,
'minRam': 10,
'minDisk': 100,
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'metadata': {
'key1': 'value1',
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status', 'progress']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
metadata_root = root.find('{0}metadata'.format(NS))
metadata_elems = metadata_root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = image_dict['metadata'].items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root.get('id'), image_dict['server']['id'])
link_nodes = server_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['server']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_show_zero_metadata(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'metadata': {},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
meta_nodes = root.findall('{0}meta'.format(ATOMNS))
self.assertEqual(len(meta_nodes), 0)
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root.get('id'), image_dict['server']['id'])
link_nodes = server_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['server']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
def test_show_image_no_metadata_key(self):
serializer = images.ImageTemplate()
fixture = {
'image': {
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
'status': 'ACTIVE',
'server': {
'id': self.SERVER_UUID,
'links': [
{
'href': self.SERVER_HREF,
'rel': 'self',
},
{
'href': self.SERVER_BOOKMARK,
'rel': 'bookmark',
},
],
},
'links': [
{
'href': self.IMAGE_HREF % 1,
'rel': 'self',
},
{
'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
},
],
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'image')
image_dict = fixture['image']
for key in ['name', 'id', 'updated', 'created', 'status']:
self.assertEqual(root.get(key), str(image_dict[key]))
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
meta_nodes = root.findall('{0}meta'.format(ATOMNS))
self.assertEqual(len(meta_nodes), 0)
server_root = root.find('{0}server'.format(NS))
self.assertEqual(server_root.get('id'), image_dict['server']['id'])
link_nodes = server_root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(image_dict['server']['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
    def test_show_no_server(self):
        """Serialize an image that has metadata but no embedded server."""
        serializer = images.ImageTemplate()
        fixture = {
            'image': {
                'id': 1,
                'name': 'Image1',
                'created': self.TIMESTAMP,
                'updated': self.TIMESTAMP,
                'status': 'ACTIVE',
                'metadata': {
                    'key1': 'value1',
                },
                'links': [
                    {
                        'href': self.IMAGE_HREF % 1,
                        'rel': 'self',
                    },
                    {
                        'href': self.IMAGE_BOOKMARK % 1,
                        'rel': 'bookmark',
                    },
                ],
            },
        }
        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'image')
        image_dict = fixture['image']
        for key in ['name', 'id', 'updated', 'created', 'status']:
            self.assertEqual(root.get(key), str(image_dict[key]))
        link_nodes = root.findall('{0}link'.format(ATOMNS))
        self.assertEqual(len(link_nodes), 2)
        for i, link in enumerate(image_dict['links']):
            for key, value in link.items():
                self.assertEqual(link_nodes[i].get(key), value)
        metadata_root = root.find('{0}metadata'.format(NS))
        metadata_elems = metadata_root.findall('{0}meta'.format(NS))
        self.assertEqual(len(metadata_elems), 1)
        for i, metadata_elem in enumerate(metadata_elems):
            # NOTE: dict.items()[i] is Python-2-only (items() returns a list there).
            (meta_key, meta_value) = image_dict['metadata'].items()[i]
            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
            self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
        # Fixture has no 'server' entry, so no server element is expected.
        server_root = root.find('{0}server'.format(NS))
        self.assertEqual(server_root, None)
    def test_show_with_min_ram(self):
        """Serialize an image that exposes 'progress' and 'minRam' attributes."""
        serializer = images.ImageTemplate()
        fixture = {
            'image': {
                'id': 1,
                'name': 'Image1',
                'created': self.TIMESTAMP,
                'updated': self.TIMESTAMP,
                'status': 'ACTIVE',
                'progress': 80,
                'minRam': 256,
                'server': {
                    'id': self.SERVER_UUID,
                    'links': [
                        {
                            'href': self.SERVER_HREF,
                            'rel': 'self',
                        },
                        {
                            'href': self.SERVER_BOOKMARK,
                            'rel': 'bookmark',
                        },
                    ],
                },
                'metadata': {
                    'key1': 'value1',
                },
                'links': [
                    {
                        'href': self.IMAGE_HREF % 1,
                        'rel': 'self',
                    },
                    {
                        'href': self.IMAGE_BOOKMARK % 1,
                        'rel': 'bookmark',
                    },
                ],
            },
        }
        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'image')
        image_dict = fixture['image']
        # 'progress' and 'minRam' are verified alongside the standard keys.
        for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
                    'minRam']:
            self.assertEqual(root.get(key), str(image_dict[key]))
        link_nodes = root.findall('{0}link'.format(ATOMNS))
        self.assertEqual(len(link_nodes), 2)
        for i, link in enumerate(image_dict['links']):
            for key, value in link.items():
                self.assertEqual(link_nodes[i].get(key), value)
        metadata_root = root.find('{0}metadata'.format(NS))
        metadata_elems = metadata_root.findall('{0}meta'.format(NS))
        self.assertEqual(len(metadata_elems), 1)
        for i, metadata_elem in enumerate(metadata_elems):
            # NOTE: dict.items()[i] is Python-2-only (items() returns a list there).
            (meta_key, meta_value) = image_dict['metadata'].items()[i]
            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
            self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
        server_root = root.find('{0}server'.format(NS))
        self.assertEqual(server_root.get('id'), image_dict['server']['id'])
        link_nodes = server_root.findall('{0}link'.format(ATOMNS))
        self.assertEqual(len(link_nodes), 2)
        for i, link in enumerate(image_dict['server']['links']):
            for key, value in link.items():
                self.assertEqual(link_nodes[i].get(key), value)
    def test_show_with_min_disk(self):
        """Serialize an image that exposes 'progress' and 'minDisk' attributes."""
        serializer = images.ImageTemplate()
        fixture = {
            'image': {
                'id': 1,
                'name': 'Image1',
                'created': self.TIMESTAMP,
                'updated': self.TIMESTAMP,
                'status': 'ACTIVE',
                'progress': 80,
                'minDisk': 5,
                'server': {
                    'id': self.SERVER_UUID,
                    'links': [
                        {
                            'href': self.SERVER_HREF,
                            'rel': 'self',
                        },
                        {
                            'href': self.SERVER_BOOKMARK,
                            'rel': 'bookmark',
                        },
                    ],
                },
                'metadata': {
                    'key1': 'value1',
                },
                'links': [
                    {
                        'href': self.IMAGE_HREF % 1,
                        'rel': 'self',
                    },
                    {
                        'href': self.IMAGE_BOOKMARK % 1,
                        'rel': 'bookmark',
                    },
                ],
            },
        }
        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'image')
        image_dict = fixture['image']
        # 'progress' and 'minDisk' are verified alongside the standard keys.
        for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
                    'minDisk']:
            self.assertEqual(root.get(key), str(image_dict[key]))
        link_nodes = root.findall('{0}link'.format(ATOMNS))
        self.assertEqual(len(link_nodes), 2)
        for i, link in enumerate(image_dict['links']):
            for key, value in link.items():
                self.assertEqual(link_nodes[i].get(key), value)
        metadata_root = root.find('{0}metadata'.format(NS))
        metadata_elems = metadata_root.findall('{0}meta'.format(NS))
        self.assertEqual(len(metadata_elems), 1)
        for i, metadata_elem in enumerate(metadata_elems):
            # NOTE: dict.items()[i] is Python-2-only (items() returns a list there).
            (meta_key, meta_value) = image_dict['metadata'].items()[i]
            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
            self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
        server_root = root.find('{0}server'.format(NS))
        self.assertEqual(server_root.get('id'), image_dict['server']['id'])
        link_nodes = server_root.findall('{0}link'.format(ATOMNS))
        self.assertEqual(len(link_nodes), 2)
        for i, link in enumerate(image_dict['server']['links']):
            for key, value in link.items():
                self.assertEqual(link_nodes[i].get(key), value)
    def test_index(self):
        """Serialize a minimal (index) listing of two images."""
        serializer = images.MinimalImagesTemplate()
        fixture = {
            'images': [
                {
                    'id': 1,
                    'name': 'Image1',
                    'links': [
                        {
                            'href': self.IMAGE_HREF % 1,
                            'rel': 'self',
                        },
                        {
                            'href': self.IMAGE_BOOKMARK % 1,
                            'rel': 'bookmark',
                        },
                    ],
                },
                {
                    'id': 2,
                    'name': 'Image2',
                    'links': [
                        {
                            'href': self.IMAGE_HREF % 2,
                            'rel': 'self',
                        },
                        {
                            'href': self.IMAGE_BOOKMARK % 2,
                            'rel': 'bookmark',
                        },
                    ],
                },
            ]
        }
        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'images_index')
        image_elems = root.findall('{0}image'.format(NS))
        self.assertEqual(len(image_elems), 2)
        for i, image_elem in enumerate(image_elems):
            image_dict = fixture['images'][i]
            for key in ['name', 'id']:
                self.assertEqual(image_elem.get(key), str(image_dict[key]))
            link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
            self.assertEqual(len(link_nodes), 2)
            # NOTE(review): the inner loop reuses 'i', shadowing the outer
            # index; harmless because the outer value is re-bound each pass.
            for i, link in enumerate(image_dict['links']):
                for key, value in link.items():
                    self.assertEqual(link_nodes[i].get(key), value)
    def test_index_with_links(self):
        """Serialize an index listing that also carries 'images_links' pagination."""
        serializer = images.MinimalImagesTemplate()
        fixture = {
            'images': [
                {
                    'id': 1,
                    'name': 'Image1',
                    'links': [
                        {
                            'href': self.IMAGE_HREF % 1,
                            'rel': 'self',
                        },
                        {
                            'href': self.IMAGE_BOOKMARK % 1,
                            'rel': 'bookmark',
                        },
                    ],
                },
                {
                    'id': 2,
                    'name': 'Image2',
                    'links': [
                        {
                            'href': self.IMAGE_HREF % 2,
                            'rel': 'self',
                        },
                        {
                            'href': self.IMAGE_BOOKMARK % 2,
                            'rel': 'bookmark',
                        },
                    ],
                },
            ],
            'images_links': [
                {
                    'rel': 'next',
                    'href': self.IMAGE_NEXT % (2, 2),
                }
            ],
        }
        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'images_index')
        image_elems = root.findall('{0}image'.format(NS))
        self.assertEqual(len(image_elems), 2)
        for i, image_elem in enumerate(image_elems):
            image_dict = fixture['images'][i]
            for key in ['name', 'id']:
                self.assertEqual(image_elem.get(key), str(image_dict[key]))
            link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
            self.assertEqual(len(link_nodes), 2)
            for i, link in enumerate(image_dict['links']):
                for key, value in link.items():
                    self.assertEqual(link_nodes[i].get(key), value)
        # Check images_links
        images_links = root.findall('{0}link'.format(ATOMNS))
        for i, link in enumerate(fixture['images_links']):
            for key, value in link.items():
                self.assertEqual(images_links[i].get(key), value)
def test_index_zero_images(self):
serializer = images.MinimalImagesTemplate()
fixtures = {
'images': [],
}
output = serializer.serialize(fixtures)
root = etree.XML(output)
xmlutil.validate_schema(root, 'images_index')
image_elems = root.findall('{0}image'.format(NS))
self.assertEqual(len(image_elems), 0)
    def test_detail(self):
        """Serialize a detailed listing: one image with a server, one with metadata."""
        serializer = images.ImagesTemplate()
        fixture = {
            'images': [
                {
                    'id': 1,
                    'name': 'Image1',
                    'created': self.TIMESTAMP,
                    'updated': self.TIMESTAMP,
                    'status': 'ACTIVE',
                    'server': {
                        'id': self.SERVER_UUID,
                        'links': [
                            {
                                'href': self.SERVER_HREF,
                                'rel': 'self',
                            },
                            {
                                'href': self.SERVER_BOOKMARK,
                                'rel': 'bookmark',
                            },
                        ],
                    },
                    'links': [
                        {
                            'href': self.IMAGE_HREF % 1,
                            'rel': 'self',
                        },
                        {
                            'href': self.IMAGE_BOOKMARK % 1,
                            'rel': 'bookmark',
                        },
                    ],
                },
                {
                    'id': '2',
                    'name': 'Image2',
                    'created': self.TIMESTAMP,
                    'updated': self.TIMESTAMP,
                    'status': 'SAVING',
                    'progress': 80,
                    'metadata': {
                        'key1': 'value1',
                    },
                    'links': [
                        {
                            'href': self.IMAGE_HREF % 2,
                            'rel': 'self',
                        },
                        {
                            'href': self.IMAGE_BOOKMARK % 2,
                            'rel': 'bookmark',
                        },
                    ],
                },
            ]
        }
        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'images')
        image_elems = root.findall('{0}image'.format(NS))
        self.assertEqual(len(image_elems), 2)
        for i, image_elem in enumerate(image_elems):
            image_dict = fixture['images'][i]
            for key in ['name', 'id', 'updated', 'created', 'status']:
                self.assertEqual(image_elem.get(key), str(image_dict[key]))
            link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
            self.assertEqual(len(link_nodes), 2)
            # NOTE(review): inner loop shadows the outer 'i'; harmless here.
            for i, link in enumerate(image_dict['links']):
                for key, value in link.items():
                    self.assertEqual(link_nodes[i].get(key), value)
| {
"content_hash": "d46f7448baa2505c2514d4604f933bf6",
"timestamp": "",
"source": "github",
"line_count": 1694,
"max_line_length": 79,
"avg_line_length": 36.65761511216056,
"alnum_prop": 0.437324873586911,
"repo_name": "josephsuh/extra-specs",
"id": "3b100f3f7cac320a60b39de0cbc07b177f8aac2c",
"size": "62773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/test_images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6005171"
},
{
"name": "Shell",
"bytes": "26155"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as p
from sklearn import metrics
from nn.activations import LogisticActivation
from nn.layers import FullyConnectedLayer
from nn.network import NN
# Train a small fully-connected network on the digit-recognition CSV and
# report classification metrics on a held-out cross-validation split.
data = p.read_csv(r'./data/train.csv')
train_set = np.asarray(data)

# Fraction of samples reserved for cross-validation.
cv_factor = 1.0 / 10

samples_len = 10000  # train_set.shape[0]
# Fix: slice bounds must be integers -- the old float value crashes on
# modern NumPy ("cannot be interpreted as an integer").
train_len = int(samples_len * (1 - cv_factor))
cv_len = samples_len - train_len
print(samples_len, train_len, cv_len)

# Column 0 is the label, the rest are pixel intensities.
features = train_set[:train_len, 1:]
target = train_set[:train_len, 0]
cv_features = train_set[train_len:train_len + cv_len, 1:]
cv_target = train_set[train_len:train_len + cv_len, 0]

# Scale pixel values from [0, 255] to roughly [-0.5, 0.5].
features = (features - 127.0) / 255.0
cv_features = (cv_features - 127.0) / 255.0

nn = NN(features.shape[1])
nn.add_layer(FullyConnectedLayer(25, activation=LogisticActivation()))
nn.add_layer(FullyConnectedLayer(10, activation=LogisticActivation()))

nn.train(features, target)

# Predicted class = index of the strongest output unit.
y = nn.apply(cv_features)
predicted = np.zeros(cv_target.shape)
for i in range(0, y.shape[0]):
    predicted[i] = np.argmax(y[i, :])

print("Classification report:\n{}\n".format(metrics.classification_report(cv_target, predicted)))
print("Confusion matrix:\n{}".format(metrics.confusion_matrix(cv_target, predicted)))
| {
"content_hash": "35a5fdfdd709773897577a07228422d3",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 96,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.7283236994219653,
"repo_name": "alnaav/shredNN",
"id": "cbd3b7fb77ae1e7759f4761652ccfad9822d76d1",
"size": "1211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nntest/drtest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10190"
}
],
"symlink_target": ""
} |
import breakdancer
from breakdancer import Condition, Effect, Action, Driver
TESTKEY = 'testkey'
######################################################################
# Conditions
######################################################################
class ExistsCondition(Condition):
    """Condition: the test key is present in the state dict."""
    def __call__(self, state):
        return TESTKEY in state
class ExistsAsNumber(Condition):
    """Condition: the test key exists and its value parses as an int."""
    def __call__(self, state):
        try:
            int(state[TESTKEY])
            return True
        except:
            # Missing key (KeyError) or non-numeric value (ValueError).
            return False
class MaybeExistsAsNumber(ExistsAsNumber):
    """Condition: the test key is absent, or present with an integer value."""
    def __call__(self, state):
        return TESTKEY not in state or ExistsAsNumber.__call__(self, state)
class DoesNotExistCondition(Condition):
    """Condition: the test key is absent from the state."""
    def __call__(self, state):
        return TESTKEY not in state
class NothingExistsCondition(Condition):
    """Condition: the state holds no keys at all."""
    def __call__(self, state):
        return len(state) == 0
######################################################################
# Effects
######################################################################
class StoreEffect(Effect):
    """Effect: store value v (default '0') under the test key."""
    def __init__(self, v='0'):
        self.v = v
    def __call__(self, state):
        state[TESTKEY] = self.v
class DeleteEffect(Effect):
    """Effect: remove the test key; raises KeyError when it is missing."""
    def __call__(self, state):
        state.pop(TESTKEY)
class FlushEffect(Effect):
    """Effect: wipe the whole state, mirroring a cache flush."""
    def __call__(self, state):
        state.clear()
class AppendEffect(Effect):
    """Effect: append the fixed suffix to the stored value."""
    suffix = '-suffix'
    def __call__(self, state):
        state[TESTKEY] += self.suffix
class PrependEffect(Effect):
    """Effect: prepend the fixed prefix to the stored value."""
    prefix = 'prefix-'
    def __call__(self, state):
        state[TESTKEY] = self.prefix + state[TESTKEY]
class ArithmeticEffect(Effect):
    """Effect: add `by` to the stored numeric value, clamping at zero.

    A missing key is initialised to the default ('0') instead.
    """
    default = '0'
    def __init__(self, by=1):
        self.by = by
    def __call__(self, state):
        if TESTKEY in state:
            state[TESTKEY] = str(max(0, int(state[TESTKEY]) + self.by))
        else:
            state[TESTKEY] = self.default
######################################################################
# Actions
######################################################################
class Set(Action):
    """Action: unconditionally store a value; the key must exist afterwards."""
    effect = StoreEffect()
    postconditions = [ExistsCondition()]
class Add(Action):
    """Action: store a value only when the key does not already exist."""
    preconditions = [DoesNotExistCondition()]
    effect = StoreEffect()
    postconditions = [ExistsCondition()]
class Delete(Action):
    """Action: remove an existing key."""
    preconditions = [ExistsCondition()]
    effect = DeleteEffect()
    postconditions = [DoesNotExistCondition()]
class Flush(Action):
    """Action: clear the whole state."""
    effect = FlushEffect()
    postconditions = [NothingExistsCondition()]
class Delay(Flush):
    """Action: wait past expiry; state-wise identical to a flush."""
    pass
class Append(Action):
    """Action: append a suffix to an existing value."""
    preconditions = [ExistsCondition()]
    effect = AppendEffect()
    postconditions = [ExistsCondition()]
class Prepend(Action):
    """Action: prepend a prefix to an existing value."""
    preconditions = [ExistsCondition()]
    effect = PrependEffect()
    postconditions = [ExistsCondition()]
class Incr(Action):
    """Action: increment an existing numeric value by one."""
    preconditions = [ExistsAsNumber()]
    effect = ArithmeticEffect(1)
    postconditions = [ExistsAsNumber()]
class Decr(Action):
    """Action: decrement an existing numeric value by one (floored at 0)."""
    preconditions = [ExistsAsNumber()]
    effect = ArithmeticEffect(-1)
    postconditions = [ExistsAsNumber()]
class IncrWithDefault(Action):
    """Action: increment, initialising a missing key to the default first."""
    preconditions = [MaybeExistsAsNumber()]
    effect = ArithmeticEffect(1)
    postconditions = [ExistsAsNumber()]
class DecrWithDefault(Action):
    """Action: decrement, initialising a missing key to the default first."""
    preconditions = [MaybeExistsAsNumber()]
    effect = ArithmeticEffect(-1)
    postconditions = [ExistsAsNumber()]
######################################################################
# Driver
######################################################################
class EngineTestAppDriver(Driver):
    """Driver that emits the generated test sequences as C source code.

    Each action sequence becomes one C test function; postSuite prints the
    engine_test_t registration table that ties them together.
    """
    def preSuite(self, seq):
        # Emitted C file header.
        print '#include "suite_stubs.h"'
        print ""
    def testName(self, seq):
        # e.g. [Set, Delete] -> 'test_Set_Delete'
        return 'test_' + '_'.join(a.name for a in seq)
    def startSequence(self, seq):
        # Function signature, with the second parameter aligned under the first.
        f = "static enum test_result %s" % self.testName(seq)
        print ("%s(ENGINE_HANDLE *h,\n%sENGINE_HANDLE_V1 *h1) {"
               % (f, " " * (len(f) + 1)))
    def startAction(self, action):
        # Map each action to the matching C helper call.
        if isinstance(action, Delay):
            s = "    delay(expiry+1);"
        elif isinstance(action, Flush):
            s = "    flush(h, h1);"
        elif isinstance(action, Delete):
            s = '    del(h, h1);'
        else:
            s = '    %s(h, h1);' % (action.name)
        print s
    def postSuite(self, seq):
        # Registration table expected by the memcached engine test harness.
        print """MEMCACHED_PUBLIC_API
engine_test_t* get_tests(void) {
    static engine_test_t tests[] = {
"""
        for seq in sorted(seq):
            print '    {"%s",\n     %s,\n     test_setup, teardown, NULL},' % (
                ', '.join(a.name for a in seq),
                self.testName(seq))
        print """    {NULL, NULL, NULL, NULL, NULL}
    };
    return tests;
}"""
    def endSequence(self, seq, state):
        # Assert the final value (or absence) and close the function body.
        val = state.get(TESTKEY)
        if val:
            print '    checkValue(h, h1, "%s");' % val
        else:
            print '    assertNotExists(h, h1);'
        print "    return SUCCESS;"
        print "}"
        print ""
    def endAction(self, action, state, errored):
        # Emit the expected error state plus a trailing comment with the value.
        value = state.get(TESTKEY)
        if value:
            vs = ' // value is "%s"' % value
        else:
            vs = ' // value is not defined'
        if errored:
            print "    assertHasError();" + vs
        else:
            print "    assertHasNoError();" + vs
# Discover every Action subclass in this module and print the C test suite.
if __name__ == '__main__':
    breakdancer.runTest(breakdancer.findActions(globals().values()),
                        EngineTestAppDriver())
| {
"content_hash": "b500e29d3c37199c6e5203d89d4563a3",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 91,
"avg_line_length": 24.86936936936937,
"alnum_prop": 0.5234558956710741,
"repo_name": "zbase/memcached",
"id": "bcaeac9caa3d63a4dcc876f1025a6b754471e797",
"size": "6139",
"binary": false,
"copies": "1",
"ref": "refs/heads/zynga_1.8",
"path": "testsuite/breakdancer/engine_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "882471"
},
{
"name": "C++",
"bytes": "9725"
},
{
"name": "D",
"bytes": "10604"
},
{
"name": "Perl",
"bytes": "123757"
},
{
"name": "Python",
"bytes": "10486"
},
{
"name": "Shell",
"bytes": "6708"
}
],
"symlink_target": ""
} |
'''
Clean and trim raw Illumina read files
'''
import os
import sys
import glob
import csv
import shutil
import configparser
import subprocess
import pandas as pd
from secapr.helpers import FullPaths
def add_arguments(parser):
    """Register the clean/trim command-line options on `parser`.

    Covers the I/O paths, the read-count threshold, the adapter index mode,
    and every Trimmomatic tuning parameter (mismatches, clip thresholds,
    sliding window, leading/trailing quality, crop lengths, minimum length).
    """
    parser.add_argument(
        '--input',
        required=True,
        action=FullPaths,
        default=None,
        help='The directory containing the unzipped .fastq or .fq files (raw read files)'
    )
    parser.add_argument(
        '--config',
        required=True,
        help='A configuration file containing the adapter information and the sample names'
    )
    parser.add_argument(
        '--output',
        required=True,
        action=FullPaths,
        default=None,
        help='The output directory where results will be saved'
    )
    parser.add_argument(
        '--read_min',
        type=int,
        default=200000,
        help='Set the minimum read count threshold. Any read file containing fewer reads than this minimum threshold will not be processed further. Default: %(default)s'
    )
    parser.add_argument(
        '--index',
        type=str,
        choices=("single", "double"),
        default="single",
        help="Specify if single- or double-indexed adapters were used for the library preparation (essential information in order to interpret the control-file correctly).",
    )
    parser.add_argument(
        '--seedMismatches',
        type=int,
        default=2,
        help='Specifies the maximum mismatch count which will still allow a full match to be performed. For more information see trimmoatic tutorial. Default: %(default)s'
    )
    parser.add_argument(
        '--palindromeClipThreshold',
        type=int,
        default=30,
        help='Specifies how accurate the match between the two "adapter ligated" reads must be for PE palindrome read alignment. Default: %(default)s'
    )
    parser.add_argument(
        '--simpleClipThreshold',
        type=int,
        default=10,
        help='Specifies how accurate the match between any adapter etc. sequence must be against a read. For more information see trimmoatic tutorial. Default: %(default)s'
    )
    parser.add_argument(
        '--windowSize',
        type=int,
        default=4,
        help='Specifies the number of bases to average across. For more information see trimmoatic tutorial. Default: %(default)s'
    )
    parser.add_argument(
        '--requiredQuality',
        type=int,
        default=15,
        help='Specifies the average quality required. For more information see trimmoatic tutorial. Default: %(default)s'
    )
    parser.add_argument(
        '--leadingQuality',
        type=int,
        default=20,
        help='Specifies the minimum quality required to keep a base at the beginning of the read. For more information see trimmoatic tutorial. Default: %(default)s'
    )
    parser.add_argument(
        '--trailingQuality',
        type=int,
        default=20,
        help='Specifies the minimum quality required to keep a base at the end of a read. For more information see trimmoatic tutorial. Default: %(default)s'
    )
    parser.add_argument(
        '--cropToLength',
        type=int,
        default=250,
        help='The number of bases to keep, from the start of the read. Everything exceeding this length will be removed from the end of the read. For more information see trimmoatic tutorial. Default: %(default)s'
    )
    parser.add_argument(
        '--headCrop',
        type=int,
        default=0,
        help='The number of bases to remove from the start of the read. For more information see trimmoatic tutorial. Default: %(default)s'
    )
    parser.add_argument(
        '--minLength',
        type=int,
        default=40,
        help='Specifies the minimum length of reads to be kept. For more information see trimmoatic tutorial. Default: %(default)s'
    )
def find_barcode(direction,sample_id,barcodes):
    """Return the (key, sequence) barcode entry for a sample, or None.

    `barcodes` is a list of (key, sequence) tuples from the config file,
    where each key looks like '<direction>-<sample_id>' (e.g. 'i7-sample1').

    direction -- barcode direction tag, 'i5' or 'i7'
    sample_id -- sample name to look up
    """
    for element in barcodes:
        # Split on the first dash only, so sample IDs that themselves
        # contain dashes no longer raise ValueError during unpacking.
        tag1, tag2 = element[0].split("-", 1)
        if direction == tag1 and sample_id == tag2:
            return element
    # No matching barcode entry for this sample/direction.
    return None
def make_adapter_fasta(sample,sampledir,barcodes,conf,adapt_index):
    """Write <sample>_adapters.fasta containing the i5/i7 adapter sequences.

    The '*' placeholder in each adapter template from the config is replaced
    by the sample's barcode. Returns the path of the written FASTA, or None
    when no i7 barcode exists for the sample (find_barcode returns None and
    the [1] subscript raises TypeError).
    """
    adapters = os.path.join(sampledir,"%s_adapters.fasta" %sample)
    try:
        i7_barcode = find_barcode("i7",sample,barcodes)[1]
        i7 = conf.get('adapters', 'i7')
        i7 = i7.replace("*", i7_barcode)
        i5 = conf.get('adapters', 'i5')
        if adapt_index == "single":
            try:
                i5_barcode = find_barcode("i5",sample,barcodes)[1]
            except:
                # Expected for single-indexed data: no i5 barcode entry.
                i5_barcode = None
                pass
            if not i5_barcode is None:
                # An i5 barcode was found although '--index single' was given.
                print ("Reads are not single-indexed. Use '--index double' in your command.")
                sys.exit()
        if adapt_index == "double":
            i5_barcode = find_barcode("i5",sample,barcodes)[1]
            i5 = i5.replace("*", i5_barcode)
        with open(adapters, 'w') as outf:
            outf.write(">i5\n%s\n>i7\n%s\n" %(i5,i7))
        return adapters
    except TypeError:
        # No i7 barcode for this sample -> caller falls back to
        # quality-only trimming.
        return None
def longest_common_substring(s1, s2):
    """Return the longest contiguous substring shared by s1 and s2.

    On ties the earliest match (scanning s1 left to right) wins, and the
    returned text is taken from s1. Empty string when nothing is shared.
    """
    best_len = 0
    best_end = 0
    # Rolling single-row DP: prev[j] is the match length ending at
    # s1[i-2] / s2[j-1] from the previous iteration.
    prev = [0] * (len(s2) + 1)
    for i in range(1, len(s1) + 1):
        curr = [0] * (len(s2) + 1)
        for j in range(1, len(s2) + 1):
            if s1[i - 1] == s2[j - 1]:
                curr[j] = prev[j - 1] + 1
                if curr[j] > best_len:
                    best_len = curr[j]
                    best_end = i
        prev = curr
    return s1[best_end - best_len:best_end]
def rchop(thestring, ending):
    """Strip `ending` from the end of `thestring` once, if present.

    Fixes the empty-suffix edge case: the old slice `[:-len(ending)]`
    evaluated to `[:-0]` == `[:0]` for ending == '', wrongly returning ''.
    """
    if ending and thestring.endswith(ending):
        return thestring[:-len(ending)]
    return thestring
def read_count(input):
    """Count the reads in a gzipped FASTQ (line count divided by four)."""
    # Use bash command to count the read number
    # NOTE(review): `input` is interpolated straight into a shell pipeline,
    # so paths containing spaces or shell metacharacters would break --
    # confirm callers only pass safe paths.
    number_reads = subprocess.getstatusoutput("gzip -cd %s | wc -l | awk '{ print $1/4 }' " %input)[1]
    num = int(number_reads)
    return num
def get_read_count_from_stats_file(stats_file):
    """Parse a Trimmomatic stats file for the read-pair counts.

    Returns (input_pairs, surviving_pairs) as strings, taken from the
    first line starting with 'Input' (e.g. 'Input Read Pairs: N Both
    Surviving: M ...'); None when no such line exists.

    Fix: the file handle was previously never closed (plain open() with
    an early return); a `with` block now guarantees cleanup.
    """
    with open(stats_file, 'r') as handle:
        for line in handle:
            if line.startswith('Input'):
                parts = line.split(' ')
                reads_before = parts[3]
                reads_after = parts[6]
                return (reads_before, reads_after)
    return None
def find_fastq_pairs(name_pattern,work_dir):
    """Map each sample-name pattern to the set of FASTQ files that match it.

    Every file in `work_dir` with a fastq/fq extension is attributed to the
    control-file pattern sharing its longest common substring. Returns a
    dict {pattern: set(filenames)}; keys that are not full control-file
    patterns are dropped with a message.
    """
    # Create a sorted (by name) list of only fastq/fq files in the input directory
    included_extenstions = ['fastq','fq','fastq.gz','fq.gz']
    file_list = [fn for fn in sorted(os.listdir(work_dir)) if any([fn.endswith(ext) for ext in included_extenstions])]
    # Recover the longest substring of the filename that matches an element of the sample ID list from the control file
    id_of_file = []
    for fastq in file_list:
        matches = []
        for name in name_pattern:
            matches.append(longest_common_substring(fastq,name))
        # Give the longest match
        sample_id = max(matches, key=len)
        id_of_file.append(sample_id)
    # Create a dictionary with the file names as keys and the corresponding sample IDs as values
    file_info = dict(list(zip(file_list, id_of_file)))
    # Reverse the dictionary
    rev_file_info = {}
    for key, value in list(file_info.items()):
        rev_file_info.setdefault(value, set()).add(key)
    # Check if the pattern defined as key represents a full element from the name_pattern list
    for key, value in list(rev_file_info.items()):
        if key not in name_pattern:
            print(("Sample", key, "not found in control file. Skipped."))
            rev_file_info.pop(key, None)
        else:
            pass
    return rev_file_info
def quality_trim(r1,r2,sample_id,work_dir,out_dir,barcodes,conf,adapt_index,seed_mismatches,palindrome_clip_threshold,simple_clip_threshold,window_size,required_quality,leading,trailing,tail_crop,head_crop,min_length,stats_dict):
    """Run Trimmomatic PE for one sample and collect read-count statistics.

    Writes the four cleaned FASTQ files plus a stats file into
    <out_dir>/<sample_id>_clean/, records (reads_before, reads_after) in
    `stats_dict`, and returns the accumulated stats as a pandas DataFrame.
    Falls back to quality-only trimming when no adapter barcodes exist.
    """
    print(('#' * 50))
    print(("Processing %s...\n" %sample_id))
    # Forward and backward read file paths
    R1 = "/".join((work_dir, r1))
    R2 = "/".join((work_dir, r2))
    # Names of output files
    output = []
    output_sample_dir = "%s/%s_clean" %(out_dir,sample_id)
    if not os.path.exists(output_sample_dir):
        os.makedirs(output_sample_dir)
    for read in ["READ1", "READ1-single", "READ2", "READ2-single"]:
        output.append(os.path.join(output_sample_dir, "%s_clean-%s.fastq.gz" %(sample_id,read)))
    # Bugfix: define the stats-file path BEFORE branching. It used to be
    # assigned only inside the adapter branch, so the no-adapter branch
    # crashed with a NameError when opening it.
    stats_file = os.path.join(output_sample_dir, "%s_stats.txt" %sample_id)
    # Adapters to trim (None when the sample has no i7 barcode).
    adapter_fasta = make_adapter_fasta(sample_id,output_sample_dir,barcodes,conf,adapt_index)
    if adapter_fasta is not None:
        # Full run: adapter clipping plus quality trimming.
        command1 = [
            "trimmomatic",
            "PE",
            "-phred33",
            R1,
            R2,
            output[0],
            output[1],
            output[2],
            output[3],
            "ILLUMINACLIP:%s:%d:%d:%d" %(adapter_fasta,seed_mismatches,palindrome_clip_threshold,simple_clip_threshold),
            "SLIDINGWINDOW:%d:%d" %(window_size,required_quality),
            "LEADING:%d" %leading,
            "TRAILING:%d" %trailing,
            "CROP:%d" %tail_crop,
            "HEADCROP:%d" %head_crop,
            "MINLEN:%d" %min_length
        ]
        with open(stats_file, 'w') as log_err_file:
            try:
                # Trimmomatic logs its summary to stderr -> captured as stats.
                p1 = subprocess.Popen(command1, stderr=log_err_file)
                p1.communicate()
                before_reads, after_reads = get_read_count_from_stats_file(stats_file)
                stats_dict.setdefault(sample_id,[before_reads,after_reads])
                print(("%s successfully cleaned and trimmed. Stats are printed into %s" %(sample_id, os.path.join(output_sample_dir, "%s_stats.txt" %sample_id)) ))
                print(("#" * 50))
            except:
                print ("Trimmomatic was interrupted or did not start properly. You may have entered impossible values in the trimmomatic settings or trimmomatic cannot be found in the environment. Rerun again with different values for the trimmomatic flags. If that doesn't solve the problem, reinstall the secapr environment, to ensure trimmomatic being installed in the correct path.")
                sys.exit()
    else:
        # No barcodes available: quality-only trimming (no ILLUMINACLIP step).
        print(("***********No barcodes for %s stored in config-file. Only quality trimming (no adapter trimming) will be performed***********" %sample_id))
        with open(stats_file, 'w') as log_err_file:
            command2 = [
                "trimmomatic",
                "PE",
                "-phred33",
                R1,
                R2,
                output[0],
                output[1],
                output[2],
                output[3],
                "SLIDINGWINDOW:%d:%d" %(window_size,required_quality),
                "LEADING:%d" %leading,
                "TRAILING:%d" %trailing,
                "CROP:%d" %tail_crop,
                "HEADCROP:%d" %head_crop,
                "MINLEN:%d" %min_length
            ]
            p2 = subprocess.Popen(command2, stderr=log_err_file)
            p2.communicate()
            before_reads, after_reads = get_read_count_from_stats_file(stats_file)
            stats_dict.setdefault(sample_id,[before_reads,after_reads])
            print(("%s successfully cleaned. Stats are printed into %s" %(sample_id, os.path.join(output_sample_dir, "%s_stats.txt" %sample_id)) ))
            print(("#" * 50))
    # Rebuild the cumulative stats table from the (mutated) stats_dict.
    stats_df=pd.DataFrame.from_dict(stats_dict, orient='index').reset_index()
    stats_df.columns = ['sample', 'fastq_read_pairs_raw','fastq_read_pairs_cleaned']
    print(stats_df)
    return(stats_df)
def main(args):
    """Entry point: clean and trim every sample's read pair found in --input.

    Reads adapters/barcodes/sample names from the config file, pairs FASTQ
    files per sample, drops files below the read-count threshold, and runs
    quality_trim on each surviving pair.

    NOTE(review): `stats_df` is only assigned inside the loop; if no read
    pair passes the checks, the final `stats_df.to_csv` raises NameError --
    confirm and guard upstream.
    """
    # Set working directory
    work_dir = args.input
    out_dir = args.output
    # Get all trimmomatic settings
    seed_mismatches = args.seedMismatches
    palindrome_clip_threshold = args.palindromeClipThreshold
    simple_clip_threshold = args.simpleClipThreshold
    window_size = args.windowSize
    required_quality = args.requiredQuality
    leading = args.leadingQuality
    trailing = args.trailingQuality
    tail_crop = args.cropToLength
    head_crop = args.headCrop
    min_length = args.minLength
    # Return the user-set or default read-threshold
    read_threshold = args.read_min
    print(("\n\n[Info:] Files with a read-count of less than %d are not being processed. If required you can set a different threshold, using the --read_min flag.\n" %read_threshold))
    adapt_index = args.index
    # Set conf as variable
    conf = configparser.ConfigParser()
    # Read the config argument and define input as string
    conf.optionxform = str
    conf.read(args.config)
    # Call a config element
    #import ipdb; ipdb.set_trace()
    adapters = conf.items('adapters')
    barcodes = conf.items('barcodes')
    names = conf.items('names')
    # Read the sample name information from the config file
    names_id = []
    for element in names:
        names_id.append(element[0])
    delimiter = []
    for element in names:
        delimiter.append(element[1])
    if len(set(delimiter)) > 1:
        quit('Multiple delimiters defined in [names] section of config file. Please choose consistent delimiter in filenames and config file!')
    # Add delimiter after the sample-name
    name_pattern = []
    for i in range(len(names_id)):
        name_pattern.append("%s%s" %(names_id[i],delimiter[i]))
    # Create the output directory
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Find samples for which both reads exist
    read_pairs = find_fastq_pairs(name_pattern, work_dir)
    if len(read_pairs) ==0:
        sys.exit('***SECAPR-ERROR: No FASTQ files were found. Check if correct path is provided for --input flag')
    # For each pair execute the quality_trim command (trimmomatic)
    #read_count_file = open("%s/read_count_overview.txt" %out_dir, "w")
    #countlog=csv.writer(read_count_file, delimiter='\t')
    #countlog.writerow(["file","readcount"])
    stats_dict = {}
    for key, values in list(read_pairs.items()):
        if len(values) > 1:
            list_values = list(values)
            clean_list = []
            # Keep only files that meet the minimum read-count threshold.
            for i in range(len(list_values)):
                fq_path = "/".join((work_dir, list_values[i]))
                readcount = read_count(fq_path)
                #countlog.writerow([list_values[i],readcount])
                if readcount >= read_threshold:
                    clean_list.append(list_values[i])
                else:
                    print(('***The file %s does not contain enough reads.***' %str(list_values[i])))
                    pass
            if len(clean_list) > 1:
                r1 = ""
                r2 = ""
                # Classify each file as forward (R1) or reverse (R2) read.
                for fq in clean_list:
                    pattern_r1 = ["%sR1"%str(delimiter[0]),"%sREAD1"%str(delimiter[0]),"%sRead1"%str(delimiter[0]),"%sread1"%str(delimiter[0])]
                    pattern_r2 = ["%sR2"%str(delimiter[0]),"%sREAD2"%str(delimiter[0]),"%sRead2"%str(delimiter[0]),"%sread2"%str(delimiter[0])]
                    if any(pat in fq for pat in pattern_r1):
                        r1 = fq
                    elif any(pat in fq for pat in pattern_r2):
                        r2 = fq
                    else:
                        print(('#' * 50))
                        print(("No matching read designation (R1 or R2) found for %s" %fq))
                        print(('#' * 50))
                # Remove the delimiter after the sample name in case it is part of the key
                if key.endswith(delimiter[0]):
                    clean_key = rchop(key,delimiter[0])
                    stats_df = quality_trim(r1,r2,clean_key,work_dir,out_dir,barcodes,conf,adapt_index,seed_mismatches,palindrome_clip_threshold,simple_clip_threshold,window_size,required_quality,leading,trailing,tail_crop,head_crop,min_length,stats_dict)
                else:
                    stats_df = quality_trim(r1,r2,key,work_dir,out_dir,barcodes,conf,adapt_index,seed_mismatches,palindrome_clip_threshold,simple_clip_threshold,window_size,required_quality,leading,trailing,tail_crop,head_crop,min_length,stats_dict)
    stats_df.to_csv(os.path.join(out_dir,'sample_stats.txt'),sep = '\t',index=False)
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
| {
"content_hash": "5fda9cca42724b9da13972c68f825347",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 387,
"avg_line_length": 43.4343163538874,
"alnum_prop": 0.6045923091167212,
"repo_name": "AntonelliLab/seqcap_processor",
"id": "2010471f244012fbd7ab3592fca5943e3ff6ca65",
"size": "16259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "secapr/clean_reads_old.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4198"
},
{
"name": "Python",
"bytes": "584979"
},
{
"name": "R",
"bytes": "6478"
},
{
"name": "Shell",
"bytes": "708"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from rest_framework.response import Response
from sentry import features
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationAdminPermission
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import AuthProvider
from sentry.tasks.auth import email_missing_links
ERR_NO_SSO = _("The SSO feature is not enabled for this organization.")
class OrganizationAuthProviderSendRemindersEndpoint(OrganizationEndpoint):
    """Queue reminder e-mails for members who have not linked their SSO identity."""

    permission_classes = (OrganizationAdminPermission,)

    def post(self, request, organization):
        """Kick off the reminder-mail task for the organization's auth provider.

        Responds 403 when the SSO feature is disabled and 404 when the
        organization has no auth provider configured.
        """
        sso_enabled = features.has(
            "organizations:sso-basic", organization, actor=request.user
        )
        if not sso_enabled:
            return Response(ERR_NO_SSO, status=403)

        try:
            provider = AuthProvider.objects.get(organization=organization)
        except AuthProvider.DoesNotExist:
            raise ResourceDoesNotExist

        email_missing_links.delay(organization.id, request.user.id, provider.key)
        return Response(status=200)
| {
"content_hash": "2688b0886df5c7dba810d9fff5266cef",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 91,
"avg_line_length": 39.17857142857143,
"alnum_prop": 0.7675478577939836,
"repo_name": "beeftornado/sentry",
"id": "bc1700e9f29122498ad6c895d3b69870ea3798d2",
"size": "1097",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/organization_auth_provider_send_reminders.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
import struct
import data.libtctrout as libtctrout
# Window
# |
# +-LayeredGameWindow
# |
# +-MessageWindow
# |
# +-ChoiceWindow
# |
# +-InputWindow
# | |
# | +-ConsoleWindow
# |
# +-NodeWindow
# +
# |
# +-InventoryWindow
def convert(color):
    """Convert a hex color string (e.g. 'ff0000') to a tuple of ints.

    Non-string values (already-converted tuples) pass through unchanged, so
    the function is safe to call repeatedly on the same value.

    Has to be replaced to change graphical back-end library.

    Fixed: the previous ``color.decode('hex')`` + ``struct.unpack('BBB', ...)``
    combination only works on Python 2 and only for exactly three bytes;
    ``bytearray.fromhex`` is available on both Python 2.7 and 3 and handles
    hex strings of any even length.
    """
    if isinstance(color, str):
        color = tuple(bytearray(bytearray.fromhex(color)))
    return color
class WindowManager(object):
    """Contains all windows and handles positioning, drawing and layering.

    Windows are referred to by ID.
    Window position is in I{positions}, object in I{window_list}, layer in
    I{layers} and visibility flag in I{visibilities} (all keyed by window ID).
    """
    def __init__(self,w,h,name,font='data/main.font'):
        """Initialisation method.

        @type w: number
        @param w: Main window width.
        @type h: number
        @param h: Main window height.
        @type name: string
        @param name: Main window title.
        @type font: string
        @param font: Path to font file.
        """
        self.width = w
        self.height = h
        self.name = name
        self.font = font
        self.specific_init()
        # Monotonically increasing counter used to hand out window IDs.
        self.current_id = 0
        self.window_list = { }
        self.positions = { }
        self.layers = { }
        self.visibilities = { }
    def __len__(self):
        """Number of managed windows."""
        return len(self.window_list)
    def __getitem__(self, item):
        """Look a window up by integer ID."""
        if isinstance(item, int):
            return self.window_list[item]
        else:
            return NotImplemented
    def __iter__(self):
        """Iterate over the window objects (not the IDs)."""
        # works on both Python 2 and 3, unlike the old itervalues() call
        return iter(self.window_list.values())
    def __contains__(self, item):
        """True if *item* is a known window ID."""
        return item in self.window_list
    def specific_init(self):
        """Library-specific initialisation.

        Subclass and replace to change graphical backend.
        """
        self.root = libtctrout.RootWindow(self.name, self.width,
                                          self.height, self.font)
        self.root.clear()
    def add_window(self, layer, type, w, h, x, y):
        """Adds a new window.

        Higher layers are drawn last, thus show up on top.

        @type layer: number
        @param layer: Layer to add the window to.
        @type type: subclass of L{Window}.
        @param type: Type of new window.
        @type w: number
        @param w: Width of new window.
        @type h: number
        @param h: Height of new window.
        @type x: number
        @param x: X coordinate of new window's top-left corner.
        @type y: number
        @param y: Y coordinate of new window's top-left corner.
        @rtype: L{Window}
        @return: Created window.
        """
        win = type(w,h,self)
        id = self.current_id
        self.window_list[id] = win
        self.window_list[id].id = id
        self.positions[id] = (x,y)
        self.layers[id] = layer
        self.visibilities[id] = 1
        self.current_id += 1
        return win
    def get_layers(self):
        """Returns a list of the layers in the manager (one entry per window)."""
        return list(self.layers.values())
    def get_visible(self, id):
        """Returns a window's visibility flag.

        @type id: number
        @param id: Window ID.
        @rtype: bool
        @return: Window's visibility flag.
        """
        return self.visibilities[id]
    def hide_window(self,id):
        """Set a window's visibility flag to I{False}.

        @type id: number
        @param id: Window ID.
        """
        self.visibilities[id] = False
    def show_window(self,id):
        """Set a window's visibility flag to I{True}.

        @type id: number
        @param id: Window ID.
        """
        self.visibilities[id] = True
    def clear_layer(self,layer):
        """Clear a layer of windows.

        @type layer: number
        @param layer: ID of layer to clear.
        """
        # Fixed: iterate over a snapshot of the IDs.  The old code iterated
        # self.layers.iterkeys() while remove_window() deleted entries from
        # that same dict, which raises "dictionary changed size during
        # iteration" as soon as a window is removed.
        for id in list(self.layers):
            if self.layers[id] == layer:
                self.window_list[id].clear()
                self.remove_window(id)
    def remove_window(self,id):
        """Remove a window.

        @type id: number
        @param id: Window ID.
        """
        if id in self.window_list:
            del self.window_list[id]
            del self.positions[id]
            del self.layers[id]
            del self.visibilities[id]
    def specific_flush(self):
        """Libtcod-specific flushing of console."""
        self.root.update()
        self.root.tick()
    def specific_draw_window(self, id):
        """Libtcod-specific drawing.

        @type id: number
        @param id: Window ID.
        """
        x, y = self.positions[id]
        win = self.window_list[id]
        self.root.blit(win.con, 0, 0, win.width, win.height, x, y)
    def draw_all(self):
        """Draw all the layers, in order.

        Lower layers are drawn first, so higher layers can cover them up.
        """
        # Unique layer values, ascending; sorted(set(...)) replaces the old
        # Python-2-only itervalues()/list/sort dance.
        for layer in sorted(set(self.layers.values())):
            for id in self.layers:
                if self.layers[id] == layer:
                    self[id].update()
                    self.specific_draw_window(id)
        self.specific_flush()
class Window(object):
    """Basic window object with basic drawing methods.

    It takes care of drawing to its own off-screen console; the window
    manager handles blitting that to the main console.  The ``specific_*``
    methods wrap the underlying graphics library so the back-end can be
    swapped by overriding them in a subclass.
    """
    def __init__(self,w,h,parent):
        """Initialisation method.

        @type w: number
        @param w: Width of window.
        @type h: number
        @param h: Height of window.
        @type parent: L{WindowManager}
        @param parent: Manager whose root console this window draws into.
        """
        self.width = w
        self.height = h
        self.parent = parent
        # Placeholder colors (red); callers are expected to overwrite these.
        self.bgcol = (255,0,0)
        self.fgcol = (255,0,0)
        # Border tile (bgcol, char, fgcol, bgset) or None for a plain frame.
        self.border_tile = None
        self.specific_init()
    def specific_init(self):
        """Library-specific console initialisation."""
        self.con = self.parent.root.new_window(self.width,self.height)
    def specific_set_bg_col(self,x,y,bgcol):
        """Library-specific setting a position's background color.

        @type x: number
        @param x: X coordinate.
        @type y: number
        @param y: Y coordinate.
        @type bgcol: libtcod.Color
        @param bgcol: Color to set the cell's background color to.
        """
        self.con.set_col(x, y, bg=bgcol)
    def specific_set_fg_col(self,x,y,bgcol):
        """Library-specific setting a position's foreground color.

        NOTE: the parameter is named ``bgcol`` but actually carries the
        foreground color (it is passed as ``fg=`` below).

        @type x: number
        @param x: X coordinate.
        @type y: number
        @param y: Y coordinate.
        @type bgcol: libtcod.Color
        @param bgcol: Color to set the cell's foreground color to.
        """
        self.con.set_col(x, y, fg=bgcol)
    def specific_get_bg_col(self,x,y):
        """Library-specific getting a position's background color.

        @type x: number
        @param x: X coordinate.
        @type y: number
        @param y: Y coordinate.
        """
        # get_col returns a (background, foreground) pair.
        return self.con.get_col(x,y)[0]
    def specific_get_fg_col(self,x,y):
        """Library-specific getting a position's foreground color.

        @type x: number
        @param x: X coordinate.
        @type y: number
        @param y: Y coordinate.
        """
        return self.con.get_col(x,y)[1]
    def specific_draw_char(self,x,y,char,bgcol,fgcol,bgset):
        """Library-specific drawing a character at a position with a specific color.

        @type x: number
        @param x: X coordinate.
        @type y: number
        @param y: Y coordinate.
        @type char: char
        @param char: Character to draw.
        @type bgcol: libtcod.Color
        @param bgcol: libtcod-specific color to draw background of character.
        @type fgcol: libtcod.Color
        @param fgcol: libtcod-specific color to draw character foreground in.
        @type bgset: bool
        @param bgset: If I{True}, fills background in, else, only the character itself is drawn.
        """
        # A None background tells the back-end to leave the cell background alone.
        if not bgset:
            bgcol = None
        self.con.put_char(x, y, char, bg=bgcol, fg=fgcol)
    def specific_print_line_rect(self,bgcol,fgcol,x,y,w,h,msg):
        """Library-specific drawing a string inside a filled rectangle.

        @type bgcol: libtcod.Color
        @param bgcol: Background libtcod-specific color to draw the rectangle.
        @type fgcol: libtcod.Color
        @param fgcol: Foreground libtcod-specific color to draw the text in.
        @type x: number
        @param x: X coordinate of top-left corner of rectangle.
        @type y: number
        @param y: Y coordinate of top-left corner of rectangle.
        @type w: number
        @param w: Width of rectangle.
        @type h: number
        @param h: Height of rectangle.
        @type msg: string
        @param msg: String to draw inside the rectangle.
        """
        self.con.put_string(x,y,w,h,msg,bg=bgcol,fg=fgcol)
    def specific_get_line_height(self,x,y,w,h,msg):
        """Library-specific predicting height of a string inside a rect without drawing.

        Returns the height of the string wrapped around into the rectangle.

        @type x: number
        @param x: X coordinate of top-left corner of rectangle.
        @type y: number
        @param y: Y coordinate of top-left corner of rectangle.
        @type w: number
        @param w: Width of rectangle.
        @type h: number
        @param h: Height of rectangle.
        @type msg: string
        @param msg: String to fit inside the rectangle.
        @rtype: number
        @return: Number of lines the string would end up being.
        """
        return self.con.test_string_height(x,y,w,h,msg)
    def specific_console_clear(self,bgcol,fgcol):
        """Library-specific clearing of console to a color.

        @type bgcol: libtcod.Color
        @param bgcol: Background libtcod-specific color to clear to.
        @type fgcol: libtcod.Color
        @param fgcol: Foreground libtcod-specific color to clear to.
        """
        self.con.clear(bg=bgcol,fg=fgcol)
    def specific_h_line(self, x, y, w):
        """Library-specific drawing of a horizontal line onto the console."""
        # Lines always use the window's current color pair.
        bgcol = convert(self.bgcol)
        fgcol = convert(self.fgcol)
        self.con.put_h_line(x,y,w,bg=bgcol,fg=fgcol)
    def specific_v_line(self, x, y, h):
        """Library-specific drawing of a vertical line onto the console."""
        bgcol = convert(self.bgcol)
        fgcol = convert(self.fgcol)
        self.con.put_v_line(x,y,h,bg=bgcol,fg=fgcol)
    def specific_frame(self, x, y, w, h):
        """Library-specific drawing of a frame onto the console."""
        bgcol = convert(self.bgcol)
        fgcol = convert(self.fgcol)
        self.con.put_frame(x,y,w,h,bg=bgcol,fg=fgcol)
    def reverse_at(self,x,y):
        """Reverse the colors in a cell.

        @type x: number
        @param x: X coordinate.
        @type y: number
        @param y: Y coordinate.
        """
        # Swap background and foreground colors in place.
        colb = self.specific_get_bg_col(x,y)
        colf = self.specific_get_fg_col(x,y)
        self.specific_set_bg_col(x,y,colf)
        self.specific_set_fg_col(x,y,colb)
    def reverse_rect(self,x,y,w,h):
        """Reverse the colors of every cell in a w-by-h rectangle at (x, y)."""
        for i in range(x, x+w):
            for j in range(y, y+h):
                self.reverse_at(i,j)
    def reverse_line(self,y):
        """Reverse one text row, skipping the 2-cell border padding on each side."""
        self.reverse_rect(2,y,self.width-4,1)
    def set_border(self,tile):
        """Set the border type, (bgcol, char, fgcol, bgset) or None for no border.

        Draws the border immediately afterwards.

        @type tile: tuple
        @param tile: Tuple of the form (bgcol, char, fgcol, bgset) that describes
            the window's border tile.  NOTE(review): restore_border concatenates
            it to a list, so in practice a list is expected here — confirm.
        """
        self.border_tile = tile
        self.restore_border()
    def restore_border(self):
        """Restore border, automatically called after clearing or border draw_tiles.

        Draws a plain frame when I{border_tile} is None, otherwise tiles the
        border cells with I{border_tile}.
        """
        if self.border_tile is not None:
            tiles = [ ]
            # Top and bottom edges...
            for i in range(self.width):
                tiles += [[i,0]+self.border_tile]
                tiles += [[i,self.height-1]+self.border_tile]
            # ...then left and right edges (corners are written twice).
            for i in range(self.height):
                tiles += [[0,i]+self.border_tile]
                tiles += [[self.width-1,i]+self.border_tile]
            self.draw_tiles(tiles)
        else:
            self.specific_frame(0, 0, self.width, self.height)
    def draw_tiles(self,tiles):
        """Draw tiles onto the window.

        Does not clear window.  Tiles outside the window bounds are skipped.

        @type tiles: list
        @param tiles: List of tuples of the form (x, y, bgcol, char, fgcol, bgset).
        """
        for tile in tiles:
            x, y, bgcol, char, fgcol, bgset = tile
            if 0 <= x < self.width:
                if 0 <= y < self.height:
                    self.specific_draw_char(x,y,char,bgcol,fgcol,bgset)
    def draw_messages(self,msgs):
        """Draw messages onto the window.

        Does not clear window.  Messages wrap inside a rect of width
        ``self.width - 4`` using the window's current colors.

        @type msgs: list
        @param msgs: List of tuples of the form (x, y, msg).
        """
        for msg in msgs:
            x, y, line = msg
            self.specific_print_line_rect(convert(self.bgcol), convert(self.fgcol), x, y, self.width-4, 0, line)
    def clear(self,bgcol = None, fgcol = None):
        """Clear window.

        Defaults to clearing to the instance variables L{bgcol} and L{fgcol}.
        Restores the border afterwards.

        @type bgcol: tuple or libtcod.Color or None
        @param bgcol: Background color to clear the window to.
        @type fgcol: tuple or libtcod.Color or None
        @param fgcol: Foreground color to clear the window to.
        """
        if bgcol is None:
            bgcol = self.bgcol
        if fgcol is None:
            fgcol = self.fgcol
        bgcol = convert(bgcol)
        fgcol = convert(fgcol)
        self.specific_console_clear(bgcol,fgcol)
        self.restore_border()
    def update(self):
        """Internally-called updater method.  No-op in the base class."""
        pass
class LayeredGameWindow(Window):
    """Main game window, supports layers.

    Each layer is a list of tile tuples; layers with a higher id are drawn
    later and therefore appear on top.
    """
    def __init__(self,w,h,parent):
        """Initialisation method."""
        super(LayeredGameWindow,self).__init__(w,h,parent)
        # layer id -> list of (x, y, bgcol, char, fgcol, bgset) tuples
        self.layers = { }
    def update_layer(self,layer,tiles):
        """Update a layer with a list of (x, y, bgcol, char, fgcol, bgset).

        The sequence is copied so later caller-side mutation cannot affect
        the stored layer.  (The old code built the copy in a manual loop and
        shadowed the builtin ``map`` while doing so.)
        """
        self.layers[layer] = list(tiles)
        self.draw_layers()
    def draw_layers(self):
        """Draw all layers bottom-to-top, then restore border."""
        self.clear()
        # sorted() over the dict iterates its keys and works on both
        # Python 2 and 3, unlike the previous iterkeys()-based code.
        for layer in sorted(self.layers):
            self.draw_tiles(self.layers[layer])
        self.restore_border()
class MessageWindow(Window):
    """Main messaging window type.

    Scrolls down when too many messages have been added, but dumps old
    messages.  Each queued message is stored as a mutable
    ``[x, y, text]`` list so its y coordinate can be rewritten when the
    queue scrolls.
    """
    def __init__(self,w,h,parent):
        """Initialisation method.

        @type w: number
        @param w: Width of window.
        @type h: number
        @param h: Height of window.
        """
        super(MessageWindow,self).__init__(w,h,parent)
        # Queue of [x, y, text] entries, oldest first.
        self.messages = [ ]
    def get_msg_y(self,id):
        """Returns y coordinate of message number *id* in the queue."""
        return self.messages[id][1]
    def get_current_height(self):
        """Returns current height of messages in window.

        @rtype: number
        @return: Current height of messages in queue.
        """
        y = 2 # padding for border
        for msg in self.messages:
            # Each message may wrap onto several rows; ask the back-end.
            y += self.get_msg_height(y, msg[2])
        return y
    def get_msg_height(self, y, msg):
        """Convenience method: wrapped height of *msg* at row *y*."""
        return self.specific_get_line_height(2, y, self.width-4, 0, msg)
    def add_messages(self,msgs):
        """Add messages to queue.

        Oldest messages are dropped (and the remaining ones reflowed) until
        the new message fits above the bottom border; then the whole queue
        is redrawn.

        @type msgs: list
        @param msgs: List of strings to add to the queue.
        """
        for msg in msgs:
            y = self.get_current_height()
            h = self.get_msg_height(y, msg)
            while y + h > self.height - 2:
                # Drop the oldest message, then recompute every remaining
                # message's y coordinate from the top padding down.
                del self.messages[0]
                y = 2
                for tmp_msg in self.messages:
                    tmp_msg[1] = y
                    y += self.get_msg_height(y, tmp_msg[2])
                y = self.get_current_height()
            self.messages.append([2, y, msg])
        self.clear()
        self.draw_messages(self.messages)
    def update(self):
        """Redraw the whole message queue."""
        super(MessageWindow,self).update()
        self.clear()
        self.draw_messages(self.messages)
class ChoiceWindow(MessageWindow):
    """Menu window: lets the player pick a single entry from a list."""

    def __init__(self, w, h, parent):
        """Create an empty menu with no label and nothing highlighted."""
        super(ChoiceWindow, self).__init__(w, h, parent)
        self.labels = []
        self.choices = []
        self.highlight = None

    def set_label(self, label):
        """Set the (possibly multi-line) text shown above the choices."""
        self.labels = label.split("\n")

    def set_choices(self, choices):
        """Replace the list of selectable entries and clamp the highlight."""
        self.choices = choices
        if self.highlight is None or self.highlight >= len(self.choices):
            self.highlight = 0
        self.update()

    def add_choice(self, choice):
        """Append one selectable entry."""
        self.choices.append(choice)
        if self.highlight is None:
            self.highlight = 0
        self.update()

    def set_highlight(self, id):
        """Highlight entry *id*; out-of-range values are silently ignored."""
        if 0 <= id < len(self.choices):
            self.highlight = id
            self.update()

    def move_up(self):
        """Move the highlight one entry up."""
        self.set_highlight(self.highlight - 1)

    def move_down(self):
        """Move the highlight one entry down."""
        self.set_highlight(self.highlight + 1)

    def enter(self):
        """Return the index of the currently highlighted entry."""
        return self.highlight

    def update(self):
        """Redraw label, choices and the highlight bar."""
        self.messages = []
        self.draw_messages(self.messages)
        lines = list(self.labels)
        lines.append(' ')
        lines.extend(" " + choice for choice in self.choices)
        self.add_messages(lines)
        # Reverse the colors of every screen row the highlighted entry spans.
        top = self.get_msg_y(self.highlight + len(self.labels) + 1)
        span = self.get_msg_height(0, self.choices[self.highlight] + ' ' + str(self.highlight) + '. ')
        for offset in range(span):
            self.reverse_line(top + offset)
class InputWindow(MessageWindow):
    """One-line input window.

    Used to obtain a single line of text from the player.
    """

    def __init__(self, w, h, parent):
        """Create an empty input field with a 5-character limit and no label.

        @type w: number
        @param w: Width of window.
        @type h: number
        @param h: Height of window.
        """
        super(InputWindow, self).__init__(w, h, parent)
        self.label = ""
        self.length = 5
        self.input = ""

    def set_label(self, label):
        """Set the prompt shown in front of the typed text.

        @type label: string
        @param label: Text to display before text.
        """
        self.label = label
        self.update()

    def set_length(self, length):
        """Set the maximum number of characters that may be typed.

        @type length: number
        @param length: Maximum number of characters that can be input.
        """
        self.length = length

    def add_char(self, char):
        """Append *char* to the input and redraw.

        Does nothing once the maximum length is reached.
        """
        if len(self.input) >= self.length:
            return
        self.input = self.input + char
        self.update()

    def remove_char(self, id):
        """Remove the character at index *id* from the input and redraw."""
        self.input = self.input[:id] + self.input[id + 1:]
        self.update()

    def backspace(self):
        """Convenience method: remove the last typed character."""
        self.remove_char(len(self.input) - 1)

    def enter(self):
        """Return the typed text and reset the field.

        @rtype: string
        @return: Currently input string.
        """
        text, self.input = self.input, ""
        self.update()
        return text

    def update(self):
        """Redraw the prompt followed by the current input."""
        self.clear()
        self.messages = []
        self.add_messages([self.label + self.input])
        self.restore_border()
class ConsoleWindow(InputWindow):
    """Console-type window: an input window with a command history."""

    def __init__(self, w, h, parent):
        super(ConsoleWindow, self).__init__(w, h, parent)
        self.history = []

    def enter(self):
        """Record the current input in the history, then return and clear it."""
        if self.input:
            self.history.append(self.input)
        return super(ConsoleWindow, self).enter()

    def update(self):
        """Redraw history (newest first), the prompt row, and a separator."""
        self.clear()
        self.messages = []
        self.add_messages([" "])
        if self.history:
            # Most recent commands first; reversed() avoids mutating
            # self.history the way the old double list.reverse() did.
            recent = list(reversed(self.history))[:self.height - 6]
            self.add_messages(recent)
        self.draw_messages([(1, 1, self.label + self.input)])
        self.reverse_rect(1, 1, self.width - 2, 1)
        self.specific_h_line(1, 2, self.width - 2)
        self.restore_border()
class NodeWindow(MessageWindow):
    """Node-list window, like a tree view.

    Nodes are keyed by arbitrary integer ids; id 0 is always the root.
    """
    def __init__(self,w,h,parent):
        """Initialisation method."""
        super(NodeWindow,self).__init__(w,h,parent)
        self.node_list = { }     # node id -> display text
        self.node_parents = { }  # node id -> parent id (None for the root)
        self.node_meta = { }     # node id -> arbitrary meta tuple
        self.add_node(0,None,"root",(0,))
        self.highlight = None
    def set_nodes(self, parents, texts, meta=None):
        """Replace the whole tree from parallel dicts keyed by node id."""
        if meta is None:
            meta = { }
        self.node_list = { }
        self.node_parents = { }
        self.node_meta = { }
        for id in texts.keys():
            if id not in meta:
                meta[id] = ()
            self.add_node(id, parents[id], texts[id], meta[id])
        self.update()
    def add_node(self, id, parent, text, meta=()):
        """Add (or replace) a node; unknown parents are reattached to root."""
        if id in self.node_list:
            del self.node_list[id]
            del self.node_parents[id]
            del self.node_meta[id]
        if parent not in self.node_list and parent is not None:
            parent = 0
        self.node_list[id] = text
        self.node_parents[id] = parent
        self.node_meta[id] = meta
    def get_node_meta(self, id):
        """Return a node's meta tuple, or None for unknown ids."""
        if id not in self.node_list:
            return
        return self.node_meta[id]
    def highlight_node_by_meta(self, meta):
        """Activate the first node (by ascending id) whose first meta entry is *meta*.

        Fixed: the old code iterated ``range(len(self.node_meta))``, which
        raises KeyError whenever node ids are not the contiguous run
        0..n-1, and it indexed ``node[0]`` without guarding against empty
        meta tuples.
        """
        for id in sorted(self.node_meta):
            node = self.node_meta[id]
            if node and node[0] == meta:
                # NOTE(review): activated_node is only initialised by the
                # InventoryWindow subclass — presumably this method is meant
                # to be called from there; confirm.
                self.activated_node = id
                break
    def get_node_text(self, id):
        """Return a node's display text, or "Error" for unknown ids."""
        if id not in self.node_list:
            return "Error"
        return self.node_list[id]
    def rename_node(self, id, text):
        """Change a node's display text; unknown ids are ignored."""
        if id not in self.node_list:
            return
        self.node_list[id] = text
    def get_children(self, id):
        """Return the list of ids whose parent is *id*."""
        ret = [ ]
        for node in self.node_parents.keys():
            if self.node_parents[node] == id:
                ret.append(node)
        return ret
    def _recurse_children(self, id=0, ret=None, depth=0):
        """Internal recursion that renders the subtree of *id* as indented lines."""
        if not ret: ret = []
        for child in self.get_children(id):
            ret.append(" "*depth + "|-<" +self.get_node_text(child)+">")
            ret = self._recurse_children(child, ret, depth+1)
        return ret
    def update(self):
        """Redraw the rendered tree and the highlight bar."""
        self.clear()
        msgs = ["<" + self.get_node_text(0) + ">"]
        # (dropped the old dead write to self._depth, which nothing reads)
        msgs += self._recurse_children()
        self.messages = [ ]
        self.add_messages(msgs)
        self.restore_border()
        if self.highlight is not None:
            self.reverse_line(self.get_msg_y(self.highlight))
class InventoryWindow(NodeWindow):
    """Node window that additionally shows the currently active item."""

    def __init__(self, w, h, parent):
        super(InventoryWindow, self).__init__(w, h, parent)
        # Id of the node shown on the "Active:" line, or None.
        self.activated_node = None

    def update(self):
        """Draw the tree, then a separator and the active-item footer."""
        super(InventoryWindow, self).update()
        self.specific_h_line(1, self.height - 3, self.width - 2)
        footer = "Active: "
        if self.activated_node:
            footer += "<" + self.node_list[self.activated_node] + ">"
        self.draw_messages([(3, self.height - 2, footer)])
class SwitchWindow(MessageWindow):
    """Window of labelled on/off toggles plus a "< Close >" entry.

    ``highlight`` is 1-based over the switches; the value
    ``len(switches) + 1`` selects the Close entry.
    NOTE(review): highlight starts at 0 and move_up/move_down only act when
    it is non-zero, so 0 appears to mean "inactive" — confirm intent.
    """
    def __init__(self,w,h,parent):
        super(SwitchWindow,self).__init__(w,h,parent)
        self.switches = [ ]   # display labels, one per toggle
        self.choices = [ ]    # booleans, parallel to switches
        self.meta = [ ]       # opaque per-switch payload, parallel to switches
        self.highlight = 0
    def set_switches(self, switches, choices, meta):
        """Replace all toggles; inputs are copied defensively."""
        self.choices = list(choices)
        self.switches = list(switches)
        self.meta = list(meta)
    def move_up(self):
        """Move the highlight up one row, never below row 1."""
        if self.highlight:
            self.highlight -= 1
        if not self.highlight:
            self.highlight += 1
    def move_down(self):
        """Move the highlight down one row, clamped to the Close entry.

        NOTE(review): does nothing while highlight is 0 — confirm intent.
        """
        if self.highlight:
            self.highlight += 1
        if self.highlight > len(self.switches) + 1:
            self.highlight -= 1
    def enter(self):
        """Toggle the highlighted switch, or commit when Close is selected.

        Returns None after a toggle; returns (switches, choices, meta) and
        resets the highlight when Close is activated.
        """
        if self.highlight != len(self.switches) + 1:
            # NOTE(review): with highlight == 0 this toggles choices[-1]
            # (the last switch) — confirm that is intended.
            self.choices[self.highlight-1] = not self.choices[self.highlight-1]
            return None
        else:
            ret = (self.switches, self.choices, self.meta)
            self.highlight = 0
            return ret
    def update(self):
        """Redraw every switch row, the Close entry, and the highlight bar."""
        self.clear()
        msgs = [ ]
        for id in range(len(self.switches)):
            # "<label>   ... [X]" with the checkbox right-aligned.
            msg = ' '
            msg += self.switches[id]
            msg += ' ' * (self.width - 16 - len(self.switches[id]))
            msg += ' ['
            if self.choices[id]:
                msg += 'X]'
            else:
                msg += ' ]'
            msgs.append(msg)
        # Pad with blank rows so Close always sits near the bottom.
        while len(msgs) < self.height - 5:
            msgs.append(" ")
        msgs.append("< Close >")
        self.messages = [ ]
        self.add_messages(msgs)
        if self.highlight <= len(self.switches):
            self.reverse_line(self.highlight+1)
        else:
            self.reverse_line(self.height-3)
        self.restore_border()
| {
"content_hash": "3d4c415812353f4090401501f75f4114",
"timestamp": "",
"source": "github",
"line_count": 877,
"max_line_length": 112,
"avg_line_length": 31.511972633979475,
"alnum_prop": 0.5356419163410044,
"repo_name": "RedMike/pYendor",
"id": "b1d7c0753e6a212cd1d72504ee0f3c3a0711b3f3",
"size": "27636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/graphics.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "138430"
}
],
"symlink_target": ""
} |
"""
Convert the ASCII download_file_types.asciipb proto into a binary resource.
We generate a separate variant of the binary proto for each platform,
each which contains only the values that platform needs.
"""
import os
import re
import sys
# Import the binary proto generator. Walks up to the root of the source tree
# which is five directories above, and the finds the protobufs directory from
# there.
proto_generator_path = os.path.normpath(os.path.join(os.path.abspath(__file__),
*[os.path.pardir] * 5 + ['chrome/browser/resources/protobufs']))
sys.path.insert(0, proto_generator_path)
from binary_proto_generator import BinaryProtoGenerator
# Map of platforms for which we can generate binary protos.
# This must be run after the custom imports.
# key: type-name
# value: proto-platform_type (int)
def PlatformTypes():
  """Returns a map of lowercase platform name -> proto platform enum value."""
  platform = download_file_types_pb2.DownloadFileType
  return {
      "android": platform.PLATFORM_ANDROID,
      "chromeos": platform.PLATFORM_CHROME_OS,
      "linux": platform.PLATFORM_LINUX,
      "mac": platform.PLATFORM_MAC,
      "win": platform.PLATFORM_WINDOWS,
  }
def PrunePlatformSettings(file_type, default_settings, platform_type):
  """Reduces file_type.platform_settings, in place, to a single entry.

  Keeps only the best setting for |platform_type|, in order of preference:
    * Exact match to platform_type
    * PLATFORM_ANY entry
    * or copy from |default_settings| (the default file type's setting).
  The kept entry has its 'platform' field cleared.
  """
  last_platform = -1
  setting_match = None
  for s in file_type.platform_settings:
    # Enforce: sorted and no dups (signs of mistakes).
    assert last_platform < s.platform, (
        "Extension '%s' has duplicate or out of order platform: '%s'" %
        (file_type.extension, s.platform))
    last_platform = s.platform

    # Pick the most specific match: exact platform wins; PLATFORM_ANY is
    # only used if nothing has matched yet.
    if ((s.platform == platform_type) or
        (s.platform == download_file_types_pb2.DownloadFileType.PLATFORM_ANY and
         setting_match is None)):
      setting_match = s

  # If platform_settings was empty, we'll fill in from the default
  if setting_match is None:
    assert default_settings is not None, (
        "Missing default settings for platform %d" % platform_type)
    setting_match = default_settings

  # Now clear out the full list and replace it with 1 entry.
  del file_type.platform_settings[:]
  new_setting = file_type.platform_settings.add()
  new_setting.CopyFrom(setting_match)
  new_setting.ClearField('platform')
def FilterPbForPlatform(full_pb, platform_type):
  """ Return a copy of |full_pb| filtered down to |platform_type|.

  Also validates that extensions are lowercase alphanumeric/dash/underscore,
  unique, and have unique UMA values.
  """
  assert type(platform_type) is int, "Bad platform_type type"

  new_pb = download_file_types_pb2.DownloadFileTypeConfig()
  new_pb.CopyFrom(full_pb)

  # Ensure there's only one platform_settings for the default.
  PrunePlatformSettings(new_pb.default_file_type, None, platform_type)

  # This can be extended if we want to match weird extensions.
  # Just no dots, non-UTF8, or uppercase chars.
  invalid_char_re = re.compile('[^a-z0-9_-]')

  # Filter platform_settings for each type.
  uma_values_used = set()
  extensions_used = set()
  for file_type in new_pb.file_types:
    assert not invalid_char_re.search(file_type.extension), (
        "File extension '%s' contains non alpha-num-dash chars" % (
            file_type.extension))
    # Fixed: the original used `is not extensions_used`, an identity
    # comparison against the set object itself, which is always true and
    # so never caught duplicate extensions.
    assert file_type.extension not in extensions_used, (
        "Duplicate extension '%s'" % file_type.extension)
    extensions_used.add(file_type.extension)

    assert file_type.uma_value not in uma_values_used, (
        "Extension '%s' reused UMA value %d." % (
            file_type.extension, file_type.uma_value))
    uma_values_used.add(file_type.uma_value)

    # Modify file_type to include only the best match platform_setting.
    PrunePlatformSettings(
        file_type, new_pb.default_file_type.platform_settings[0], platform_type)

  return new_pb
def FilterForPlatformAndWrite(full_pb, platform_type, outfile):
  """ Filter |full_pb| for |platform_type| and write the binary proto to
  |outfile|.
  """
  # Filter it
  filtered_pb = FilterPbForPlatform(full_pb, platform_type)

  # Serialize it
  binary_pb_str = filtered_pb.SerializeToString()

  # Write it to disk.  Use a context manager so the handle is closed (and
  # the data flushed) deterministically; the original leaked the handle to
  # garbage collection.
  with open(outfile, 'wb') as f:
    f.write(binary_pb_str)
def MakeSubDirs(outfile):
  """ Create any missing parent directories for file |outfile|. """
  dirname = os.path.dirname(outfile)
  # Guard against a bare filename: os.makedirs('') raises OSError.
  if dirname and not os.path.exists(dirname):
    try:
      os.makedirs(dirname)
    except OSError:
      # Another process may create the directory between the exists() check
      # and makedirs(); only re-raise if it is genuinely still missing.
      if not os.path.isdir(dirname):
        raise
class DownloadFileTypeProtoGenerator(BinaryProtoGenerator):
  """Generator that turns the ASCII download_file_types proto into one
  binary proto per platform (or a single one with --type)."""

  def ImportProtoModule(self):
    """Import the generated _pb2 module and expose it module-wide.

    The import happens lazily (inside the virtualenv run) because the
    generated module only exists after proto compilation.
    """
    import download_file_types_pb2
    globals()['download_file_types_pb2'] = download_file_types_pb2

  def EmptyProtoInstance(self):
    """Return a fresh, empty config proto for the parser to fill in."""
    return download_file_types_pb2.DownloadFileTypeConfig()

  def ValidatePb(self, opts, pb):
    """ Validate the basic values of the protobuf.  The
    file_type_policies_unittest.cc will also validate it by platform,
    but this will catch errors earlier.
    """
    assert pb.version_id > 0;
    assert pb.sampled_ping_probability >= 0.0;
    assert pb.sampled_ping_probability <= 1.0;
    assert len(pb.default_file_type.platform_settings) >= 1;
    assert len(pb.file_types) > 1;

  def ProcessPb(self, opts, pb):
    """ Generate one or more binary protos using the parsed proto. """
    if opts.type is not None:
      # Just one platform type
      platform_enum = PlatformTypes()[opts.type]
      outfile = os.path.join(opts.outdir, opts.outbasename)
      FilterForPlatformAndWrite(pb, platform_enum, outfile)
    else:
      # Make a separate file for each platform
      for platform_type, platform_enum in PlatformTypes().iteritems():
        # e.g. .../all/77/chromeos/download_file_types.pb
        outfile = os.path.join(opts.outdir,
                               str(pb.version_id),
                               platform_type,
                               opts.outbasename)
        MakeSubDirs(outfile)
        FilterForPlatformAndWrite(pb, platform_enum, outfile)

  def AddCommandLineOptions(self, parser):
    """Register the -a/--all and -t/--type flags on the option parser."""
    parser.add_option('-a', '--all', action="store_true", default=False,
                      help='Write a separate file for every platform. '
                      'Outfile must have a %d for version and %s for platform.')
    parser.add_option('-t', '--type',
                      help='The platform type. One of android, chromeos, ' +
                      'linux, mac, win')

  def AddExtraCommandLineArgsForVirtualEnvRun(self, opts, command):
    """Forward the platform flags to the re-exec'd virtualenv invocation."""
    if opts.type is not None:
      command += ['-t', opts.type]
    if opts.all:
      command += ['-a']

  def VerifyArgs(self, opts):
    """Require exactly one of --all or a valid --type; print help otherwise."""
    if (not opts.all and opts.type not in PlatformTypes()):
      print "ERROR: Unknown platform type '%s'" % opts.type
      self.opt_parser.print_help()
      return False

    if (bool(opts.all) == bool(opts.type)):
      print "ERROR: Need exactly one of --type or --all"
      self.opt_parser.print_help()
      return False
    return True
def main():
  """Run the generator; returns the process exit status."""
  return DownloadFileTypeProtoGenerator().Run()
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "88a6dadff118d08854cffa51e3cb090b",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 80,
"avg_line_length": 37.1151832460733,
"alnum_prop": 0.6819015375934546,
"repo_name": "google-ar/WebARonARCore",
"id": "651ed98dad4eb62e0c576ad62aa1f958217cbaca",
"size": "7270",
"binary": false,
"copies": "3",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "chrome/browser/resources/safe_browsing/gen_file_type_proto.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from distutils.core import setup, Extension

# C extension module compiled from a single source file; after building it
# is importable as `hello`.
module1 = Extension('hello',
                    sources = ['HelloWorld.c'])

# Minimal demo package whose only content is the extension module above.
setup (name = 'PackageName',
       version = '1.0',
       description = 'This is a demo package',
       ext_modules = [module1])
| {
"content_hash": "36bae1bf6d5af7e285df01b7efb1de84",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 47,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.592156862745098,
"repo_name": "sniemi/SamPy",
"id": "38b285d3a71aa1817fdc82c77e1b32dc714974cf",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/c_testing/setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps
from feincms.module.page.models import Page
def add_page_to_extra_context(view_func):
    """
    Adds the best-match page to the extra_context keyword argument. Mainly used
    to provide generic views which integrate into the page module.

    Deprecated: emits a DeprecationWarning on every call.
    """
    def wrapped(request, *args, **kwargs):
        import warnings
        warnings.warn("The `add_page_to_extra_context` view decorator has been"
            " deprecated, as have the function-based generic views in"
            " `django.views.generic` and `feincms.views.generic`. Use the"
            " `feincms.context_processors.add_page_if_missing` context processor"
            " and Django's class-based generic views instead.",
            DeprecationWarning, stacklevel=2)

        # setdefault returns the (possibly freshly created) extra_context dict.
        extra = kwargs.setdefault('extra_context', {})
        extra['feincms_page'] = Page.objects.for_request(request, best_match=True)

        return view_func(request, *args, **kwargs)
    return wraps(view_func)(wrapped)
def standalone(view_func):
    """
    Marks the view method as standalone view; this means that
    ``HttpResponse`` objects returned from ``ApplicationContent``
    are returned directly, without further processing.
    """
    @wraps(view_func)
    def wrapped(request, *args, **kwargs):
        result = view_func(request, *args, **kwargs)
        # Only tag genuine responses; anything else passes through untouched.
        if isinstance(result, HttpResponse):
            result.standalone = True
        return result
    return wrapped
| {
"content_hash": "c4ce1a6d57925325d305b86198383e49",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 81,
"avg_line_length": 35.44444444444444,
"alnum_prop": 0.6702194357366771,
"repo_name": "pjdelport/feincms",
"id": "8413d1c902c3aa4e863de07f80541914a8ff2383",
"size": "1595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feincms/views/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "59421"
},
{
"name": "Python",
"bytes": "428447"
},
{
"name": "Shell",
"bytes": "752"
}
],
"symlink_target": ""
} |
import sys
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# Demo: provision and list nodes on Aliyun ECS via libcloud.
ECSDriver = get_driver(Provider.ALIYUN_ECS)

region = "cn-hangzhou"

# Fill in real credentials before running.
your_access_key_id = ""
your_access_key_secret = ""
ecs = ECSDriver(your_access_key_id, your_access_key_secret, region=region)

sizes = ecs.list_sizes()
# NOTE(review): assumes the region offers at least two instance sizes.
small = sizes[1]

locations = ecs.list_locations()
location = None
for each in locations:
    if each.id == region:
        location = each
        break
if location is None:
    # Bug fix: the message previously hard-coded "cn-qingdao" although the
    # script searches for the region configured above (cn-hangzhou).
    print("could not find %s location" % region)
    sys.exit(-1)
print(location.name)

images = ecs.list_images()
print("Found %d images" % len(images))
# Prefer an Ubuntu image; fall back to the first image available.
for each in images:
    if "ubuntu" in each.id.lower():
        image = each
        break
else:
    image = images[0]
print("Use image %s" % image)

sgs = ecs.ex_list_security_groups()
print("Found %d security groups" % len(sgs))
if len(sgs) == 0:
    sg = ecs.ex_create_security_group(description="test")
    print("Create security group %s" % sg)
else:
    sg = sgs[0].id
    print("Use security group %s" % sg)

nodes = ecs.list_nodes()
print("Found %d nodes" % len(nodes))
if len(nodes) == 0:
    print("Starting create a new node")
    # 5 GB cloud data disk, removed together with the instance.
    data_disk = {
        "size": 5,
        "category": ecs.disk_categories.CLOUD,
        "disk_name": "data_disk1",
        "delete_with_instance": True,
    }
    auth = NodeAuthPassword("P@$$w0rd")
    # Pay-per-traffic networking with a 1 Mbps outbound cap.
    ex_internet_charge_type = ecs.internet_charge_types.BY_TRAFFIC
    node = ecs.create_node(
        image=image,
        size=small,
        name="test",
        ex_security_group_id=sg,
        ex_internet_charge_type=ex_internet_charge_type,
        ex_internet_max_bandwidth_out=1,
        ex_data_disk=data_disk,
        auth=auth,
    )
    print("Created node %s" % node)
    nodes = ecs.list_nodes()

for each in nodes:
    print("Found node %s" % each)
| {
"content_hash": "f398aa51d08bf73620233b0b0abd8677",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 25.19736842105263,
"alnum_prop": 0.6422976501305483,
"repo_name": "apache/libcloud",
"id": "537cfdfdd7767b9e640bdf91b16baa58b9961220",
"size": "2697",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "demos/example_aliyun_ecs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2155"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9105547"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
"""DBFData is used to provide standard interfaces to the dbfpy library."""
##
# Copyright 2013 Chad Spratt
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
from collections import OrderedDict
from filetypes.libraries.dbfpy import dbf
import table
import field
# GenericFile is just an interface
class DBFData(table.Table):
    """Wraps the dbfpy library with a set of standard functions.

    Presents a Table-compatible interface to dBASE (.dbf) files: field
    introspection, record iteration, record appending, and conversion of
    generic Field objects into dbf field definitions.
    """

    def __init__(self, filename, tablename=None, mode='r'):
        """Open *filename* read-only, or defer creation until setfields().

        Raises table.InvalidDataError if the file cannot be parsed as dbf.
        """
        super(DBFData, self).__init__(filename, tablename)
        if mode == 'r':
            try:
                self.filehandler = dbf.Dbf(self.filename, readOnly=True)
            except dbf.header.struct.error:
                # dbfpy could not parse the header: not a valid dbf file
                raise table.InvalidDataError
        else:
            # Output file is created later, once the fields are known
            self.filehandler = None
        # fieldattrorder, types, and blankvalues make up the format spec
        self.fieldattrorder = ['Name', 'Type', 'Length', 'Decimals', 'Value']
        # Bidirectional mapping between dbf type codes and sqlite type names
        self.types = {'C': 'TEXT', 'N': 'NUMERIC', 'F': 'REAL', 'D': 'DATE',
                      'I': 'INTEGER', 'T': 'DATETIME', 'L': 'LOGICAL',
                      'TEXT': 'C', 'NUMERIC': 'N', 'REAL': 'F',
                      'DATETIME': 'T', 'LOGICAL': 'L',
                      'DATE': 'D', 'INTEGER': 'I'}
        # These are arbitrary values to use for fields in unmatched rows
        self.blankvalues = OrderedDict([('TEXT', ''), ('DATE', (0, 0, 0)),
                                        ('DATETIME', None), ('INTEGER', 0),
                                        ('NUMERIC', 0), ('REAL', 0.0),
                                        ('LOGICAL', ' ')])
        # dbf field names are limited to 10 characters
        self.namelenlimit = 10

    def getfields(self):
        """Returns the fields of the file as a list of Field objects."""
        fieldlist = []
        for fielddef in self.filehandler.fieldDefs:
            # use ordereddict to enable accessing attributes by index
            fieldattrs = OrderedDict([('type', self.types[fielddef.typeCode]),
                                      ('length', fielddef.length),
                                      ('decimals', fielddef.decimalCount)])
            newfield = field.Field(fielddef.name, fieldattributes=fieldattrs,
                                   dataformat='dbf', namelen=self.namelenlimit)
            fieldlist.append(newfield)
        return fieldlist

    def setfields(self, fields):
        """Set the field definitions. Used before any records are added."""
        # creates the output file
        self.filehandler = dbf.Dbf(self.filename, new=True)
        for genericfield in fields:
            dbffield = self.convertfield(genericfield)
            self.filehandler.addField((dbffield['name'],
                                       self.types[dbffield['type']],
                                       dbffield['length'],
                                       dbffield['decimals']))

    def addrecord(self, newrecord):
        """Append a new record (a fieldname -> value mapping) to the file."""
        rec = self.filehandler.newRecord()
        for fieldname in newrecord:
            rec[fieldname] = newrecord[fieldname]
        rec.store()

    def close(self):
        """Close the dbf file handler."""
        # will be None if this was a dummy file
        if self.filehandler is not None:
            self.filehandler.close()

    @classmethod
    def convertfield(cls, unknownfield):
        """Convert a field of unknown type to a dbf field."""
        dbffield = unknownfield.copy()
        if dbffield.hasformat('dbf'):
            dbffield.setformat('dbf')
        else:
            dbfattributes = OrderedDict()
            if unknownfield.hasattribute('type'):
                # Bug fix: the OID mapping was previously overwritten by an
                # unconditional assignment, leaving type 'OID' which exists
                # in neither self.types nor self.blankvalues.
                if unknownfield['type'] == 'OID':
                    dbfattributes['type'] = 'INTEGER'
                else:
                    dbfattributes['type'] = unknownfield['type']
            else:
                dbfattributes['type'] = 'TEXT'
            if unknownfield.hasattribute('length'):
                dbfattributes['length'] = unknownfield['length']
            else:
                # maximum length of a dbf character field
                dbfattributes['length'] = 254
            if unknownfield.hasattribute('decimals'):
                dbfattributes['decimals'] = unknownfield['decimals']
            else:
                dbfattributes['decimals'] = 0
            dbffield.setformat('dbf', dbfattributes)
        dbffield.namelenlimit = 10
        dbffield.resetname()
        return dbffield

    def getfieldtypes(self):
        """Return a list of field types to populate a combo box."""
        # wrap in list() so a real list is returned on Python 3 as well
        return list(self.blankvalues.keys())

    def getblankvalue(self, outputfield):
        """Get a blank value that matches the type of a field."""
        return self.blankvalues[outputfield['type']]

    def getrecordcount(self):
        """Return the number of records in the file."""
        return self.filehandler.recordCount

    def backup(self):
        """Rename the file so the data isn't overwritten."""
        backupcount = 1
        backupname = self.filename + '.old'
        backupnamelen = len(backupname)
        # don't overwrite existing backups, if any
        while os.path.isfile(backupname):
            backupname = backupname[:backupnamelen] + str(backupcount)
            backupcount += 1
        os.rename(self.filename, backupname)

    def __iter__(self):
        """Iterate through all the records in the file."""
        for recordindex in range(self.filehandler.recordCount):
            yield self.filehandler[recordindex]
| {
"content_hash": "238d3b47acc440d6d0761e9ad43a5e96",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 41.29251700680272,
"alnum_prop": 0.5700164744645799,
"repo_name": "chadspratt/AveryDB",
"id": "e834a57aa908a4cb48829729bb375e537af99d48",
"size": "6070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filetypes/dbfdata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Python",
"bytes": "1073038"
}
],
"symlink_target": ""
} |
"""
__graph_Signal.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
____________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
# Auto-generated ATOM3 graphical appearance class; edit with caution.
class graph_Signal(graphEntity):
    # Visual representation of a Signal entity on the ATOM3 canvas.

    def __init__(self, x, y, semObject = None):
        # Link back to the semantic entity this drawing represents.
        self.semanticObject = semObject
        # Fixed bounding-box size of the drawn shape, in canvas units.
        self.sizeX, self.sizeY = 143, 82
        graphEntity.__init__(self, x, y)
        self.ChangesAtRunTime = 0
        self.constraintList = []
        # NOTE(review): atribs is computed but never used below — presumably
        # a leftover from the code generator.
        if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
        else: atribs = None
        self.graphForms = []
        self.imageDict = self.getImageDict()

    def DrawObject(self, drawing, showGG = 0):
        # Render the signal box, its connector and its labels on *drawing*
        # (a Tkinter canvas). showGG additionally draws the graph-grammar label.
        self.dc = drawing
        if showGG and self.semanticObject: self.drawGGLabel(drawing)
        # Invisible oval used purely as a connection point.
        h = drawing.create_oval(self.translate([162.0, 61.0, 162.0, 61.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
        self.connectors.append( h )
        # Main body rectangle.
        h = drawing.create_rectangle(self.translate([21.0, 18.0, 162.0, 98.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
        self.gf6 = GraphicalForm(drawing, h, "gf6")
        self.graphForms.append(self.gf6)
        # Static type label.
        font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
        h = drawing.create_text(self.translate([77.0, 35.0, 77.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'Signal', width = '0', justify= 'left', stipple='' )
        self.gf24 = GraphicalForm(drawing, h, 'gf24', fontObject=font)
        self.graphForms.append(self.gf24)
        # Dynamic label showing the semantic object's name attribute.
        if self.semanticObject: drawText = self.semanticObject.name.toString()
        else: drawText = "<name>"
        font = tkFont.Font( family='Helvetica', size=12, weight='normal', slant='roman', underline=0)
        h = drawing.create_text(self.translate([80.0, 80.0, 80.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = drawText, width = '0', justify= 'left', stipple='' )
        self.attr_display["name"] = h
        self.gf25 = GraphicalForm(drawing, h, 'gf25', fontObject=font)
        self.graphForms.append(self.gf25)

    def postCondition( self, actionID, * params):
        # No post-condition constraints for this entity.
        return None

    def preCondition( self, actionID, * params):
        # No pre-condition constraints for this entity.
        return None

    def getImageDict( self ):
        # No images are used by this graphical class.
        imageDict = dict()
        return imageDict

# ATOM3 looks up `new_class` to instantiate the graphical appearance.
new_class = graph_Signal
| {
"content_hash": "872d777b32510b6593489c323fe20db4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 200,
"avg_line_length": 41.96774193548387,
"alnum_prop": 0.5964642582628747,
"repo_name": "levilucio/SyVOLT",
"id": "eff5b710c612e619f4e6a773c84b4187637f1431",
"size": "2602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/graph_Signal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import logging
import sys
from cliff.app import App
from cliff.commandmanager import CommandManager
from .config import Configurations
VERSION = "0.3"
class PreprintApp(App):
    """Cliff application that exposes the preprint command suite."""

    log = logging.getLogger(__name__)
    confs = Configurations()

    def __init__(self):
        manager = CommandManager('preprint.commands')
        super(PreprintApp, self).__init__(
            description='Tools for writing latex papers',
            version=VERSION,
            command_manager=manager)

    def initialize_app(self, argv):
        """Hook run once, before any command is dispatched."""
        self.log.debug('initialize_app')

    def build_option_parser(self, *args):
        """Extend the base option parser with the --master option."""
        parser = super(PreprintApp, self).build_option_parser(*args)
        parser.add_argument(
            '--master',
            default=self.confs.config('master'),
            help='Name of master tex file')
        return parser

    def prepare_to_run_command(self, cmd):
        """Hook run immediately before each command."""
        self.log.debug('prepare_to_run_command %s', cmd.__class__.__name__)

    def clean_up(self, cmd, result, err):
        """Hook run after each command; logs any error that occurred."""
        self.log.debug('clean_up %s', cmd.__class__.__name__)
        if err:
            self.log.debug('got an error: %s', err)
def main(argv=None):
    """Run the preprint application and return its exit status.

    ``argv`` defaults to the current ``sys.argv[1:]``. The previous
    signature evaluated ``sys.argv[1:]`` once at import time, so later
    changes to sys.argv were silently ignored.
    """
    if argv is None:
        argv = sys.argv[1:]
    myapp = PreprintApp()
    return myapp.run(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| {
"content_hash": "8936a01517d0d21a2e9d985b045fd6da",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 25.32,
"alnum_prop": 0.608214849921011,
"repo_name": "jonathansick/preprint",
"id": "85079c6cf71c9d76955eab41215f5a0329b1bfc9",
"size": "1266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprint/main.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "230723"
}
],
"symlink_target": ""
} |
from .MidiOutStream import MidiOutStream
class MidiToText(MidiOutStream):

    """
    This class renders a midi file as text. It is mostly used for debugging.
    Every event handler simply prints the event's parameters to stdout.
    """

    #############################
    # channel events

    def channel_message(self, message_type, channel, data):
        """The default event handler for channel messages"""
        print('message_type:%X, channel:%X, data size:%X' % (message_type, channel, len(data)))

    def note_on(self, channel=0, note=0x40, velocity=0x40):
        print('note_on - ch:%02X, note:%02X, vel:%02X time:%s' % (channel, note, velocity, self.rel_time()))

    def note_off(self, channel=0, note=0x40, velocity=0x40):
        print('note_off - ch:%02X, note:%02X, vel:%02X time:%s' % (channel, note, velocity, self.rel_time()))

    def aftertouch(self, channel=0, note=0x40, velocity=0x40):
        print('aftertouch', channel, note, velocity)

    def continuous_controller(self, channel, controller, value):
        print('controller - ch: %02X, cont: #%02X, value: %02X' % (channel, controller, value))

    def patch_change(self, channel, patch):
        print('patch_change - ch:%02X, patch:%02X' % (channel, patch))

    def channel_pressure(self, channel, pressure):
        print('channel_pressure', channel, pressure)

    def pitch_bend(self, channel, value):
        print('pitch_bend ch:%s, value:%s' % (channel, value))

    #####################
    ## Common events

    def system_exclusive(self, data):
        # Bug fix: previously referenced the undefined name `date`,
        # raising a NameError whenever a sysex message was parsed.
        print('system_exclusive - data size: %s' % len(data))

    def song_position_pointer(self, value):
        print('song_position_pointer: %s' % value)

    def song_select(self, songNumber):
        print('song_select: %s' % songNumber)

    def tuning_request(self):
        print('tuning_request')

    def midi_time_code(self, msg_type, values):
        print('midi_time_code - msg_type: %s, values: %s' % (msg_type, values))

    #########################
    # header does not really belong here. But anyhoo!!!

    def header(self, format=0, nTracks=1, division=96):
        print('format: %s, nTracks: %s, division: %s' % (format, nTracks, division))
        print('----------------------------------')
        print('')

    def eof(self):
        print('End of file')

    def start_of_track(self, n_track=0):
        print('Start - track #%s' % n_track)

    def end_of_track(self):
        print('End of track')
        print('')

    ###############
    # sysex event

    def sysex_event(self, data):
        print('sysex_event - datasize: %X' % len(data))

    #####################
    ## meta events

    def meta_event(self, meta_type, data):
        print('undefined_meta_event:', meta_type, len(data))

    def sequence_number(self, value):
        # Bug fix: previously printed the undefined name `number`
        # instead of the `value` parameter.
        print('sequence_number', value)

    def text(self, text):
        print('text', text)

    def copyright(self, text):
        print('copyright', text)

    def sequence_name(self, text):
        print('sequence_name:', text)

    def instrument_name(self, text):
        print('instrument_name:', text)

    def lyric(self, text):
        print('lyric', text)

    def marker(self, text):
        print('marker', text)

    def cuepoint(self, text):
        print('cuepoint', text)

    def midi_ch_prefix(self, channel):
        print('midi_ch_prefix', channel)

    def midi_port(self, value):
        print('midi_port:', value)

    def tempo(self, value):
        print('tempo:', value)

    def smtp_offset(self, hour, minute, second, frame, framePart):
        print('smtp_offset', hour, minute, second, frame, framePart)

    def time_signature(self, nn, dd, cc, bb):
        print('time_signature:', nn, dd, cc, bb)

    def key_signature(self, sf, mi):
        print('key_signature', sf, mi)

    def sequencer_specific(self, data):
        print('sequencer_specific', len(data))
if __name__ == '__main__':
    # Smoke test: parse a minimal midi file and print every event.
    # get data
    test_file = 'test/midifiles/minimal.mid'
    f = open(test_file, 'rb')

    # do parsing
    # NOTE(review): a relative import will fail when this module is executed
    # as a script rather than via `python -m` from inside the package.
    from .MidiInFile import MidiInFile
    midiIn = MidiInFile(MidiToText(), f)
    midiIn.read()
    f.close()
| {
"content_hash": "a3001bb341c29d9f5b244ccc6cd58eb4",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 111,
"avg_line_length": 24.1731843575419,
"alnum_prop": 0.55095909406055,
"repo_name": "nanotone/midihub",
"id": "ec0a64c4db59f6f25ff1170c4c41024af51d4777",
"size": "4357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonmidi/MidiToText.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73091"
}
],
"symlink_target": ""
} |
def flushprint(*args, **kwargs):  # type: ignore
    """Replacement for print() that defaults flush to True.

    Delegates to the saved original print (stored as "oldprint" in the
    builtins namespace below) so output is not held in stdio buffers.
    """
    if "flush" not in kwargs:
        kwargs["flush"] = True
    __builtins__["oldprint"](*args, **kwargs)  # type: ignore


# Monkey-patch the builtin print once, keeping the original as "oldprint"
# so repeated imports don't wrap it twice.
# NOTE(review): this treats __builtins__ as a dict, which holds in imported
# modules but not in __main__, where __builtins__ is a module — confirm this
# file is never executed directly.
if "oldprint" not in __builtins__:  # type: ignore
    __builtins__["oldprint"] = __builtins__["print"]  # type: ignore
__builtins__["print"] = flushprint  # type: ignore
| {
"content_hash": "13c2989dcb6ead8e2e1fea4dfa65e187",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 68,
"avg_line_length": 34.6,
"alnum_prop": 0.5982658959537572,
"repo_name": "FinnStutzenstein/OpenSlides",
"id": "d5f8e4b54dd190a9083876cade574e4b88b44c1f",
"size": "392",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/openslides/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124087"
},
{
"name": "Dockerfile",
"bytes": "853"
},
{
"name": "HTML",
"bytes": "449533"
},
{
"name": "JavaScript",
"bytes": "159617"
},
{
"name": "Python",
"bytes": "1398362"
},
{
"name": "Smarty",
"bytes": "7293"
},
{
"name": "TypeScript",
"bytes": "2473991"
}
],
"symlink_target": ""
} |
import os
import unittest
import numpy as np
from tfsnippet.examples.utils import MLResults
from tfsnippet.utils import TemporaryDirectory
def head_of_file(path, n):
    """Return the first *n* bytes of the file at *path*."""
    with open(path, 'rb') as handle:
        return handle.read(n)
class MLResultTestCase(unittest.TestCase):
    """Tests for MLResults image export."""

    def test_imwrite(self):
        """Each saved image must exist and start with its format's magic bytes."""
        cases = [
            ('test.bmp', b'\x42\x4d'),
            ('test.png', b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'),
            ('test.jpg', b'\xff\xd8\xff'),
        ]
        with TemporaryDirectory() as tmpdir:
            results = MLResults(tmpdir)
            # Half-black / half-white test image.
            image = np.zeros([32, 32], dtype=np.uint8)
            image[16:, ...] = 255
            for name, magic in cases:
                results.save_image(name, image)
                saved_path = os.path.join(tmpdir, name)
                self.assertTrue(os.path.isfile(saved_path))
                self.assertEqual(head_of_file(saved_path, len(magic)), magic)
| {
"content_hash": "c5def583af5f38f8f670e94375ba24e5",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 73,
"avg_line_length": 32.86486486486486,
"alnum_prop": 0.5921052631578947,
"repo_name": "korepwx/tfsnippet",
"id": "ea5c897364b2fc0c754f6bda985b3a8473d1518a",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/tensorflow-2.5.3",
"path": "tests/examples/utils/test_mlresult.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "471912"
}
],
"symlink_target": ""
} |
import sys
# Return the trie built from patterns
# in the form of a dictionary of dictionaries,
# e.g. {0:{'A':1,'T':2},1:{'C':3}}
# where the key of the external dictionary is
# the node ID (integer), and the internal dictionary
# contains all the trie edges outgoing from the corresponding
# node, and the keys are the letters on those edges, and the
# values are the node IDs to which these edges lead.
def build_trie(patterns):
    """Build a trie from *patterns* as a dict of dicts.

    The outer dict maps a node ID (int, root is 0) to that node's outgoing
    edges; each edge dict maps the edge letter to the child node ID,
    e.g. {0: {'A': 1, 'T': 2}, 1: {'C': 3}, ...}. Leaves map to empty dicts.
    """
    tree = {0: {}}
    next_node = 1  # next unused node ID
    for pattern in patterns:
        current = 0
        for symbol in pattern:
            children = tree[current]
            if symbol in children:
                # Follow the existing edge.
                current = children[symbol]
            else:
                # Create a fresh node and an edge labelled with this symbol.
                children[symbol] = next_node
                tree[next_node] = {}
                current = next_node
                next_node += 1
    return tree
if __name__ == '__main__':
    # First whitespace-separated token on stdin is a count; the remaining
    # tokens are the patterns themselves.
    patterns = sys.stdin.read().split()[1:]
    tree = build_trie(patterns)
    # Print each edge as "parent->child:letter".
    for node in tree:
        for c in tree[node]:
            print("{}->{}:{}".format(node, tree[node][c], c))
| {
"content_hash": "9daf99f767c092d84b2cf0df65e10f9f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 61,
"avg_line_length": 32.36363636363637,
"alnum_prop": 0.6474719101123596,
"repo_name": "xunilrj/sandbox",
"id": "5764bed8110a53058ee8784f9588835e4e8a6144",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courses/coursera-sandiego-algorithms/algorithms-on-strings/trie/trie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "235"
},
{
"name": "ASP.NET",
"bytes": "110"
},
{
"name": "Assembly",
"bytes": "28409"
},
{
"name": "Asymptote",
"bytes": "22978"
},
{
"name": "C",
"bytes": "1022035"
},
{
"name": "C#",
"bytes": "474510"
},
{
"name": "C++",
"bytes": "33387716"
},
{
"name": "CMake",
"bytes": "1288737"
},
{
"name": "CSS",
"bytes": "49690"
},
{
"name": "Common Lisp",
"bytes": "858"
},
{
"name": "Coq",
"bytes": "6200"
},
{
"name": "Dockerfile",
"bytes": "2912"
},
{
"name": "Elixir",
"bytes": "34"
},
{
"name": "Erlang",
"bytes": "8204"
},
{
"name": "F#",
"bytes": "33187"
},
{
"name": "Fortran",
"bytes": "20472"
},
{
"name": "GDB",
"bytes": "701"
},
{
"name": "GLSL",
"bytes": "7478"
},
{
"name": "Go",
"bytes": "8971"
},
{
"name": "HTML",
"bytes": "6469462"
},
{
"name": "Handlebars",
"bytes": "8236"
},
{
"name": "Haskell",
"bytes": "18581"
},
{
"name": "Java",
"bytes": "120539"
},
{
"name": "JavaScript",
"bytes": "5055335"
},
{
"name": "Jupyter Notebook",
"bytes": "1849172"
},
{
"name": "LLVM",
"bytes": "43431"
},
{
"name": "MATLAB",
"bytes": "462980"
},
{
"name": "Makefile",
"bytes": "1622666"
},
{
"name": "Objective-C",
"bytes": "2001"
},
{
"name": "PostScript",
"bytes": "45490"
},
{
"name": "PowerShell",
"bytes": "192867"
},
{
"name": "Python",
"bytes": "726138"
},
{
"name": "R",
"bytes": "31364"
},
{
"name": "Roff",
"bytes": "5700"
},
{
"name": "Ruby",
"bytes": "5865"
},
{
"name": "Rust",
"bytes": "797104"
},
{
"name": "Sage",
"bytes": "654"
},
{
"name": "Scala",
"bytes": "42383"
},
{
"name": "Shell",
"bytes": "154039"
},
{
"name": "TLA",
"bytes": "16779"
},
{
"name": "TSQL",
"bytes": "3412"
},
{
"name": "TeX",
"bytes": "6989202"
},
{
"name": "TypeScript",
"bytes": "8845"
},
{
"name": "Visual Basic .NET",
"bytes": "1090"
},
{
"name": "WebAssembly",
"bytes": "70321"
},
{
"name": "q",
"bytes": "13889"
}
],
"symlink_target": ""
} |
"""Support for locks which integrates with other components."""
import voluptuous as vol
from homeassistant.components.lock import (
PLATFORM_SCHEMA,
STATE_JAMMED,
STATE_LOCKING,
STATE_UNLOCKING,
LockEntity,
)
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_LOCKED,
STATE_ON,
STATE_UNLOCKED,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from .const import DOMAIN
from .template_entity import (
TEMPLATE_ENTITY_AVAILABILITY_SCHEMA_LEGACY,
TemplateEntity,
rewrite_common_legacy_to_modern_conf,
)
# Configuration keys for the lock/unlock action scripts.
CONF_LOCK = "lock"
CONF_UNLOCK = "unlock"

DEFAULT_NAME = "Template Lock"
DEFAULT_OPTIMISTIC = False

# YAML platform schema: a state template plus mandatory lock/unlock scripts,
# extended with the legacy availability-template options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Required(CONF_LOCK): cv.SCRIPT_SCHEMA,
        vol.Required(CONF_UNLOCK): cv.SCRIPT_SCHEMA,
        vol.Required(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
        vol.Optional(CONF_UNIQUE_ID): cv.string,
    }
).extend(TEMPLATE_ENTITY_AVAILABILITY_SCHEMA_LEGACY.schema)
async def _async_create_entities(hass, config):
    """Create the Template lock."""
    entity_config = rewrite_common_legacy_to_modern_conf(config)
    unique_id = entity_config.get(CONF_UNIQUE_ID)
    return [TemplateLock(hass, entity_config, unique_id)]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the template lock."""
    entities = await _async_create_entities(hass, config)
    async_add_entities(entities)
class TemplateLock(TemplateEntity, LockEntity):
    """Representation of a template lock.

    State is driven by a user-supplied template; lock/unlock actions run
    user-supplied scripts. In optimistic mode the state is assumed to have
    changed as soon as the action is invoked.
    """

    def __init__(
        self,
        hass,
        config,
        unique_id,
    ):
        """Initialize the lock."""
        super().__init__(config=config)
        self._state = None
        self._name = name = config.get(CONF_NAME)
        self._state_template = config.get(CONF_VALUE_TEMPLATE)
        self._command_lock = Script(hass, config[CONF_LOCK], name, DOMAIN)
        self._command_unlock = Script(hass, config[CONF_UNLOCK], name, DOMAIN)
        self._optimistic = config.get(CONF_OPTIMISTIC)
        self._unique_id = unique_id

    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic

    @property
    def name(self):
        """Return the name of the lock."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique id of this lock."""
        return self._unique_id

    @property
    def is_locked(self):
        """Return true if lock is locked."""
        return self._state in ("true", STATE_ON, STATE_LOCKED)

    @property
    def is_jammed(self):
        """Return true if lock is jammed."""
        return self._state == STATE_JAMMED

    @property
    def is_unlocking(self):
        """Return true if lock is unlocking."""
        return self._state == STATE_UNLOCKING

    @property
    def is_locking(self):
        """Return true if lock is locking."""
        return self._state == STATE_LOCKING

    @callback
    def _update_state(self, result):
        """Normalize the template result into an internal state string."""
        super()._update_state(result)
        if isinstance(result, TemplateError):
            self._state = None
            return
        if isinstance(result, bool):
            self._state = STATE_LOCKED if result else STATE_UNLOCKED
            return
        if isinstance(result, str):
            self._state = result.lower()
            return
        self._state = None

    async def async_added_to_hass(self):
        """Register callbacks."""
        self.add_template_attribute(
            "_state", self._state_template, None, self._update_state
        )
        await super().async_added_to_hass()

    async def async_lock(self, **kwargs):
        """Lock the device."""
        if self._optimistic:
            # Bug fix: previously assigned the bool True, which is not one of
            # the values is_locked recognizes ("true", STATE_ON, STATE_LOCKED),
            # so an optimistic lock reported the entity as unlocked.
            self._state = STATE_LOCKED
            self.async_write_ha_state()
        await self._command_lock.async_run(context=self._context)

    async def async_unlock(self, **kwargs):
        """Unlock the device."""
        if self._optimistic:
            # Use the canonical state string for consistency with _update_state.
            self._state = STATE_UNLOCKED
            self.async_write_ha_state()
        await self._command_unlock.async_run(context=self._context)
| {
"content_hash": "a471ae166b36eda13d606c1b26125a74",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 86,
"avg_line_length": 29.403973509933774,
"alnum_prop": 0.6337837837837837,
"repo_name": "home-assistant/home-assistant",
"id": "8f73796c37c8e816dcc1d4d922b63b8f4d16aebf",
"size": "4440",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/template/lock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
import re
import itertools
from Bio.Restriction import RanaConfig as RanaConf
from Bio.Restriction.DNAUtils import complement
"""
Usage :
PrintFormat allow to print the results from restriction analysis in 3
different format.
List, column or map.
the easiest way to use it is :
>>> from Rana.PrintFormat import PrintFormat
>>> from Rana.Restriction import AllEnzymes
>>> from Rana.fts import fts
>>> seq = fts(pBR322)
>>> dct = AllEnzymes.search(seq)
>>> new = PrintFormat()
>>> new.print_that(dct, '\n my pBR322 analysis\n\n','\n no site :\n\n')
my pBR322 analysis
AasI : 2169, 2582.
AatII : 4289.
...
More enzymes.
...
ZraI : 4287.
ZrmI : 3847.
no site :
AarI AatI Acc65I AcsI AcvI AdeI AflII AgeI
...
More enzymes.
...
Vha464I XapI XbaI XcmI XhoI XmaCI XmaI XmaJI
Zsp2I
>>>
Some of the methods of PrintFormat are meant to be overriden by derived
class.
"""
class PrintFormat(object):
    """PrintFormat allow the printing of results of restriction analysis."""

    # Layout configuration, taken from RanaConfig.
    ConsoleWidth = RanaConf.ConsoleWidth      # total console width in chars
    NameWidth = RanaConf.NameWidth            # column width per enzyme name
    MaxSize = RanaConf.MaxSize
    Cmodulo = ConsoleWidth % NameWidth
    # Widest line that fits a whole number of name columns.
    PrefWidth = ConsoleWidth - Cmodulo
    Indent = RanaConf.Indent
    # Room left on a line after the leading name column.
    linesize = PrefWidth - NameWidth
    def __init__(self):
        """PrintFormat() -> new PrintFormat Instance"""
        # No per-instance state; all configuration lives in class attributes.
        pass
def print_as(self, what='list') :
"""PF.print_as([what='list']) -> print the results as specified.
Valid format are :
'list' -> alphabetical order
'number' -> number of sites in the sequence
'map' -> a map representation of the sequence with the sites.
If you want more flexibility over-ride the virtual method make_format.
"""
if what == 'map' :
self.make_format = self._make_map
elif what == 'number' :
self.make_format = self._make_number
else :
self.make_format = self._make_list
return
    def print_that(self, dct, title='', s1=''):
        """PF.print_that(dct, [title[, s1]]) -> Print dct nicely formatted.

        dct is a dictionary as returned by a RestrictionBatch.search()

        title is the title of the map.
        It must be a formated string, i.e. you must include the line break.

        s1 is the title separating the list of enzymes that have sites from
        those without sites.
        s1 must be a formatted string as well.

        The format of print_that is a list.
        """
        # NOTE(review): self.results is not set in this class — presumably a
        # subclass (the Analysis machinery) provides it; confirm before use.
        if not dct:
            dct = self.results
        # Split results into cutters (have positions) and non-cutters.
        ls, nc = [], []
        for k, v in dct.iteritems():
            if v:
                ls.append((k, v))
            else:
                nc.append(k)
        # Python 2 print statement; this module predates Python 3.
        print self.make_format(ls, title, nc, s1)
        return
    def make_format(self, cut=[], title='', nc=[], s1=''):
        """PF.make_format(cut, nc, title, s) -> string

        Virtual method.
        Here to be pointed to one of the _make_* methods.
        You can as well create a new method and point make_format to it.
        """
        # Default output format is the alphabetical list.
        return self._make_list(cut, title, nc, s1)

    ###### _make_* methods to be used with the virtual method make_format
def _make_list(self, ls,title, nc,s1) :
"""PF._make_number(ls,title, nc,s1) -> string.
return a string of form :
title.
enzyme1 : position1, position2.
enzyme2 : position1, position2, position3.
ls is a list of cutting enzymes.
title is the title.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes."""
return self._make_list_only(ls, title) + self._make_nocut_only(nc, s1)
def _make_map(self, ls,title, nc,s1) :
"""PF._make_number(ls,title, nc,s1) -> string.
return a string of form :
title.
enzyme1, position
|
AAAAAAAAAAAAAAAAAAAAA...
|||||||||||||||||||||
TTTTTTTTTTTTTTTTTTTTT...
ls is a list of cutting enzymes.
title is the title.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes."""
return self._make_map_only(ls, title) + self._make_nocut_only(nc, s1)
def _make_number(self, ls,title, nc,s1) :
"""PF._make_number(ls,title, nc,s1) -> string.
title.
enzyme which cut 1 time :
enzyme1 : position1.
enzyme which cut 2 times :
enzyme2 : position1, position2.
...
ls is a list of cutting enzymes.
title is the title.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes."""
return self._make_number_only(ls, title)+self._make_nocut_only(nc,s1)
def _make_nocut(self, ls,title, nc,s1) :
"""PF._make_nocut(ls,title, nc,s1) -> string.
return a formatted string of the non cutting enzymes.
ls is a list of cutting enzymes -> will not be used.
Here for compatibility with make_format.
title is the title.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes."""
return title + self._make_nocut_only(nc, s1)
def _make_nocut_only(self, nc, s1, ls =[],title='') :
"""PF._make_nocut_only(nc, s1) -> string.
return a formatted string of the non cutting enzymes.
nc is a list of non cutting enzymes.
s1 is the sentence before the non cutting enzymes.
"""
if not nc :
return s1
nc.sort()
st = ''
stringsite = s1 or '\n Enzymes which do not cut the sequence.\n\n'
Join = ''.join
for key in nc :
st = Join((st, str.ljust(str(key), self.NameWidth)))
if len(st) > self.linesize :
stringsite = Join((stringsite, st, '\n'))
st = ''
stringsite = Join((stringsite, st, '\n'))
return stringsite
def _make_list_only(self, ls, title, nc = [], s1 = '') :
"""PF._make_list_only(ls, title) -> string.
return a string of form :
title.
enzyme1 : position1, position2.
enzyme2 : position1, position2, position3.
...
ls is a list of results.
title is a string.
Non cutting enzymes are not included."""
if not ls :
return title
return self.__next_section(ls, title)
def _make_number_only(self, ls, title, nc = [], s1 ='') :
"""PF._make_number_only(ls, title) -> string.
return a string of form :
title.
enzyme which cut 1 time :
enzyme1 : position1.
enzyme which cut 2 times :
enzyme2 : position1, position2.
...
ls is a list of results.
title is a string.
Non cutting enzymes are not included."""
if not ls :
return title
ls.sort(lambda x,y : cmp(len(x[1]), len(y[1])))
iterator = iter(ls)
cur_len = 1
new_sect = []
for name, sites in iterator :
l = len(sites)
if l > cur_len :
title += "\n\nenzymes which cut %i times :\n\n"%cur_len
title = self.__next_section(new_sect, title)
new_sect, cur_len = [(name, sites)], l
continue
new_sect.append((name,sites))
title += "\n\nenzymes which cut %i times :\n\n"%cur_len
return self.__next_section(new_sect, title)
    def _make_map_only(self, ls, title, nc = [], s1 = '') :
        """PF._make_map_only(ls, title) -> string.
        return a string of form :
        title.
        enzyme1, position
        |
        AAAAAAAAAAAAAAAAAAAAA...
        |||||||||||||||||||||
        TTTTTTTTTTTTTTTTTTTTT...
        ls is a list of results.
        title is a string.
        nc and s1 are unused (kept for compatibility with make_format).
        Non cutting enzymes are not included."""
        if not ls :
            return title
        resultKeys = [str(x) for x,y in ls]
        resultKeys.sort()
        # `map` (shadows the builtin) accumulates the whole output text.
        map = title or ''
        # Invert the results: enzymemap maps each cut position to the list
        # of enzyme names cutting there.  (Python 2 dict API: has_key.)
        enzymemap = {}
        for (enzyme, cut) in ls :
            for c in cut :
                if enzymemap.has_key(c) :
                    enzymemap[c].append(str(enzyme))
                else :
                    enzymemap[c] = [str(enzyme)]
        mapping = enzymemap.keys()
        mapping.sort()
        # Bucket the sorted cut positions into 60-base windows: cutloc maps
        # each window's start offset to the positions falling inside it.
        cutloc = {}
        x, counter, length = 0, 0, len(self.sequence)
        for x in xrange(60, length, 60) :
            counter = x - 60
            l=[]
            for key in mapping :
                if key <= x :
                    l.append(key)
                else :
                    cutloc[counter] = l
                    # Keep only the positions not yet assigned.
                    mapping = mapping[mapping.index(key):]
                    break
            cutloc[x] = l
        # Whatever is left belongs to the final (possibly short) window.
        cutloc[x] = mapping
        sequence = self.sequence.tostring()
        revsequence = complement(sequence)
        # `a` is the marker drawn between an enzyme name and its cut site.
        a = '|'
        base, counter = 0, 0
        emptyline = ' ' * 60
        Join = ''.join
        # Emit one map unit per full 60-base window: enzyme-name lines and
        # '|' markers above the forward strand, then the duplex and the
        # 1-based coordinates of the window boundaries.
        for base in xrange(60, length, 60) :
            counter = base - 60
            line = emptyline
            for key in cutloc[counter] :
                s = ''
                if key == base :
                    # Cut exactly at the window edge: annotate and stop.
                    for n in enzymemap[key] : s = ' '.join((s,n))
                    l = line[0:59]
                    lineo = Join((l, str(key), s, '\n'))
                    line2 = Join((l, a, '\n'))
                    linetot = Join((lineo, line2))
                    map = Join((map, linetot))
                    break
                for n in enzymemap[key] : s = ' '.join((s,n))
                # Column of the cut inside this 60-base window.
                k = key%60
                lineo = Join((line[0:(k-1)], str(key), s, '\n'))
                line = Join((line[0:(k-1)], a, line[k:]))
                line2 = Join((line[0:(k-1)], a, line[k:], '\n'))
                linetot = Join((lineo,line2))
                map = Join((map,linetot))
            mapunit = '\n'.join((sequence[counter : base],a * 60,
                                 revsequence[counter : base],
                                 Join((str.ljust(str(counter+1), 15), ' '* 30,
                                       str.rjust(str(base), 15),'\n\n'))
                                 ))
            map = Join((map, mapunit))
        # Handle the trailing partial window past the last full 60-base
        # block (or the whole sequence when it is shorter than 60 bases).
        line = ' '* 60
        for key in cutloc[base] :
            s = ''
            if key == length:
                for n in enzymemap[key] :
                    s = Join((s,' ',n))
                l = line[0:(length-1)]
                lineo = Join((l,str(key),s,'\n'))
                line2 = Join((l,a,'\n'))
                linetot = Join((lineo, line2))
                map = Join((map, linetot))
                break
            for n in enzymemap[key] : s = Join((s,' ',n))
            k = key%60
            lineo = Join((line[0:(k-1)],str(key),s,'\n'))
            line = Join((line[0:(k-1)],a,line[k:]))
            line2 = Join((line[0:(k-1)],a,line[k:],'\n'))
            linetot = Join((lineo,line2))
            map = Join((map,linetot))
        mapunit = ''
        mapunit = Join((sequence[base : length], '\n'))
        mapunit = Join((mapunit, a * (length-base), '\n'))
        mapunit = Join((mapunit,revsequence[base:length], '\n'))
        mapunit = Join((mapunit, Join((str.ljust(str(base+1), 15), ' '*(
            length-base-30),str.rjust(str(length), 15),
            '\n\n'))))
        map = Join((map,mapunit))
        return map
###### private method to do lists :
def __next_section(self, ls, into) :
"""FP.__next_section(ls, into) -> string.
ls is a list of tuple (string, [int, int]).
into is a string to which the formatted ls will be added.
Format ls as a string of lines :
The form is :
enzyme1 : position1.
enzyme2 : position2, position3.
then add the formatted ls to tot
return tot."""
ls.sort()
indentation = '\n' + (self.NameWidth + self.Indent) * ' '
linesize = self.linesize - self.MaxSize
pat = re.compile("([\w,\s()]){1,%i}[,\.]"%linesize)
several, Join = '', ''.join
for name, sites in ls :
stringsite = ''
l = Join((', '.join([str(site) for site in sites]), '.'))
if len(l) > linesize :
#
# cut where appropriate and add the indentation
#
l = [x.group() for x in re.finditer(pat, l)]
stringsite = indentation.join(l)
else :
stringsite = l
into = Join((into,
str(name).ljust(self.NameWidth),' : ',stringsite,'\n'))
return into
| {
"content_hash": "628e93451411866223863b7dc8cee93d",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 84,
"avg_line_length": 32.96277915632754,
"alnum_prop": 0.4885576633544113,
"repo_name": "dbmi-pitt/DIKB-Micropublication",
"id": "5c98b5ca2217910845d7bf50fc66a162e30af296",
"size": "13541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/mp-scripts/Bio/Restriction/PrintFormat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3682138"
},
{
"name": "R",
"bytes": "4656"
},
{
"name": "Shell",
"bytes": "786"
}
],
"symlink_target": ""
} |
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with
"--update_goldens" flag set to "True" to update goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import tensorflow as tf
from tensorflow._api import v2 as tf_v2
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, only_test_core_api, default False:
_ONLY_TEST_CORE_API_HELP = """
Some TF APIs are being moved outside of the tensorflow/ directory. There is
no garuntee which versions of these APIs will be present when running this
test. Therefore, do not error out on API changes in non-core TF code
if this flag is set.
"""
# DEFINE_boolean, verbose_diffs, default True:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
# Locations of the golden .pbtxt files, per major API version.
_API_GOLDEN_FOLDER_V1 = 'tensorflow/tools/api/golden/v1'
_API_GOLDEN_FOLDER_V2 = 'tensorflow/tools/api/golden/v2'
# Boilerplate messages shown alongside reported API diffs.
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
# Packages whose goldens are skipped when --only_test_core_api is set.
_NON_CORE_PACKAGES = ['estimator']
def _KeyToFilePath(key, api_version):
  """From a given key, construct a filepath.

  Filepath will be inside golden folder for api_version.
  """

  def _ReplaceCapsWithDash(matchobj):
    # 'X' -> '-x'
    return '-%s' % (matchobj.group(0).lower())

  case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)
  if api_version == 2:
    api_folder = _API_GOLDEN_FOLDER_V2
  else:
    api_folder = _API_GOLDEN_FOLDER_V1
  return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub('((-[a-z]){1})', _ReplaceDashWithCaps,
base_filename_without_ext)
return api_object_key
def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
  """A Visitor that crashes on subclasses of generated proto classes."""
  # Only proto Message classes (other than Message itself) are of interest.
  is_message_subclass = (
      isinstance(parent, type) and issubclass(parent, message.Message))
  if not is_message_subclass or parent is message.Message:
    return
  if message.Message in parent.__bases__:
    # Direct subclasses are the generated proto classes themselves: fine.
    return
  raise NotImplementedError(
      'Object tf.%s is a subclass of a generated proto Message. '
      'They are not yet supported by the API tools.' % path)
def _FilterNonCoreGoldenFiles(golden_file_list):
  """Filter out non-core API pbtxt files.

  Args:
    golden_file_list: a list of golden-file paths.

  Returns:
    The subset of paths whose basename does not start with one of the
    'tensorflow.<package>.' prefixes built from _NON_CORE_PACKAGES.
  """
  # str.startswith accepts a tuple of prefixes, which avoids materializing a
  # throwaway list inside any() for every file.
  filtered_package_prefixes = tuple(
      'tensorflow.%s.' % p for p in _NON_CORE_PACKAGES)
  return [
      f for f in golden_file_list
      if not f.rsplit('/')[-1].startswith(filtered_package_prefixes)
  ]
class ApiCompatibilityTest(test.TestCase):
  """Validates the public TF Python API against checked-in golden files."""

  def __init__(self, *args, **kwargs):
    super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
    # Pre-load the boilerplate messages printed alongside reported diffs.
    golden_update_warning_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE)
    self._update_golden_warning = file_io.read_file_to_string(
        golden_update_warning_filename)
    test_readme_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE)
    self._test_readme_message = file_io.read_file_to_string(
        test_readme_filename)

  def _AssertProtoDictEquals(self,
                             expected_dict,
                             actual_dict,
                             verbose=False,
                             update_goldens=False,
                             additional_missing_object_message='',
                             api_version=2):
    """Diff given dicts of protobufs and report differences a readable way.

    Args:
      expected_dict: a dict of TFAPIObject protos constructed from golden
        files.
      actual_dict: a dict of TFAPIObject protos constructed by reading from
        the TF package linked to the test.
      verbose: Whether to log the full diffs, or simply report which files
        were different.
      update_goldens: Whether to update goldens when there are diffs found.
      additional_missing_object_message: Message to print when a symbol is
        missing.
      api_version: TensorFlow API version to test.
    """
    diffs = []
    verbose_diffs = []
    expected_keys = set(expected_dict.keys())
    actual_keys = set(actual_dict.keys())
    only_in_expected = expected_keys - actual_keys
    only_in_actual = actual_keys - expected_keys
    all_keys = expected_keys | actual_keys
    # Keys whose proto content changed; populated in the loop below.
    updated_keys = []
    for key in all_keys:
      diff_message = ''
      verbose_diff_message = ''
      # First check if the key is not found in one or the other.
      if key in only_in_expected:
        diff_message = 'Object %s expected but not found (removed). %s' % (
            key, additional_missing_object_message)
        verbose_diff_message = diff_message
      elif key in only_in_actual:
        diff_message = 'New object %s found (added).' % key
        verbose_diff_message = diff_message
      else:
        # Do not truncate diff
        self.maxDiff = None  # pylint: disable=invalid-name
        # Now we can run an actual proto diff.
        try:
          self.assertProtoEquals(expected_dict[key], actual_dict[key])
        except AssertionError as e:
          updated_keys.append(key)
          diff_message = 'Change detected in python object: %s.' % key
          verbose_diff_message = str(e)
      # All difference cases covered above. If any difference found, add to the
      # list.
      if diff_message:
        diffs.append(diff_message)
        verbose_diffs.append(verbose_diff_message)
    # If diffs are found, handle them based on flags.
    if diffs:
      diff_count = len(diffs)
      logging.error(self._test_readme_message)
      logging.error('%d differences found between API and golden.', diff_count)
      messages = verbose_diffs if verbose else diffs
      for i in range(diff_count):
        print('Issue %d\t: %s' % (i + 1, messages[i]), file=sys.stderr)
      if update_goldens:
        # Write files if requested.
        logging.warning(self._update_golden_warning)
        # If the keys are only in expected, some objects are deleted.
        # Remove files.
        for key in only_in_expected:
          filepath = _KeyToFilePath(key, api_version)
          file_io.delete_file(filepath)
        # If the files are only in actual (current library), these are new
        # modules. Write them to files. Also record all updates in files.
        for key in only_in_actual | set(updated_keys):
          filepath = _KeyToFilePath(key, api_version)
          file_io.write_string_to_file(
              filepath, text_format.MessageToString(actual_dict[key]))
      else:
        # Fail if we cannot fix the test by updating goldens.
        self.fail('%d differences found between API and golden.' % diff_count)
    else:
      logging.info('No differences found between API and golden.')

  def testNoSubclassOfMessage(self):
    """The default tf namespace must not expose proto Message subclasses."""
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    # Skip compat.v1 and compat.v2 since they are validated in separate tests.
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf, visitor)

  def testNoSubclassOfMessageV1(self):
    """Same check as above, for the compat.v1 namespace."""
    if not hasattr(tf.compat, 'v1'):
      return
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    if FLAGS.only_test_core_api:
      visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    traverse.traverse(tf_v2.compat.v1, visitor)

  def testNoSubclassOfMessageV2(self):
    """Same check as above, for the v2 API surface."""
    if not hasattr(tf.compat, 'v2'):
      return
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    if FLAGS.only_test_core_api:
      visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    traverse.traverse(tf_v2, visitor)

  def _checkBackwardsCompatibility(self,
                                   root,
                                   golden_file_pattern,
                                   api_version,
                                   additional_private_map=None):
    """Traverse `root` and diff its API protos against the goldens.

    Args:
      root: the module object to traverse (e.g. `tf` or `tf_v2`).
      golden_file_pattern: glob pattern matching the golden .pbtxt files.
      api_version: TensorFlow API version to test.
      additional_private_map: optional extra entries merged into the
        visitor's private_map (e.g. to skip compat submodules).
    """
    # Extract all API stuff.
    visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
    public_api_visitor = public_api.PublicAPIVisitor(visitor)
    public_api_visitor.private_map['tf'] = ['contrib']
    public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
    if FLAGS.only_test_core_api:
      public_api_visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    if additional_private_map:
      public_api_visitor.private_map.update(additional_private_map)
    traverse.traverse(root, public_api_visitor)
    proto_dict = visitor.GetProtos()
    # Read all golden files.
    golden_file_list = file_io.get_matching_files(golden_file_pattern)
    if FLAGS.only_test_core_api:
      golden_file_list = _FilterNonCoreGoldenFiles(golden_file_list)

    def _ReadFileToProto(filename):
      """Read a filename, create a protobuf from its contents."""
      ret_val = api_objects_pb2.TFAPIObject()
      text_format.Merge(file_io.read_file_to_string(filename), ret_val)
      return ret_val

    golden_proto_dict = {
        _FileNameToKey(filename): _ReadFileToProto(filename)
        for filename in golden_file_list
    }
    # Diff them. Do not fail if called with update.
    # If the test is run to update goldens, only report diffs but do not fail.
    self._AssertProtoDictEquals(
        golden_proto_dict,
        proto_dict,
        verbose=FLAGS.verbose_diffs,
        update_goldens=FLAGS.update_goldens,
        api_version=api_version)

  def testAPIBackwardsCompatibility(self):
    """Checks the default `tf` namespace against the v1 goldens."""
    api_version = 1
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    self._checkBackwardsCompatibility(
        tf,
        golden_file_pattern,
        api_version,
        # Skip compat.v1 and compat.v2 since they are validated
        # in separate tests.
        additional_private_map={'tf.compat': ['v1', 'v2']})
    # Also check that V1 API has contrib
    self.assertTrue(
        'tensorflow.python.util.lazy_loader.LazyLoader'
        in str(type(tf.contrib)))

  def testAPIBackwardsCompatibilityV1(self):
    """Checks compat.v1 against the v1 goldens."""
    api_version = 1
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    self._checkBackwardsCompatibility(tf_v2.compat.v1, golden_file_pattern,
                                      api_version)

  def testAPIBackwardsCompatibilityV2(self):
    """Checks the v2 API surface against the v2 goldens."""
    api_version = 2
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    self._checkBackwardsCompatibility(
        tf_v2,
        golden_file_pattern,
        api_version,
        additional_private_map={'tf.compat': ['v1']})
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # NOTE(review): argparse's type=bool converts via bool(str), so any
  # non-empty value (including "False") parses as True — confirm intended.
  parser.add_argument(
      '--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)
  # TODO(mikecase): Create Estimator's own API compatibility test or
  # a more general API compatibility test for use for TF components.
  parser.add_argument(
      '--only_test_core_api',
      type=bool,
      default=False,
      help=_ONLY_TEST_CORE_API_HELP)
  parser.add_argument(
      '--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP)
  # Parsed flags land in the module-level FLAGS read by the test methods.
  FLAGS, unparsed = parser.parse_known_args()
  # Now update argv, so that unittest library does not get confused.
  sys.argv = [sys.argv[0]] + unparsed
  test.main()
| {
"content_hash": "d32763a76588b8c6b80b6cc1b70d2b80",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 80,
"avg_line_length": 37.538243626062325,
"alnum_prop": 0.6700626367821296,
"repo_name": "seanli9jan/tensorflow",
"id": "fb489ea80fbdad0612f5ae0af9d91fa0df534115",
"size": "13942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/tools/api/tests/api_compatibility_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
"""
Defines the Singleton metaclass available through :class:`objecttools.Singleton`
"""
__all__ = ('Singleton',)
class Singleton(type):
    """A metaclass for defining singletons.

    The first instantiation of a class with this metaclass creates the one
    instance; every later call to the class returns that same instance.
    """
    def __new__(mcs, name, bases, dict):
        """
        Create a new :class:`Singleton` instance
        :param name: Name of the new class
        :type name: str
        :param bases: Base classes of the new class
        :type bases: Tuple[type, ...]
        :param dict: Attributes of the new class
        :type dict: Dict[Str, Any]
        :return: A new class of type Singleton
        :rtype: Singleton
        """
        return super(Singleton, mcs).__new__(mcs, name, bases, dict)
    def __init__(cls, name, bases, dict):
        """
        Instantiate a :class:`Singleton` class
        :param name: Name of the new class
        :type name: str
        :param bases: Base classes of the new class
        :type bases: Tuple[type, ...]
        :param dict: Attributes of the new class
        :type dict: Dict[Str, Any]
        :return: None
        :rtype: NoneType
        """
        super(Singleton, cls).__init__(name, bases, dict)
        # Capture the class's original __new__/__init__ so the replacement
        # __new__ installed below can build the single instance once.
        old_new = cls.__new__
        __init__ = cls.__init__
        this_cls = cls
        def __new__(cls=None):
            # First call: build the instance, then replace __new__ again
            # with a closure that always returns this same instance.
            self = old_new(this_cls)
            __init__(self)
            this_cls.__self__ = self
            def __new__(cls=None):
                return self
            this_cls.__new__ = staticmethod(__new__)
            return self
        # NOTE(review): on the first instantiation __init__ is invoked here
        # and then again by type.__call__ after __new__ returns — confirm
        # the double initialization is intended.
        cls.__new__ = staticmethod(__new__)
    @classmethod
    def create(mcs, name, dict=None, object_name=None):
        """
        Create a new :class:`Singleton` class
        :param name: Name of the new class (Used in its __repr__ if no object_name)
        :type name: str
        :param dict: Optional dictionary of the classes' attributes
        :type dict: Optional[Dict[str, Any]]
        :param object_name: Name of an instance of the singleton. Used in __repr__.
        :type object_name: Optional[str]
        :return: A new Singleton instance
        :rtype: Singleton
        """
        if dict is None:
            dict = {}
        # Instances print as "Name()" unless an explicit object_name is given.
        _repr = name + '()' if object_name is None else object_name
        def __repr__(self=None):
            return _repr
        dict.setdefault('__repr__', __repr__)
        return mcs(name, (object,), dict)
    @classmethod
    def as_decorator(mcs, cls):
        """
        Use :class:`Singleton` as a decorator for Python 2/3 compatibility::
            @Singleton.as_decorator
            class SingletonType(object):
                def __repr__(self):
                    return 'singleton'
            singleton = SingletonType()
        :param cls: Class to become a singleton
        :type cls: type
        :return: The new singleton
        :rtype: Singleton
        """
        return mcs(cls.__name__, cls.__bases__, cls.__dict__.copy())
    def __repr__(cls):
        # The class itself prints as its bare name.
        return cls.__name__
| {
"content_hash": "aecbe2d9afbb5229db84cfc4ae01fad2",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 83,
"avg_line_length": 30.01010101010101,
"alnum_prop": 0.5382026253786604,
"repo_name": "MitalAshok/objecttools",
"id": "727ad3ab2932f64c937462269f3bef982c2d0c4f",
"size": "2971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "objecttools/singletons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52953"
}
],
"symlink_target": ""
} |
import ansible
from ansible.callbacks import vv
from ansible.errors import AnsibleError as ae
from ansible.runner.return_data import ReturnData
from ansible.utils import parse_kv, check_conditional
import ansible.utils.template as template
class ActionModule(object):
    ''' Create inventory groups based on variables '''
    ### We need to be able to modify the inventory
    # BYPASS_HOST_LOOP: run once for all hosts instead of per-host, since
    # this plugin groups the whole host set in a single pass.
    BYPASS_HOST_LOOP = True
    TRANSFERS_FILES = False
    def __init__(self, runner):
        self.runner = runner
    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        """Group hosts by the per-host value of the templated 'key' argument
        and add the resulting groups (and their hosts) to the inventory.

        Returns a ReturnData whose result contains 'changed' and the
        computed 'groups' mapping (group name -> list of host names).
        """
        # the group_by module does not need to pay attention to check mode.
        # it always runs.
        # module_args and complex_args have already been templated for the first host.
        # Use them here only to check that a key argument is provided.
        args = {}
        if complex_args:
            args.update(complex_args)
        args.update(parse_kv(module_args))
        if not 'key' in args:
            raise ae("'key' is a required argument.")
        vv("created 'group_by' ActionModule: key=%s"%(args['key']))
        inventory = self.runner.inventory
        result = {'changed': False}
        ### find all groups
        groups = {}
        for host in self.runner.host_set:
            # Per-host template context: global inject vars overlaid with
            # the host's own vars.
            data = {}
            data.update(inject)
            data.update(inject['hostvars'][host])
            conds = self.runner.conditional
            if type(conds) != list:
                conds = [ conds ]
            # Skip hosts for which any 'when' conditional fails.
            next_host = False
            for cond in conds:
                if not check_conditional(cond, self.runner.basedir, data, fail_on_undefined=self.runner.error_on_undefined_vars):
                    next_host = True
                    break
            if next_host:
                continue
            # Template original module_args and complex_args from runner for each host.
            host_module_args = template.template(self.runner.basedir, self.runner.module_args, data)
            host_complex_args = template.template(self.runner.basedir, self.runner.complex_args, data)
            host_args = {}
            if host_complex_args:
                host_args.update(host_complex_args)
            host_args.update(parse_kv(host_module_args))
            # Group names must not contain spaces.
            group_name = host_args['key']
            group_name = group_name.replace(' ','-')
            if group_name not in groups:
                groups[group_name] = []
            groups[group_name].append(host)
        result['groups'] = groups
        ### add to inventory
        for group, hosts in groups.items():
            inv_group = inventory.get_group(group)
            if not inv_group:
                # New group: create it and attach it under 'all'.
                inv_group = ansible.inventory.Group(name=group)
                inventory.add_group(inv_group)
                inventory.get_group('all').add_child_group(inv_group)
            inv_group.vars = inventory.get_group_variables(group, update_cached=False, vault_password=inventory._vault_password)
            for host in hosts:
                # Drop any cached per-host vars so group membership changes
                # are reflected on the next lookup.
                if host in self.runner.inventory._vars_per_host:
                    del self.runner.inventory._vars_per_host[host]
                inv_host = inventory.get_host(host)
                if not inv_host:
                    inv_host = ansible.inventory.Host(name=host)
                if inv_group not in inv_host.get_groups():
                    result['changed'] = True
                    inv_group.add_host(inv_host)
        return ReturnData(conn=conn, comm_ok=True, result=result)
| {
"content_hash": "1be9613e6f21b0f9506f997550e32372",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 132,
"avg_line_length": 38.956043956043956,
"alnum_prop": 0.5858956276445698,
"repo_name": "mith1979/ansible_automation",
"id": "25c2073fa0fc3a7ec0629a70cb3ed377cf340b19",
"size": "4249",
"binary": false,
"copies": "134",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/ansible/runner/action_plugins/group_by.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
"This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.http import HttpResponseRedirect
from django.urls import get_script_prefix, is_valid_path
from django.utils import translation
from django.utils.cache import patch_vary_headers
class LocaleMiddleware(object):
    """
    Parse each request and install the best-matching translation for the
    current thread, so pages can be served in the language the user
    desires (when that language is available, of course).
    """
    response_redirect_class = HttpResponseRedirect

    def process_request(self, request):
        urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
        uses_i18n_patterns, default_is_prefixed = (
            is_language_prefix_patterns_used(urlconf))
        language = translation.get_language_from_request(
            request, check_path=uses_i18n_patterns)
        path_language = translation.get_language_from_path(request.path_info)
        # With i18n URL patterns and an unprefixed default language, a path
        # without a language prefix means the default language applies.
        if uses_i18n_patterns and not path_language and not default_is_prefixed:
            language = settings.LANGUAGE_CODE
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()

    def process_response(self, request, response):
        language = translation.get_language()
        path_language = translation.get_language_from_path(request.path_info)
        urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
        uses_i18n_patterns, _unused = is_language_prefix_patterns_used(urlconf)
        if response.status_code == 404 and uses_i18n_patterns and not path_language:
            # A 404 on an unprefixed path may resolve once the active
            # language code is prepended; redirect there if it does.
            prefixed_path = '/%s%s' % (language, request.path_info)
            found = is_valid_path(prefixed_path, urlconf)
            needs_slash = (
                not found and
                settings.APPEND_SLASH and
                not prefixed_path.endswith('/') and
                is_valid_path('%s/' % prefixed_path, urlconf)
            )
            if found or needs_slash:
                script_prefix = get_script_prefix()
                # Insert language after the script prefix and before the
                # rest of the URL
                language_url = request.get_full_path(
                    force_append_slash=needs_slash).replace(
                        script_prefix, '%s%s/' % (script_prefix, language), 1)
                return self.response_redirect_class(language_url)
        if not (uses_i18n_patterns and path_language):
            patch_vary_headers(response, ('Accept-Language',))
        if 'Content-Language' not in response:
            response['Content-Language'] = language
        return response
| {
"content_hash": "45ccf3f8bb49252ea83f1ae7007f34b7",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 98,
"avg_line_length": 47.37096774193548,
"alnum_prop": 0.658835546475996,
"repo_name": "asser/django",
"id": "c8cbd58685e8c89a3123be7166bf79c49465a01d",
"size": "2937",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/middleware/locale.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52294"
},
{
"name": "HTML",
"bytes": "174530"
},
{
"name": "JavaScript",
"bytes": "248130"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11350632"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import json
import logging
import os
from ..constants import IS_WINDOWS_PLATFORM
# Relative locations of the modern JSON config and the legacy dockercfg file.
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
log = logging.getLogger(__name__)
def find_config_file(config_path=None):
    """Return the first existing Docker config file path, or None.

    Search order: the explicit argument, the $DOCKER_CONFIG location, then
    the default and legacy file names under the home directory.
    """
    candidates = [
        config_path,  # 1
        config_path_from_environment(),  # 2
        os.path.join(home_dir(), DOCKER_CONFIG_FILENAME),  # 3
        os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME),  # 4
    ]
    paths = [candidate for candidate in candidates if candidate]

    log.debug(f"Trying paths: {repr(paths)}")

    for path in paths:
        if os.path.exists(path):
            log.debug(f"Found file at path: {path}")
            return path

    log.debug("No config file found")

    return None
def config_path_from_environment():
    """Return the config path under $DOCKER_CONFIG, or None when unset."""
    config_dir = os.environ.get('DOCKER_CONFIG')
    if config_dir:
        return os.path.join(
            config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
    return None
def home_dir():
    """
    Get the user's home directory, using the same logic as the Docker Engine
    client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
    """
    if not IS_WINDOWS_PLATFORM:
        return os.path.expanduser('~')
    return os.environ.get('USERPROFILE', '')
def load_general_config(config_path=None):
    """Locate and parse the Docker config file as JSON.

    Returns an empty dict when no file is found or parsing fails.
    """
    located = find_config_file(config_path)
    if not located:
        return {}
    try:
        with open(located) as fd:
            return json.load(fd)
    except (OSError, ValueError) as exc:
        # A legacy `.dockercfg` file holds no JSON payload, so a parse
        # failure here is expected for that format.
        log.debug(exc)
    log.debug("All parsing attempts failed - returning empty config")
    return {}
| {
"content_hash": "225d2cfa41f886ee175f0095c291ad27",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 26.12121212121212,
"alnum_prop": 0.6305104408352669,
"repo_name": "docker/docker-py",
"id": "8e24959a5d2163c7ffc9f6ae3f5b467338bfe7e7",
"size": "1724",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "docker/utils/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2114"
},
{
"name": "Makefile",
"bytes": "4612"
},
{
"name": "Python",
"bytes": "1073920"
},
{
"name": "Shell",
"bytes": "1165"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import hashlib
import os
from pex.common import open_zip
from pex.typing import TYPE_CHECKING, Generic
if TYPE_CHECKING:
from typing import IO, Callable, Iterable, Iterator, Optional, Protocol, Type, TypeVar
class HintedDigest(Protocol):
    """The minimal digest interface needed to stream data into a hash."""

    @property
    def block_size(self):
        # type: () -> int
        pass

    def update(self, data):
        # type: (bytes) -> None
        pass
class Hasher(HintedDigest, Protocol):
    """A HintedDigest that also reports its name and produces digests."""

    @property
    def name(self):
        # type: () -> str
        pass

    def digest(self):
        # type: () -> bytes
        pass

    def hexdigest(self):
        # type: () -> str
        pass
class Fingerprint(str):
    """A hex digest string tagged with the algorithm that produced it."""

    class Algorithm(object):
        """Descriptor deriving the algorithm name from the owning class name.

        E.g. ``Sha1Fingerprint`` -> "sha1".  The computed name is cached on
        the owning class the first time it is read.
        """

        def __get__(
            self,
            _instance,  # type: Optional[Fingerprint]
            owner,  # type: Type[Fingerprint]
        ):
            # type: (...) -> str
            # Look only in the owner's own __dict__.  Using getattr here
            # would find a value cached on a base class, so a subclass
            # defined after the base's `algorithm` was read would wrongly
            # report the base's cached name.
            alg = owner.__dict__.get("_alg")
            if alg is None:
                alg = owner.__name__[: -len(Fingerprint.__name__)].lower()
                setattr(owner, "_alg", alg)
            return alg

    algorithm = Algorithm()

    @classmethod
    def new_hasher(cls, data=b""):
        # type: (bytes) -> HashlibHasher
        """Return a hasher for this fingerprint type, primed with `data`."""
        return HashlibHasher(cls, data=data)

    def __eq__(self, other):
        # Fingerprints of different algorithms never compare equal, even
        # when their hex digests happen to match.
        if isinstance(other, Fingerprint) and type(self) != type(other):
            return False
        return super(Fingerprint, self).__eq__(other)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # type: () -> int
        return hash((self.algorithm, str(self)))
def new_fingerprint(
    algorithm,  # type: str
    hexdigest,  # type: str
):
    # type: (...) -> Fingerprint
    """Instantiate the Fingerprint subclass registered for `algorithm`.

    Raises ValueError when no subclass matches the algorithm name.
    """
    candidates = Fingerprint.__subclasses__()
    for candidate in candidates:
        if candidate.algorithm == algorithm:
            return candidate(hexdigest)
    raise ValueError(
        "There is no fingerprint type registered for hash algorithm {algorithm}. The supported "
        "algorithms are: {algorithms}".format(
            algorithm=algorithm,
            algorithms=", ".join(fp.algorithm for fp in candidates),
        )
    )
# Concrete fingerprint type; its algorithm name ("sha1") is derived from the
# class name by Fingerprint.Algorithm.
class Sha1Fingerprint(Fingerprint):
    pass
# Concrete fingerprint type; its algorithm name ("sha256") is derived from
# the class name by Fingerprint.Algorithm.
class Sha256Fingerprint(Fingerprint):
    pass
if TYPE_CHECKING:
    # Fingerprint subtype produced by a HashlibHasher's hexdigest().
    _F = TypeVar("_F", bound=Fingerprint)
class HashlibHasher(Generic["_F"]):
    """Wraps a hashlib hash and types its hexdigest as a Fingerprint subclass."""

    def __init__(
        self,
        hexdigest_type,  # type: Type[_F]
        data=b"",  # type: bytes
    ):
        # type: (...) -> None
        self._hexdigest_type = hexdigest_type
        # The hashlib algorithm name is derived from the fingerprint type's
        # class name (Fingerprint.Algorithm).
        self._hasher = hashlib.new(hexdigest_type.algorithm, data)

    @property
    def name(self):
        # type: () -> str
        return self._hasher.name

    @property
    def block_size(self):
        # type: () -> int
        return self._hasher.block_size

    def update(self, data):
        # type: (bytes) -> None
        self._hasher.update(data)

    def digest(self):
        # type: () -> bytes
        return self._hasher.digest()

    def hexdigest(self):
        # type: () -> _F
        # Wrap the hex digest in the typed Fingerprint subclass.
        return self._hexdigest_type(self._hasher.hexdigest())
class Sha1(HashlibHasher[Sha1Fingerprint]):
    """A hasher whose hexdigest() returns a Sha1Fingerprint."""

    def __init__(self, data=b""):
        # type: (bytes) -> None
        super(Sha1, self).__init__(hexdigest_type=Sha1Fingerprint, data=data)
class Sha256(HashlibHasher[Sha256Fingerprint]):
    """A hasher whose hexdigest() returns a Sha256Fingerprint."""

    def __init__(self, data=b""):
        # type: (bytes) -> None
        super(Sha256, self).__init__(hexdigest_type=Sha256Fingerprint, data=data)
class MultiDigest(object):
    """Fans update() calls out to several digests at once."""

    def __init__(self, digests):
        # type: (Iterable[HintedDigest]) -> None
        self._digests = digests
        # Advertise the largest block size among the wrapped digests so
        # callers choose a chunk size acceptable to all of them.
        self._block_size = max(sink.block_size for sink in digests)

    @property
    def block_size(self):
        # type: () -> int
        return self._block_size

    def update(self, data):
        # type: (bytes) -> None
        for sink in self._digests:
            sink.update(data)
def update_hash(
    filelike,  # type: IO[bytes]
    digest,  # type: HintedDigest
):
    # type: (...) -> None
    """Update the digest of a single file in a memory-efficient manner."""
    read_size = digest.block_size * 1024
    while True:
        chunk = filelike.read(read_size)
        if not chunk:
            break
        digest.update(chunk)
def file_hash(
    path,  # type: str
    digest,  # type: HintedDigest
):
    # type: (...) -> None
    """Digest of a single file in a memory-efficient manner."""
    read_size = digest.block_size * 1024
    with open(path, "rb") as fp:
        for chunk in iter(lambda: fp.read(read_size), b""):
            digest.update(chunk)
def dir_hash(
    directory,  # type: str
    digest,  # type: HintedDigest
    dir_filter=lambda dirs: dirs,  # type: Callable[[Iterable[str]], Iterable[str]]
    file_filter=lambda files: files,  # type: Callable[[Iterable[str]], Iterable[str]]
):
    # type: (...) -> None
    """Digest the contents of a directory in a reproducible manner."""
    top = os.path.realpath(os.path.normpath(directory))
    relpaths = []
    for root, dirs, files in os.walk(top):
        # Prune in place so os.walk skips filtered directories entirely.
        dirs[:] = list(dir_filter(dirs))
        for f in file_filter(files):
            relpaths.append(os.path.relpath(os.path.join(root, f), top))
    relpaths.sort()
    # Hash the name manifest first (always with / as the separator, to match
    # zip naming), then each file's contents in the same sorted order.
    manifest = "".join(p.replace(os.sep, "/") for p in relpaths)
    digest.update(manifest.encode("utf-8"))
    for relpath in relpaths:
        file_hash(os.path.join(directory, relpath), digest)
def zip_hash(
    zip_path,  # type: str
    digest,  # type: HintedDigest
    relpath=None,  # type: Optional[str]
    dir_filter=lambda dirs: dirs,  # type: Callable[[Iterable[str]], Iterable[str]]
    file_filter=lambda files: files,  # type: Callable[[Iterable[str]], Iterable[str]]
):
    # type: (...) -> None
    """Digest the contents of a zip file in a reproducible manner.
    If a `relpath` is specified, descend into that path only and take the hash with names recoded
    in the hash relative to the `relpath`.
    """
    with open_zip(zip_path) as zf:
        # NOTE(review): this is a plain string-prefix match, so relpath="foo"
        # would also match "foobar/..." entries; callers appear to pass
        # directory-style prefixes — confirm.
        namelist = (
            [name for name in zf.namelist() if name.startswith(relpath)]
            if relpath
            else zf.namelist()
        )
        # Directory entries end with "/"; apply the dir filter to basenames.
        dirs = frozenset(name.rstrip("/") for name in namelist if name.endswith("/"))
        accept_dir_names = frozenset(dir_filter(os.path.basename(d) for d in dirs))
        accept_dirs = frozenset(d for d in dirs if os.path.basename(d) in accept_dir_names)
        reject_dirs = dirs - accept_dirs
        # Keep only plain files that do not live under a rejected directory.
        accept_files = sorted(
            file_filter(
                name
                for name in namelist
                if not (
                    name.endswith("/")
                    or any(name.startswith(reject_dir) for reject_dir in reject_dirs)
                )
            )
        )
        hashed_names = (
            [os.path.relpath(name, relpath) for name in accept_files] if relpath else accept_files
        )
        # Name manifest first, then contents, mirroring dir_hash.
        digest.update("".join(hashed_names).encode("utf-8"))
        for filename in accept_files:
            # Bug fix: close each member stream deterministically instead of
            # leaking it until garbage collection.
            with zf.open(filename, "r") as member:
                update_hash(member, digest)
| {
"content_hash": "466eed5631e44f394e25bd42f80b8169",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 98,
"avg_line_length": 28.40392156862745,
"alnum_prop": 0.5715863592434074,
"repo_name": "pantsbuild/pex",
"id": "b239b3124bfc066fce63ea32e22b34ee2cc9faf7",
"size": "7375",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pex/hashing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1379"
},
{
"name": "Python",
"bytes": "2190044"
},
{
"name": "Shell",
"bytes": "1472"
}
],
"symlink_target": ""
} |
import numpy as np
import time
import cv2
from threading import Thread
#from picamera.array import PiRGBArray
#from picamera import PiCamera
import openface
from sklearn.externals import joblib
from livenessDetection import colbp, lpq
import threading
class DetectFace(Thread):
    """
    Face-detection worker thread.

    While the hardware motion sensor reports presence, grabs camera frames,
    finds faces with dlib/openface, and runs an SVM-based liveness check on
    each cropped face in a short-lived helper thread.
    """
    def __init__(self, hardware_control, Autonomous):
        # Guards against launching more than one liveness computation at a time.
        self.processing = False
        self.autonomous = Autonomous
        # Running count of processed crops; used to average the liveness score.
        self.count = 1
        self.accuracy = 0.0
        self.predict = 0
        # Rolling fraction of crops classified as a live person.
        self.liveness_prediction = 0.0
        self.hardware_control = hardware_control
        # When True, each detected face crop is also written to disk.
        self.save_pictures = True
        super(DetectFace, self).__init__()
    """
    Run thread that detect faces. It detects faces when the sensor is 1 (up) which means there is some one at the
    door. Then it calls the RecognizeFace class and if the person at the door is authorized to enter it opens the
    door calling the Arduino.
    """
    def run(self):
        # Analyse one cropped face for liveness; runs in a helper thread and
        # bails out immediately if another analysis is still in flight.
        def liveness_detection(frame):
            if not self.processing:
                start = time.time()
                threads = []
                self.processing = True
                # cv2.imwrite('frame'+str(count)+'.png', frame)
                frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                frame_ycrcb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCrCb)
                #print('Worker')
                deltas = [1, 2, 4]
                ases = [2, 4, 8]
                # Texture features: LPQ over each YCrCb channel plus
                # co-occurrence LBP over each HSV channel, concatenated.
                lpq_res = lpq(frame_ycrcb[:, :, 0])
                lpq_res += lpq(frame_ycrcb[:, :, 1])
                lpq_res += lpq(frame_ycrcb[:, :, 2])
                lpq_res = np.append(lpq_res, colbp(frame_hsv[:, :, 0], 1, 2))
                lpq_res = np.append(lpq_res, colbp(frame_hsv[:, :, 1], 2, 4))
                lpq_res = np.append(lpq_res, colbp(frame_hsv[:, :, 2], 4, 8))
                # SVM label 0.0 means "live person", anything else is a spoof.
                predict = clf.predict([lpq_res])
                prediction = 'person' if predict[0] == 0.0 else 'fake'
                print('prediction: ' + prediction)
                self.accuracy += (predict[0] == 0)
                # accuracy /= 100.
                self.liveness_prediction = self.accuracy / (self.count * 1.0)
                #print(self.liveness_prediction)
                # print('accuracy {} | {}'.format(self.accuracy / self.count * 1.0, self.liveness_prediction))
                self.count += 1
                end = time.time()
                #print('Worker out {}'.format(end - start))
                self.processing = False
            return
        cap = cv2.VideoCapture(0)
        dLibFacePredictor = '/home/pi/openface/models/dlib/shape_predictor_68_face_landmarks.dat'
        align = openface.AlignDlib(dLibFacePredictor)
        # Minimum face size and the square size crops are resized to.
        m = n = imgDim = 96
        time.sleep(0.02)
        threads = []
        # Pre-trained liveness SVM, loaded once per run.
        clf = joblib.load('svm.pkl')
        while True:
            if self.hardware_control.sensorStatus == 1:
                # Capture frame-by-frame
                ret, frame = cap.read(0)
                if frame is not None:
                    cv2.imshow('frame', frame)
                    # Skip detection while a liveness check is running or the
                    # autonomous subsystem is in training mode.
                    if not self.processing and self.autonomous.training is False:
                        # buf = np.asarray(frame)
                        # rgbFrame = np.zeros((480, 640, 3), dtype=np.uint8)
                        # rgbFrame[:, :, 0] = buf[:, :, 2]
                        # rgbFrame[:, :, 1] = buf[:, :, 1]
                        # rgbFrame[:, :, 2] = buf[:, :, 0]
                        rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                        bbs = align.getAllFaceBoundingBoxes(rgbFrame)
                        print('number of faces: %s' % len(bbs))
                        face_number = 0
                        for bb in bbs:
                            try:
                                # Only consider faces larger than m x n pixels.
                                if bb.bottom() - bb.top() > m and bb.right() - bb.left() > n:
                                    cropped_frame = frame[bb.top(): bb.bottom(), bb.left(): bb.right()].copy()
                                    if self.save_pictures:
                                        cv2.imshow('saved img', cropped_frame)
                                        cv2.imwrite('face' + str(face_number) + '.jpg', cropped_frame)
                                    cropped_frame = cv2.resize(cropped_frame, (imgDim, imgDim), interpolation=cv2.INTER_CUBIC)
                                    face_number += 1
                                    t = threading.Thread(target=liveness_detection, args=(cropped_frame,))
                                    threads.append(t)
                                    t.start()
                            except:
                                pass
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        cap.release()
        cv2.destroyAllWindows()
| {
"content_hash": "a2566b84b54511d36a263d5e3be3ce90",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 122,
"avg_line_length": 37.818897637795274,
"alnum_prop": 0.493649802206954,
"repo_name": "psilva-leo/AutonomousDoorman",
"id": "b26fce4b41ba58e17c0a5d9341389b9ddc0133ff",
"size": "4803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system/detectFace2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "3117"
},
{
"name": "CSS",
"bytes": "904511"
},
{
"name": "HTML",
"bytes": "63551"
},
{
"name": "JavaScript",
"bytes": "14188"
},
{
"name": "Python",
"bytes": "40610"
},
{
"name": "TypeScript",
"bytes": "95004"
}
],
"symlink_target": ""
} |
import os
import logging
from pymongo import MongoClient
from tornado.httpserver import HTTPServer
from tornado.web import Application
from tornado.ioloop import IOLoop
from tornado.options import options, parse_command_line
from jinja2 import ChoiceLoader, FileSystemLoader
import settings
from lib.template import JinjaLoader
from lib.misc import install_tornado_shutdown_handler
from lib.request_handlers import SmartStaticFileHandler, MultiFileFinder
from lib.request_handlers import ResourceHandler
from handlers import BaseHandler, UploadHandler
class ShireWeb(object):
    """Tornado application bootstrap.

    Combines project-supplied template/static paths with this package's
    bundled ones, connects to MongoDB, registers upload/resource routes, and
    serves HTTP until shutdown.
    """
    def get_settings(self, proj_template_path, proj_static_paths):
        # Options must be defined before the command line is parsed.
        settings.define_app_options()
        parse_command_line(final=True)
        self_dir_path = os.path.abspath(os.path.dirname(__file__))
        # Project templates are searched first, then the bundled defaults.
        loader = ChoiceLoader([
            FileSystemLoader(proj_template_path),
            FileSystemLoader(os.path.join(self_dir_path, 'templates')),
        ])
        # Class-level side effect: every SmartStaticFileHandler instance
        # shares this finder.
        SmartStaticFileHandler.file_finder = MultiFileFinder(
            proj_static_paths,
            os.path.join(self_dir_path, 'static'))
        return {
            'template_loader': JinjaLoader(loader=loader, auto_escape=False),
            'debug': options.debug,
            'cookie_secret': options.cookie_secret,
            'xsrf_cookies': True,
            'static_path': u'/static/',
            'static_handler_class': SmartStaticFileHandler,
        }
    def __init__(self, routes, template_path, proj_static_paths=[],
                 **more_settings):
        # NOTE(review): the mutable default [] is shared across calls; it is
        # only read here so this looks harmless, but worth confirming.
        the_settings = self.get_settings(template_path, proj_static_paths)
        the_settings.update(more_settings)
        # Built-in routes are appended after the caller's (mutates the
        # caller's list in place).
        routes.extend([
            (r'/upload', UploadHandler),
            (r"/data/img/(.*)", ResourceHandler,
                {
                    "valid_file_types": ["jpg", "png", "wmf"],
                    "path": options.img_store_path
                }),
        ])
        self.app = Application(routes, **the_settings)
        self.app.db = self.setup_db()
        self.app.img_prefix = options.img_prefix
        self.app.img_store_path = options.img_store_path
    def setup_db(self):
        # Connect to MongoDB using command-line options; returns the database
        # handle (connection is lazy in pymongo).
        host = options.mongodb_host
        port = options.mongodb_port
        name = options.mongodb_name
        logging.info("connecting to database %s:%s, %s ...", host, port, name)
        client = MongoClient(host, port)
        return client[name]
    def run(self):
        # Start the HTTP server and block on the IOLoop until shutdown.
        logging.info('Runing at http://127.0.0.1:%s in %s mode',
                     options.port, 'debug' if options.debug else 'production')
        server = HTTPServer(self.app, xheaders=True)
        server.listen(options.port)
        install_tornado_shutdown_handler(IOLoop.instance(), server)
        logging.info('Good to go!')
        IOLoop.instance().start()
        logging.info('Exiting...waiting for background jobs done...')
        logging.info('Done. Bye.')
if __name__ == "__main__":
    # Demo entry point: serve a single hello-world page at /.
    class _HelloworldHandler(BaseHandler):
        def get(self):
            self.render('hello.html', word='hello')
    routes = [
        (r'/', _HelloworldHandler),
    ]
    server = ShireWeb(routes, 'templates')
    server.run()
| {
"content_hash": "c1c184a55c300c03635c797bee784866",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 32.72164948453608,
"alnum_prop": 0.6219281663516069,
"repo_name": "leonsim/me",
"id": "f458136daa64c83dec12c210f6d29b57d19715ba",
"size": "3213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shireweb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120029"
},
{
"name": "JavaScript",
"bytes": "575915"
},
{
"name": "Python",
"bytes": "218405"
}
],
"symlink_target": ""
} |
import argparse
import os
def contains_license_block(source_file):
  """Return True if `source_file` begins with the expected license header.

  Both the `#`-comment form (Python/shell/GN) and the `//`-comment form
  (C-family) of the header are accepted.
  """
  # This check is somewhat easier than in the engine because all sources need to
  # have the same license.
  py_license = """# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file."""
  c_license = py_license.replace('#', '//')
  # Make sure we don't read the entire file into memory, and open the file
  # only once instead of once per license variant.
  read_size = max(len(py_license), len(c_license))
  with open(source_file) as source:
    prefix = source.read(read_size)
  # startswith accepts a tuple of alternatives.
  return prefix.startswith((c_license, py_license))
def is_source_file(path):
  """Return True if `path`'s basename ends with a known source extension."""
  known_extensions = (
      '.cc',
      '.cpp',
      '.c',
      '.h',
      '.hpp',
      '.py',
      '.sh',
      '.gn',
      '.gni',
      '.glsl',
      '.sl.h',
      '.vert',
      '.frag',
      '.tesc',
      '.tese',
      '.yaml',
      '.dart',
  )
  # str.endswith accepts a tuple of suffixes, replacing the manual loop.
  return os.path.basename(path).endswith(known_extensions)
# Checks that all source files have the same license preamble.
def main():
  """Walk --source-root and fail on any source file missing the license."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--source-root', type=str, required=True, help='The source root.'
  )
  args = parser.parse_args()
  assert os.path.exists(args.source_root)
  # Walk the tree once, collecting every path that looks like a source file.
  source_files = {
      os.path.join(root, file_name)
      for root, _, file_names in os.walk(os.path.abspath(args.source_root))
      for file_name in file_names
      if is_source_file(os.path.join(root, file_name))
  }
  for source_file in source_files:
    if not contains_license_block(source_file):
      raise Exception(
          'Could not find valid license block in source ', source_file
      )
if __name__ == '__main__':
  # Allow running this checker directly as a script.
  main()
| {
"content_hash": "5822207a0a6eb80304fecc7a6668f75c",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 80,
"avg_line_length": 24.07894736842105,
"alnum_prop": 0.614207650273224,
"repo_name": "flutter/engine",
"id": "45607b7fb7f957706b5c322a6d8a3da2bb4517e5",
"size": "1992",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "impeller/tools/check_licenses.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5136"
},
{
"name": "C",
"bytes": "370641"
},
{
"name": "C++",
"bytes": "11452617"
},
{
"name": "CSS",
"bytes": "64"
},
{
"name": "Dart",
"bytes": "7391361"
},
{
"name": "Dockerfile",
"bytes": "3107"
},
{
"name": "GLSL",
"bytes": "129290"
},
{
"name": "HLSL",
"bytes": "898"
},
{
"name": "HTML",
"bytes": "200"
},
{
"name": "Java",
"bytes": "2441362"
},
{
"name": "JavaScript",
"bytes": "2368"
},
{
"name": "Objective-C",
"bytes": "437121"
},
{
"name": "Objective-C++",
"bytes": "1902211"
},
{
"name": "Python",
"bytes": "293788"
},
{
"name": "Roff",
"bytes": "55608"
},
{
"name": "Shell",
"bytes": "67789"
}
],
"symlink_target": ""
} |
'''Encodings support
This is the only module from which functions obtaining encoding should be
exported. Note: you should always care about errors= argument since it is not
guaranteed that encoding returned by some function can encode/decode given
string.
All functions in this module must always return a valid encoding. Most of them
are not thread-safe.
'''
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import locale
def get_preferred_file_name_encoding():
	'''Get preferred file name encoding

	Tries the filesystem encoding, then the locale's preferred encoding,
	falling back to UTF-8.
	'''
	for candidate in (sys.getfilesystemencoding(), locale.getpreferredencoding()):
		if candidate:
			return candidate
	return 'utf-8'
def get_preferred_file_contents_encoding():
	'''Get encoding preferred for file contents

	Uses the locale's preferred encoding, falling back to UTF-8.
	'''
	preferred = locale.getpreferredencoding()
	if preferred:
		return preferred
	return 'utf-8'
def get_preferred_output_encoding():
	'''Get encoding that should be used for printing strings

	.. warning::
		Falls back to ASCII, so that output is most likely to be displayed
		correctly.
	'''
	candidates = []
	# LC_MESSAGES is not available on all platforms (e.g. Windows).
	if hasattr(locale, 'LC_MESSAGES'):
		candidates.append(locale.getlocale(locale.LC_MESSAGES)[1])
	candidates.append(locale.getdefaultlocale()[1])
	for candidate in candidates:
		if candidate:
			return candidate
	return 'ascii'
def get_preferred_input_encoding():
	'''Get encoding that should be used for reading shell command output

	.. warning::
		Falls back to latin1 so that function is less likely to throw as decoded
		output is primary searched for ASCII values.
	'''
	candidates = []
	# LC_MESSAGES is not available on all platforms (e.g. Windows).
	if hasattr(locale, 'LC_MESSAGES'):
		candidates.append(locale.getlocale(locale.LC_MESSAGES)[1])
	candidates.append(locale.getdefaultlocale()[1])
	for candidate in candidates:
		if candidate:
			return candidate
	return 'latin1'
def get_preferred_arguments_encoding():
	'''Get encoding that should be used for command-line arguments

	.. warning::
		Falls back to latin1 so that function is less likely to throw as
		non-ASCII command-line arguments most likely contain non-ASCII
		filenames and screwing them up due to unidentified locale is not much of
		a problem.
	'''
	default_encoding = locale.getdefaultlocale()[1]
	if default_encoding:
		return default_encoding
	return 'latin1'
def get_preferred_environment_encoding():
	'''Get encoding that should be used for decoding environment variables
	'''
	preferred = locale.getpreferredencoding()
	if preferred:
		return preferred
	return 'utf-8'
def get_unicode_writer(stream=sys.stdout, encoding=None, errors='replace'):
	'''Get function which will write unicode string to the given stream

	Writing is done using encoding returned by
	:py:func:`get_preferred_output_encoding` unless ``encoding`` overrides it.

	:param file stream:
		Stream to write to. Default value is :py:attr:`sys.stdout`.
	:param str encoding:
		Determines which encoding to use. If this argument is specified then
		:py:func:`get_preferred_output_encoding` is not used.
	:param str errors:
		Determines what to do with characters which cannot be encoded. See
		``errors`` argument of :py:func:`codecs.encode`.

	:return: Callable which writes unicode string to the given stream using
		the preferred output encoding.
	'''
	use_encoding = encoding or get_preferred_output_encoding()
	# On Python 3 text streams expose the underlying binary buffer; write the
	# encoded bytes there when available, otherwise to the stream itself.
	if sys.version_info < (3,) or not hasattr(stream, 'buffer'):
		target = stream
	else:
		target = stream.buffer
	def write_unicode(s):
		return target.write(s.encode(use_encoding, errors))
	return write_unicode
| {
"content_hash": "df7382feee5bd96e7a171a5964a6b6ed",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 84,
"avg_line_length": 26.276422764227643,
"alnum_prop": 0.729269801980198,
"repo_name": "S0lll0s/powerline",
"id": "76a51d814ce82a7261f9d22a783a062d645b28bc",
"size": "3263",
"binary": false,
"copies": "16",
"ref": "refs/heads/develop",
"path": "powerline/lib/encoding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3781"
},
{
"name": "Lua",
"bytes": "400"
},
{
"name": "Python",
"bytes": "785212"
},
{
"name": "Shell",
"bytes": "58319"
},
{
"name": "VimL",
"bytes": "17731"
}
],
"symlink_target": ""
} |
import itertools
import re
from eth_utils import (
remove_0x_prefix,
to_dict,
)
from .filesystem import (
is_under_path,
)
from .string import (
normalize_class_name,
)
def is_project_contract(contracts_source_dirs, contract_data):
    """Return True if the contract's source file lives under any of the
    project's contract source directories."""
    source_path = contract_data['source_path']
    for source_dir in contracts_source_dirs:
        if is_under_path(source_dir, source_path):
            return True
    return False
def is_test_contract(tests_dir, contract_data):
    # True when the contract's source file lives under the project tests dir.
    return is_under_path(tests_dir, contract_data['source_path'])
def package_contracts(contract_factories):
    """Bundle contract factories into an ad-hoc container object.

    The returned object supports len(), iteration over (name, factory) pairs,
    membership tests, item get/set (writes through to the underlying mapping),
    keys()/values(), and attribute access by contract name.
    """
    def _len(self):
        return len(contract_factories)

    def _iter(self):
        return iter(contract_factories.items())

    def _contains(self, key):
        return contract_factories.__contains__(key)

    def _getitem(self, key):
        return contract_factories.__getitem__(key)

    def _setitem(self, key, value):
        return contract_factories.__setitem__(key, value)

    namespace = {
        '__len__': _len,
        '__iter__': _iter,
        '__contains__': _contains,
        '__getitem__': _getitem,
        '__setitem__': _setitem,
        'keys': lambda self: contract_factories.keys(),
        'values': lambda self: contract_factories.values(),
    }
    # Contract names are laid on top, so they win over the protocol helpers
    # in the generated class namespace.
    namespace.update(contract_factories)
    return type('contracts', (object,), namespace)()
# Keys from compiled contract data that are forwarded as keyword arguments to
# web3.eth.contract() (see create_contract_factory).
CONTRACT_FACTORY_FIELDS = {
    'abi',
    'asm',
    'ast',
    'bytecode',
    'bytecode_runtime',
    'clone_bin',
    'dev_doc',
    'interface',
    'metadata',
    'opcodes',
    'src_map',
    'src_map_runtime',
    'user_doc',
}
def create_contract_factory(web3, contract_name, contract_data):
    """Build a web3 contract factory from compiled contract data.

    Only the CONTRACT_FACTORY_FIELDS keys actually present in `contract_data`
    are forwarded to web3.eth.contract.
    """
    factory_kwargs = {}
    for field in CONTRACT_FACTORY_FIELDS:
        if field in contract_data:
            factory_kwargs[field] = contract_data[field]
    return web3.eth.contract(
        contract_name=normalize_class_name(contract_name),
        **factory_kwargs
    )
def construct_contract_factories(web3, compiled_contracts):
    """Create a packaged namespace of contract factories, one per compiled
    contract."""
    factories = {}
    for contract_name, contract_data in compiled_contracts.items():
        factories[contract_name] = create_contract_factory(
            web3,
            contract_name,
            contract_data,
        )
    return package_contracts(factories)
@to_dict
def compute_direct_dependency_graph(compiled_contracts):
    """
    Given an iterable of compiled contract data records (each providing the
    'name' and 'direct_dependencies' keys), return a *shallow* dependency
    graph mapping each contract name to its explicit link dependencies.
    """
    for contract_data in compiled_contracts:
        yield (
            contract_data['name'],
            contract_data['direct_dependencies'],
        )
def compute_recursive_contract_dependencies(contract_name, dependency_graph):
    """
    Compute the full transitive set of linker dependencies for a specific
    contract within a contract dependency graph.

    Implemented iteratively with a visited set so a cyclic graph terminates
    (the previous recursive version would recurse without bound on cycles).
    Contracts absent from the graph are treated as having no dependencies.
    """
    visited = set()
    stack = list(dependency_graph.get(contract_name, set()))
    while stack:
        dependency = stack.pop()
        if dependency in visited:
            continue
        visited.add(dependency)
        stack.extend(dependency_graph.get(dependency, set()))
    return visited
CONTRACT_NAME_REGEX = '^[_a-zA-Z][_a-zA-Z0-9]*$'


def is_contract_name(value):
    """Return True if `value` is a valid contract identifier.

    Uses re.fullmatch rather than re.match so that a trailing newline — which
    the `$` anchor alone would tolerate — is rejected.
    """
    return bool(re.fullmatch(CONTRACT_NAME_REGEX, value))
# Values eth_getCode returns when no code is deployed at an address.
EMPTY_BYTECODE_VALUES = {None, "0x"}
# Hex prefix/suffix bracketing the 64-hex-char swarm hash that the Solidity
# compiler embeds at the end of deployed bytecode.
SWARM_HASH_PREFIX = "a165627a7a72305820"
SWARM_HASH_SUFFIX = "0029"
EMBEDDED_SWARM_HASH_REGEX = (
    SWARM_HASH_PREFIX +
    "[0-9a-zA-Z]{64}" +
    SWARM_HASH_SUFFIX +
    "$"
)
# Deliberately the same length as a real embedded swarm hash (18 + 64 + 4 hex
# chars) so that normalization never changes string length — compare_bytecode
# relies on this invariant.
SWARM_HASH_REPLACEMENT = (
    SWARM_HASH_PREFIX +
    "<" +
    "-" * 20 +
    "swarm-hash-placeholder" +
    "-" * 20 +
    ">" +
    SWARM_HASH_SUFFIX
)
def compare_bytecode(left, right):
    """Compare two hex bytecode strings for equality, treating any embedded
    swarm-hash metadata as a wildcard.

    :raises ValueError: if normalization unexpectedly changes string length.
    """
    unprefixed_left = remove_0x_prefix(left)
    unprefixed_right = remove_0x_prefix(right)
    norm_left = re.sub(EMBEDDED_SWARM_HASH_REGEX, SWARM_HASH_REPLACEMENT, unprefixed_left)
    norm_right = re.sub(EMBEDDED_SWARM_HASH_REGEX, SWARM_HASH_REPLACEMENT, unprefixed_right)
    if len(norm_left) != len(unprefixed_left) or len(norm_right) != len(unprefixed_right):
        # Bug fix: the original built this message with a stray comma plus a
        # unary `+` applied to a str, which raised TypeError (and passed a
        # tuple to ValueError) whenever this invariant fired.
        raise ValueError(
            "Invariant. Normalized bytecodes are not the correct lengths:" +
            "\n- left (original) :" + left +
            "\n- left (unprefixed):" + unprefixed_left +
            "\n- left (normalized):" + norm_left +
            "\n- right (original) :" + right +
            "\n- right (unprefixed):" + unprefixed_right +
            "\n- right (normalized):" + norm_right
        )
    return norm_left == norm_right
def verify_contract_bytecode(web3, expected_bytecode, address):
    """Check that the code deployed at `address` matches `expected_bytecode`.

    TODO: write tests for this.
    """
    from populus.contracts.exceptions import BytecodeMismatch
    # An "empty" expectation makes no sense for a deployed contract instance.
    if expected_bytecode in EMPTY_BYTECODE_VALUES:
        raise ValueError(
            "Contract instances which contain an address cannot have empty "
            "runtime bytecode"
        )
    chain_bytecode = web3.eth.getCode(address)
    if chain_bytecode in EMPTY_BYTECODE_VALUES:
        raise BytecodeMismatch(
            "No bytecode found at address: {0}".format(address)
        )
    if not compare_bytecode(chain_bytecode, expected_bytecode):
        raise BytecodeMismatch(
            "Bytecode found at {0} does not match compiled bytecode:\n"
            " - chain_bytecode: {1}\n"
            " - compiled_bytecode: {2}".format(
                address,
                chain_bytecode,
                expected_bytecode,
            )
        )
def find_deploy_block_number(web3, address):
    """Binary-search the chain for the first block whose state shows non-empty
    code at `address`.

    Historical getCode lookups require node state for old blocks; pruned
    ("Missing trie node") heights are treated as not-yet-deployed.
    """
    chain_bytecode = web3.eth.getCode(address, "latest")
    if chain_bytecode in EMPTY_BYTECODE_VALUES:
        raise NotImplementedError("Cannot find deploy transaction for address with empty code")
    # Invariant: code is absent (or unknown) at `left` and present at `right`.
    left = 0
    right = web3.eth.blockNumber
    while left + 1 < right:
        middle = (left + right) // 2
        # This only works if the node was not fast synced for the provided
        # `block_identifier`.
        try:
            middle_code = web3.eth.getCode(address, block_identifier=middle)
        except ValueError as err:
            if 'Missing trie node' in str(err):
                # Pruned state at this height: keep searching the newer half.
                left = middle
                continue
            raise
        if middle_code in EMPTY_BYTECODE_VALUES:
            left = middle
        else:
            right = middle
    # Sanity checks: code must appear exactly at block `right`.
    code_at_right = web3.eth.getCode(address, block_identifier=right)
    if code_at_right in EMPTY_BYTECODE_VALUES:
        raise ValueError(
            "Something went wrong with the binary search to find the deploy block"
        )
    code_at_previous_block = web3.eth.getCode(address, block_identifier=right - 1)
    if code_at_previous_block not in EMPTY_BYTECODE_VALUES:
        raise ValueError(
            "Something went wrong with the binary search to find the deploy block"
        )
    return right
| {
"content_hash": "9500adcc684cf374ff0b7c1872c00f5b",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 95,
"avg_line_length": 28.447257383966246,
"alnum_prop": 0.6153960249184218,
"repo_name": "pipermerriam/populus",
"id": "b224206800b62c952283d171f474684916125ebe",
"size": "6742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "populus/utils/contracts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1210"
},
{
"name": "Python",
"bytes": "319837"
},
{
"name": "Shell",
"bytes": "544"
}
],
"symlink_target": ""
} |
import pytest
import libqtile.config
from libqtile import layout
from libqtile.confreader import Config
from test.layouts.layout_utils import assert_dimensions, assert_focus_path, assert_focused
class ZoomyConfig(Config):
    # Minimal qtile config exercising the Zoomy layout with a fixed
    # thumbnail-column width of 200px.
    auto_fullscreen = True
    groups = [
        libqtile.config.Group("a"),
    ]
    layouts = [
        layout.Zoomy(columnwidth=200),
    ]
    floating_layout = libqtile.resources.default_config.floating_layout
    keys = []
    mouse = []
    screens = []
# Run each decorated test under a manager started with ZoomyConfig.
zoomy_config = pytest.mark.parametrize("manager", [ZoomyConfig], indirect=True)
@zoomy_config
def test_zoomy_one(manager):
    # Each newly-added window occupies the full zoom area; only the focused
    # window's dimensions are asserted here.
    manager.test_window("one")
    assert_dimensions(manager, 0, 0, 600, 600)
    manager.test_window("two")
    assert_dimensions(manager, 0, 0, 600, 600)
    manager.test_window("three")
    assert_dimensions(manager, 0, 0, 600, 600)
    assert_focus_path(manager, "two", "one", "three")
# TODO(pc) find a way to check size of inactive windows
@zoomy_config
def test_zoomy_window_focus_cycle(manager):
    # setup 3 tiled and two floating clients
    manager.test_window("one")
    manager.test_window("two")
    manager.test_window("float1")
    manager.c.window.toggle_floating()
    manager.test_window("float2")
    manager.c.window.toggle_floating()
    manager.test_window("three")
    # test preconditions, Zoomy adds clients at head
    assert manager.c.layout.info()["clients"] == ["three", "two", "one"]
    # last added window has focus
    assert_focused(manager, "three")
    # assert window focus cycle, according to order in layout
    # (floating clients come after the tiled ones in the cycle)
    assert_focus_path(manager, "two", "one", "float1", "float2", "three")
| {
"content_hash": "188b4ccaa9421c7f428dc0ac235266a7",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 90,
"avg_line_length": 29.927272727272726,
"alnum_prop": 0.6834750911300121,
"repo_name": "qtile/qtile",
"id": "287e9df6dd13abc92cbc10f45384325bc3ed7d89",
"size": "2984",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/layouts/test_zoomy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "625"
},
{
"name": "Python",
"bytes": "2202676"
},
{
"name": "Shell",
"bytes": "8090"
}
],
"symlink_target": ""
} |
import unittest, sys
from ctypes import *
import _ctypes_test
# Parallel lists: each ctypes scalar type and the Python type its .value
# attribute produces.
ctype_types = [c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint,
               c_long, c_ulong, c_longlong, c_ulonglong, c_double, c_float]
python_types = [int, int, int, int, int, int,
                int, int, int, int, float, float]
class PointersTestCase(unittest.TestCase):
    # Vendored Python 3.1 test suite for ctypes POINTER/pointer/byref
    # behavior, exercised against the _ctypes_test helper extension. Uses the
    # era's failUnless* assertion aliases; kept as-is.
    def test_pointer_crash(self):
        class A(POINTER(c_ulong)):
            pass
        POINTER(c_ulong)(c_ulong(22))
        # Pointer can't set contents: has no _type_
        self.failUnlessRaises(TypeError, A, c_ulong(33))
    def test_pass_pointers(self):
        dll = CDLL(_ctypes_test.__file__)
        func = dll._testfunc_p_p
        func.restype = c_long
        i = c_int(12345678)
## func.argtypes = (POINTER(c_int),)
        address = func(byref(i))
        self.failUnlessEqual(c_int.from_address(address).value, 12345678)
        func.restype = POINTER(c_int)
        res = func(pointer(i))
        self.failUnlessEqual(res.contents.value, 12345678)
        self.failUnlessEqual(res[0], 12345678)
    def test_change_pointers(self):
        dll = CDLL(_ctypes_test.__file__)
        func = dll._testfunc_p_p
        i = c_int(87654)
        func.restype = POINTER(c_int)
        func.argtypes = (POINTER(c_int),)
        res = func(pointer(i))
        self.failUnlessEqual(res[0], 87654)
        self.failUnlessEqual(res.contents.value, 87654)
        # C code: *res = 54345
        res[0] = 54345
        self.failUnlessEqual(i.value, 54345)
        # C code:
        # int x = 12321;
        # res = &x
        res.contents = c_int(12321)
        self.failUnlessEqual(i.value, 54345)
    def test_callbacks_with_pointers(self):
        # a function type receiving a pointer
        PROTOTYPE = CFUNCTYPE(c_int, POINTER(c_int))
        self.result = []
        def func(arg):
            for i in range(10):
## print arg[i],
                self.result.append(arg[i])
## print
            return 0
        callback = PROTOTYPE(func)
        dll = CDLL(_ctypes_test.__file__)
        # This function expects a function pointer,
        # and calls this with an integer pointer as parameter.
        # The int pointer points to a table containing the numbers 1..10
        doit = dll._testfunc_callback_with_pointer
## i = c_int(42)
## callback(byref(i))
## self.failUnless(i.value == 84)
        doit(callback)
## print self.result
        doit(callback)
## print self.result
    def test_basics(self):
        from operator import delitem
        for ct, pt in zip(ctype_types, python_types):
            i = ct(42)
            p = pointer(i)
## print type(p.contents), ct
            self.failUnless(type(p.contents) is ct)
            # p.contents is the same as p[0]
## print p.contents
## self.failUnless(p.contents == 42)
## self.failUnless(p[0] == 42)
            self.assertRaises(TypeError, delitem, p, 0)
    def test_from_address(self):
        from array import array
        a = array('i', [100, 200, 300, 400, 500])
        addr = a.buffer_info()[0]
        p = POINTER(POINTER(c_int))
## print dir(p)
## print p.from_address
## print p.from_address(addr)[0][0]
    def test_other(self):
        class Table(Structure):
            _fields_ = [("a", c_int),
                        ("b", c_int),
                        ("c", c_int)]
        pt = pointer(Table(1, 2, 3))
        self.failUnlessEqual(pt.contents.a, 1)
        self.failUnlessEqual(pt.contents.b, 2)
        self.failUnlessEqual(pt.contents.c, 3)
        pt.contents.c = 33
        from ctypes import _pointer_type_cache
        del _pointer_type_cache[Table]
    def test_basic(self):
        p = pointer(c_int(42))
        # Although a pointer can be indexed, it ha no length
        self.assertRaises(TypeError, len, p)
        self.failUnlessEqual(p[0], 42)
        self.failUnlessEqual(p.contents.value, 42)
    def test_charpp(self):
        """Test that a character pointer-to-pointer is correctly passed"""
        dll = CDLL(_ctypes_test.__file__)
        func = dll._testfunc_c_p_p
        func.restype = c_char_p
        argv = (c_char_p * 2)()
        argc = c_int( 2 )
        argv[0] = 'hello'
        argv[1] = 'world'
        result = func( byref(argc), argv )
        assert result == 'world', result
    def test_bug_1467852(self):
        # http://sourceforge.net/tracker/?func=detail&atid=532154&aid=1467852&group_id=71702
        x = c_int(5)
        dummy = []
        for i in range(32000):
            dummy.append(c_int(i))
        y = c_int(6)
        p = pointer(x)
        pp = pointer(p)
        q = pointer(y)
        pp[0] = q # <==
        self.failUnlessEqual(p[0], 6)
    def test_c_void_p(self):
        # http://sourceforge.net/tracker/?func=detail&aid=1518190&group_id=5470&atid=105470
        if sizeof(c_void_p) == 4:
            self.failUnlessEqual(c_void_p(0xFFFFFFFF).value,
                                 c_void_p(-1).value)
            self.failUnlessEqual(c_void_p(0xFFFFFFFFFFFFFFFF).value,
                                 c_void_p(-1).value)
        elif sizeof(c_void_p) == 8:
            self.failUnlessEqual(c_void_p(0xFFFFFFFF).value,
                                 0xFFFFFFFF)
            self.failUnlessEqual(c_void_p(0xFFFFFFFFFFFFFFFF).value,
                                 c_void_p(-1).value)
            self.failUnlessEqual(c_void_p(0xFFFFFFFFFFFFFFFFFFFFFFFF).value,
                                 c_void_p(-1).value)
        self.assertRaises(TypeError, c_void_p, 3.14) # make sure floats are NOT accepted
        self.assertRaises(TypeError, c_void_p, object()) # nor other objects
    def test_pointers_bool(self):
        # NULL pointers have a boolean False value, non-NULL pointers True.
        self.failUnlessEqual(bool(POINTER(c_int)()), False)
        self.failUnlessEqual(bool(pointer(c_int())), True)
        self.failUnlessEqual(bool(CFUNCTYPE(None)(0)), False)
        self.failUnlessEqual(bool(CFUNCTYPE(None)(42)), True)
        # COM methods are boolean True:
        if sys.platform == "win32":
            mth = WINFUNCTYPE(None)(42, "name", (), None)
            self.failUnlessEqual(bool(mth), True)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "1cba9b67a6706622801314f597a4a603",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 92,
"avg_line_length": 33.151041666666664,
"alnum_prop": 0.5525530243519245,
"repo_name": "MalloyPower/parsing-python",
"id": "86429c03c9d116561be633a315fe2864551f87c6",
"size": "6365",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/ctypes/test/test_pointers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
urlpatterns = [
    # Message-board index page.
    url(r'^$', views.liuyanqu, name='liuyanqu'),
]
| {
"content_hash": "6e14f52a210d2369d13ac9441d8ac8d7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 18.428571428571427,
"alnum_prop": 0.6356589147286822,
"repo_name": "LoveKano/hs_django_blog",
"id": "13d316d93c3864b1d6a037814a1af4f210e92119",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "liuyan/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13265"
},
{
"name": "HTML",
"bytes": "59910"
},
{
"name": "JavaScript",
"bytes": "5904"
},
{
"name": "Python",
"bytes": "35215"
}
],
"symlink_target": ""
} |
import json
import time

import requests
from redis import StrictRedis

from insight.reader import get_file_for_url
from insight.engines import documents
from insight.writer import get_thumb_from_cache, have_cache_for_kwargs, get_thumb_path_for_kwargs
try:
import settings
except ImportError:
pass
# Connection settings, overridable through the optional ``settings`` module.
REDIS_QUEUE_KEY = getattr(settings, 'REDIS_QUEUE_KEY', 'insight')
REDIS_HOST = getattr(settings, 'REDIS_HOST', 'localhost')
REDIS_PORT = getattr(settings, 'REDIS_PORT', 6379)
# BUG FIX: this previously read the 'REDIS_PORT' setting (copy-paste typo),
# so a configured REDIS_DB was silently ignored and the port number was
# used as the database index instead.
REDIS_DB = getattr(settings, 'REDIS_DB', 0)

redis = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
def main():
print "Launch insight worker"
while 1:
msg = redis.blpop(REDIS_QUEUE_KEY)
params = json.loads(msg[1])
print u"Consuming task for doc %s" % params['url']
error = True
num = 0
while error and num < 3:
try:
file_obj, is_from_cache = get_file_for_url(params['url'])
error = False
except:
sleep(10)
num += 1
error = True
print u"Got %s (%s)" % (params['url'], is_from_cache)
extract_parameters = {'url': params['url'],
'max_previews': params['max_previews'],
'engine': params.get('engine', 'document')}
for size in params['sizes']:
extract_parameters['width'] = size[0]
extract_parameters['height'] = size[1]
print "Processing", extract_parameters
num_pages = documents.extract_image(file_obj, **extract_parameters)
file_obj.seek(0)
print "Processed", num_pages, "pages"
if 'callback' in params and params['callback'] is not None:
try:
req = requests.post(params['callback'], data={'num_pages': num_pages})
print req.url, num_pages
except requests.exceptions.ConnectionError:
# For localhost error on production server
pass
| {
"content_hash": "864278f2140b21b7c29a3801a548f64a",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 97,
"avg_line_length": 35.44827586206897,
"alnum_prop": 0.5744163424124513,
"repo_name": "novapost/insight",
"id": "d48e01d62dfe5f143d29df698fb3e575d2c10ae9",
"size": "2080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insight/worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19910"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import time
import datetime
import pickle
import ProcessedTweet
import Storage
import pigeo
from textblob.classifiers import NaiveBayesClassifier
from textblob.sentiments import NaiveBayesAnalyzer
from textblob import TextBlob
import time
from textblob import Blobber
from textblob.sentiments import NaiveBayesAnalyzer
from collections import namedtuple
import string
import nltk
from textblob.en import sentiment as pattern_sentiment
from textblob.tokenizers import word_tokenize
from textblob.decorators import requires_nltk_corpus
from textblob.base import BaseSentimentAnalyzer, DISCRETE, CONTINUOUS
from WLMNaiveBayes import NaiveBayesAnalyzerWLM
def _default_feature_extractor(words):
"""Default feature extractor for the NaiveBayesAnalyzer."""
return dict(((word, True) for word in words))
class Analizers:
    """Pipeline that geolocates (via pigeo) and sentiment-scores (via
    TextBlob pattern + Naive Bayes) tweets held in ``Storage``."""

    def __init__(self,isWorking):
        # Loads the pigeo geolocation model once at construction time.
        pigeo.load_model_unzipped()
        self.storage = Storage.Storage()
        # Flag exposed to callers to signal whether processing is running.
        self.isWorking = isWorking

    def ProcessUpdates(self):
        """Classify and geolocate all unprocessed updates in batches of 1000."""
        # NOTE(review): this local shadows self.isWorking and is never read
        # elsewhere — presumably meant to be ``self.isWorking``; confirm.
        isWorking = True
        # Load the pre-trained sentiment classifier from disk.
        f = open('my_classifier.pickle', 'rb')
        classifier = pickle.load(f)
        f.close()
        # classifier = NaiveBayesAnalyzerWLM()
        # classifier.train()
        # f = open('my_classifier.pickle', 'wb')
        # pickle.dump(classifier, f)
        # f.close()
        print 'trained'
        tb = Blobber(analyzer=classifier)
        self.storage.DeleteProcessedUpdates();
        self.storage.transportToProcessed()
        counter = 1;
        unprocessedUpdates = []
        shouldContinue = True
        # NOTE(review): updates are fetched only once; if the result set is
        # non-empty, shouldContinue stays True and the while loop repeats
        # over the same rows — verify whether a re-fetch was intended.
        userUpdates = self.storage.GetUnprocessedUpdates()
        while(shouldContinue):
            for userUpdate in userUpdates:
                print counter;
                # Strip non-printable characters before classification.
                printable = set(string.printable)
                userUpdate.text = ''.join(filter(lambda x: x in printable, userUpdate.text))
                unprocessedUpdates.append(userUpdate);
                # Flush a full batch of 1000 to the location/sentiment pass.
                if(unprocessedUpdates.__len__()==1000):
                    self.processLocation(unprocessedUpdates,tb);
                    unprocessedUpdates = []
                counter = counter+1;
            # Flush the final partial batch.
            self.processLocation(unprocessedUpdates, tb);
            unprocessedUpdates = []
            shouldContinue = (userUpdates.__len__()>0)
        isWorking = False

    def processLocation(self,unprocessedUpdates,textBlob):
        """Geolocate a batch with pigeo, score each tweet with both the
        pattern analyzer and the supplied Bayes blobber, then persist."""
        locations = pigeo.geo([ unprocessedUpdate.text for unprocessedUpdate in unprocessedUpdates])
        counter = 0;
        for unprocessedUpdate in unprocessedUpdates:
            print counter
            # pigeo returns results positionally, aligned with the input list.
            location = locations[counter]
            blobPattern = TextBlob(unprocessedUpdate.text)
            blobBayes = textBlob(unprocessedUpdate.text)
            patternSentiment = blobPattern.sentiment
            bayes_sentiment = blobBayes.sentiment
            processedTweet = ProcessedTweet.ProcessedTweet(unprocessedUpdate.term, unprocessedUpdate.id_str, unprocessedUpdate.text,
                                                           location['lat'], location['lon'], location['country'],
                                                           location['state'], location['city'], unprocessedUpdate.creation_date
                                                           , patternSentiment.polarity, patternSentiment.subjectivity,
                                                           bayes_sentiment.classification, bayes_sentiment.p_neg,
                                                           bayes_sentiment.p_pos)
            self.storage.SaveProcessedTweet(processedTweet)
            counter = counter +1;

    def GroupProcessedUpdates(self):
        """Aggregate analyzed updates by country, state and city."""
        self.storage.GroupAnalyzedUpdatesByCountry()
        self.storage.GroupAnalyzedUpdatesByState()
        self.storage.GroupAnalyzedUpdatesByCity()
"content_hash": "27d1a914207bf2cff2f4ad16650ab9e8",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 132,
"avg_line_length": 40.0625,
"alnum_prop": 0.6359854394175767,
"repo_name": "juanc5ibanez/WhoLovesMe",
"id": "9827819b95b6c96215bfa9029fbfa5f32941a476",
"size": "3846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Analizers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4505"
},
{
"name": "HTML",
"bytes": "60506"
},
{
"name": "JavaScript",
"bytes": "511152"
},
{
"name": "Python",
"bytes": "82317"
},
{
"name": "Shell",
"bytes": "2137"
}
],
"symlink_target": ""
} |
from rspecs.commons import DEFAULT_XMLNS
from rspecs.commons_com import EMULAB_XMLNS, Sliver
from rspecs.parser_base import ParserBase
import core
logger = core.log.getLogger("utility-rspec")
class CRMv3RequestParser(ParserBase):
    """Parser for CRM v3 request RSpecs: extracts computing (C) resources
    and their sliver descriptions from the XML tree."""

    def __init__(self, from_file=None, from_string=None):
        super(CRMv3RequestParser, self).__init__(from_file, from_string)
        self.xmlns = DEFAULT_XMLNS
        self.__com = EMULAB_XMLNS

    def check_c_resource(self, node):
        """Return True when ``node`` is a C-resource (vtam-managed node)."""
        # according to the proposed URNs structure, a C-node MUST have
        # "vtam" as resource-name (component_id) and authority
        # (component_manager_id) fields
        if (not node.attrib.get("component_id")) or\
                (not node.attrib.get("component_manager_id")):
            return False
        if ("vtam" in node.attrib.get("component_id")) and\
                ("vtam" in node.attrib.get("component_manager_id")):
            return True
        return False

    def get_slivers(self):
        """Collect sliver descriptions (as dicts) for every C-resource node."""
        # nodes = self.rspec.xpath("//d:node[@component_id='%s']" %\
        #    component_id, namespaces = {"d": self.xmlns})
        nodes = self.__find_nodes()
        sliver_list = []
        for node in nodes:
            if not self.check_c_resource(node):
                logger.info("Skipping this node, not a C-res: %s", (node,))
                continue
            logger.debug("Analizing C-res: %s" % (node,))
            server_c_id = node.attrib.get("component_id")
            server_cm_id = node.attrib.get("component_manager_id")
            server_client_id = node.attrib.get("client_id")
            server_exclusive = node.attrib.get("exclusive")
            server_available = False  # Default, not important at this point
            sliver_type = self.__find_sliver(node)
            sliver_type_name = sliver_type.attrib.get("name")
            # VM resources (cores/ram/disk) live in the emulab "xen" extension.
            ram = None
            disk = None
            cores = None
            img_instance = sliver_type.find(
                "{%s}xen" % self.rspec.nsmap.get("emulab"))  # EMULAB_XMLNS
            if img_instance is not None:
                cores = img_instance.attrib.get("cores")
                ram = img_instance.attrib.get("ram")
                disk = img_instance.attrib.get("disk")
            disk_image_name = "default"
            disk_image = sliver_type.find("{%s}disk_image" % self.xmlns)
            if disk_image is not None:
                disk_image_name = disk_image.attrib.get("name")
            sliver_elem = Sliver(server_c_id, server_cm_id, server_client_id,
                                 server_exclusive, server_available,
                                 sliver_type_name, disk_image_name,
                                 ram, disk, cores,
                                 )
            if sliver_elem is not None:
                # Retrieve contects of Sliver object
                sliver_list.append(sliver_elem.__dict__)
        return sliver_list

    def __find_sliver(self, node):
        # Raises (via ParserBase) when the node carries no sliver_type child.
        sliver_type = node.find("{%s}sliver_type" % self.xmlns)
        if sliver_type is None:
            self.raise_exception("Sliver_type tag not found!")
        return sliver_type

    def __find_nodes(self):
        # Raises (via ParserBase) when the RSpec contains no node elements.
        nodes = self.rspec.findall("{%s}node" % self.xmlns)
        if nodes is None:
            self.raise_exception("Node tag not found!")
        return nodes

    def get_nodes(self):
        """Return all node elements of the request RSpec."""
        return self.__find_nodes()

    # def __find_slivers(self, nodes=None):
    #     slivers = []
    #     if nodes is not None:
    #         for node in nodes:
    #             slivers.extend(node.findall("{%s}sliver_type" % self.xmlns))
    #     else:
    #         slivers = self.rspec.findall("{%s}sliver_type" % self.xmlns)
    #     if slivers is None:
    #         self.raise_exception("Sliver_type tag not found!")
    #     return slivers
| {
"content_hash": "b2ca8e81a41b1431ddd05552eac81354",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 40.34736842105263,
"alnum_prop": 0.5570049569527785,
"repo_name": "dana-i2cat/felix",
"id": "00d5987d1f1694264526a77c5069df7f5f84e7c1",
"size": "3833",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/resource/utilities/rspecs/crm/request_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "DTrace",
"bytes": "370"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11211"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "7875883"
},
{
"name": "Shell",
"bytes": "258079"
}
],
"symlink_target": ""
} |
import datetime
import random
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils.hashcompat import sha_constructor
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
__all__ = ['Invitation']
class InvitationManager(models.Manager):
    """Manager with creation and maintenance helpers for ``Invitation``."""

    def create_invitation(self, user, email):
        """
        Create an ``Invitation`` and return it.

        The code for the ``Invitation`` will be a SHA1 hash, generated
        from a combination of the ``User``'s username, the current time
        and a random salt.
        """
        date_invited = datetime.datetime.now()
        salt = sha_constructor(str(random.random())).hexdigest()[:5]
        code = sha_constructor("%s%s%s" % (datetime.datetime.now(), salt, user.username)).hexdigest()
        kwargs = {
            'from_user': user,
            'email': email,
            'date_invited': date_invited,
            # Invitations expire ACCOUNT_INVITATION_DAYS after being sent.
            'expiration_date': date_invited + datetime.timedelta(settings.ACCOUNT_INVITATION_DAYS),
            'code': code,
        }
        # NOTE: removed leftover debug ``print kwargs`` statement.
        invite = self.create(**kwargs)
        return invite

    def remaining_invitations_for_user(self, user):
        """Return the number of remaining invitations for a given ``User``
        if ``INVITATIONS_PER_USER`` has been set; ``None`` otherwise
        (original behavior preserved).
        """
        if hasattr(settings, 'INVITATIONS_PER_USER'):
            inviteds_count = self.filter(from_user=user).count()
            # Possible for admin to change INVITATIONS_PER_USER to something
            # lower than the initial setting, resulting in a negative value;
            # never report fewer than zero remaining invitations.
            return max(0, settings.INVITATIONS_PER_USER - inviteds_count)

    def delete_expired_invitations(self):
        """
        Delete all unused ``Invitation`` objects that are past the expiration date.
        """
        self.filter(expiration_date__lt=datetime.datetime.now(), used=False).delete()
class Invitation(models.Model):
    """A single-use, expiring invitation sent by ``from_user`` to ``email``."""

    code = models.CharField(_('invitation code'), max_length=40)
    date_invited = models.DateTimeField(_('date invited'))
    expiration_date = models.DateTimeField()
    used = models.BooleanField(default=False)
    from_user = models.ForeignKey(User, related_name='invitations_sent')
    to_user = models.ForeignKey(User, null=True, blank=True, related_name='invitation_received')
    email = models.EmailField(unique=True)

    objects = InvitationManager()

    def __unicode__(self):
        return u"Invitation from %s to %s" % (self.from_user.username, self.email)

    def expired(self):
        """Return True when the invitation's expiration date has passed."""
        # expiration_date is stored naive; make it aware before comparing.
        return timezone.make_aware(self.expiration_date, timezone.get_default_timezone()) < timezone.now()

    def send(self, from_email=settings.DEFAULT_FROM_EMAIL,
             subject_template='invitation/invitation_email_subject.txt',
             message_template='invitation/invitation_email.txt'):
        """
        Send an invitation email.
        """
        current_site = Site.objects.get_current()
        subject = render_to_string(subject_template,
                                   {'invitation': self,
                                    'site': current_site})
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string(message_template,
                                   {'invitation': self,
                                    'expiration_days': settings.ACCOUNT_INVITATION_DAYS,
                                    'site': current_site})
        send_mail(subject, message, from_email, [self.email])

    def extend(self):
        """Extend the invitation ACCOUNT_INVITATION_DAYS from now and save."""
        date_now = datetime.datetime.now()
        # BUG FIX: ``timedelta`` was referenced unqualified, but only the
        # ``datetime`` module is imported at the top of this file, so the
        # original raised NameError at runtime.
        extend_time = datetime.timedelta(days=settings.ACCOUNT_INVITATION_DAYS)
        self.expiration_date = date_now + extend_time
        self.save()
| {
"content_hash": "b6a0f4ca9d688ee681248f116bc84c21",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 111,
"avg_line_length": 40.20192307692308,
"alnum_prop": 0.636689787132265,
"repo_name": "joeatmatterport/django-invitation-simplified",
"id": "298bd02d78cb41f90501d6917371cea39085031f",
"size": "4181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invitation/models.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20506"
}
],
"symlink_target": ""
} |
"""Abstracts querying Cassandra to manipulate timeseries."""
from __future__ import absolute_import
from __future__ import print_function
import logging
import uuid
import os
from os import path as os_path
import multiprocessing
import cassandra
from cassandra import murmur3
from cassandra import cluster as c_cluster
from cassandra import concurrent as c_concurrent
from cassandra import encoder as c_encoder
from cassandra import query as c_query
from cassandra import policies as c_policies
from cassandra.io import asyncorereactor as c_asyncorereactor
from biggraphite import accessor as bg_accessor
from biggraphite.drivers import _downsampling
from biggraphite.drivers import _utils
log = logging.getLogger(__name__)
# Round the row size to 1000 seconds
_ROW_SIZE_PRECISION_MS = 1000 * 1000
DEFAULT_KEYSPACE = "biggraphite"
DEFAULT_CONTACT_POINTS = "127.0.0.1"
DEFAULT_PORT = 9042
DEFAULT_TIMEOUT = 10.0
DEFAULT_CONNECTIONS = 4
# Disable compression per default as this is clearly useless for writes and
# reads do not generate that much traffic.
DEFAULT_COMPRESSION = False
# Current value is based on Cassandra page settings, so that everything fits in a single
# reply with default settings.
# TODO: Mesure actual number of metrics for existing queries and estimate a more
# reasonable limit, also consider other engines.
DEFAULT_MAX_METRICS_PER_GLOB = 5000
DEFAULT_TRACE = False
DEFAULT_BULKIMPORT = False
OPTIONS = {
"keyspace": str,
"contact_points": _utils.list_from_str,
"timeout": float,
"connections": int,
"compression": _utils.bool_from_str,
"max_metrics_per_glob": int,
"trace": bool,
"bulkimport": bool,
}
def add_argparse_arguments(parser):
    """Add Cassandra arguments to an argparse parser.

    Args:
      parser: argparse.ArgumentParser, extended in place.
    """
    parser.add_argument(
        "--cassandra_keyspace", metavar="NAME",
        help="Cassandra keyspace.",
        default=DEFAULT_KEYSPACE)
    parser.add_argument(
        "--cassandra_contact_points", metavar="HOST", nargs="+",
        help="Hosts used for discovery.",
        default=DEFAULT_CONTACT_POINTS)
    parser.add_argument(
        "--cassandra_port", metavar="PORT", type=int,
        help="The native port to connect to.",
        default=DEFAULT_PORT)
    parser.add_argument(
        "--cassandra_connections", metavar="N", type=int,
        help="Number of connections per Cassandra host per process.",
        default=DEFAULT_CONNECTIONS)
    parser.add_argument(
        # BUG FIX: DEFAULT_TIMEOUT (10.0) and the OPTIONS entry are floats,
        # but this flag used type=int, truncating fractional timeouts.
        "--cassandra_timeout", metavar="TIMEOUT", type=float,
        help="Cassandra query timeout in seconds.",
        default=DEFAULT_TIMEOUT)
    parser.add_argument(
        "--cassandra_compression", metavar="COMPRESSION", type=str,
        help="Cassandra network compression.",
        default=DEFAULT_COMPRESSION)
    parser.add_argument(
        # BUG FIX: without type=int a value given on the command line
        # stayed a string while the default is an int (OPTIONS says int).
        "--cassandra_max_metrics_per_glob", metavar="N", type=int,
        help="Maximum number of metrics returned for a glob query.",
        default=DEFAULT_MAX_METRICS_PER_GLOB)
    parser.add_argument(
        "--cassandra_trace",
        help="Enable query traces",
        default=DEFAULT_TRACE,
        action="store_true")
    parser.add_argument(
        "--cassandra_bulkimport", action="store_true",
        help="Generate files needed for bulkimport.")
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
class Error(bg_accessor.Error):
"""Base class for all exceptions from this module."""
class CassandraError(Error):
"""Fatal errors accessing Cassandra."""
class RetryableCassandraError(CassandraError, bg_accessor.RetryableError):
"""Errors accessing Cassandra that could succeed if retried."""
class NotConnectedError(CassandraError):
"""Fatal errors accessing Cassandra because the Accessor is not connected."""
class TooManyMetrics(CassandraError):
"""A name glob yielded more than MAX_METRIC_PER_GLOB metrics."""
class InvalidArgumentError(Error, bg_accessor.InvalidArgumentError):
"""Callee did not follow requirements on the arguments."""
class InvalidGlobError(InvalidArgumentError):
"""The provided glob is invalid."""
# TODO(c.chary): convert some of these to options, but make sure
# they are stored in the database an loaded automatically from
# here.
# HEURISTIC PARAMETERS
# ====================
# The following few constants are heuristics that are used to tune
# the datatable.
# We expect timestamp T to be written at T +/- _OUT_OF_ORDER_S
# As result we delay expiry and compaction by that much time
_OUT_OF_ORDER_S = 15 * MINUTE
# We expect to use this >>1440 points per read(~24h with a resolution of one minute).
# We round it up to a nicer value.
_EXPECTED_POINTS_PER_READ = 2000
# The API has a resolution of 1 sec. We don't want partitions to contain
# less than 6 hour of data (= 21600 points in the worst case).
_MIN_PARTITION_SIZE_MS = 6 * HOUR
# We also don't want partitions to be too big. The official limit is 100k.
_MAX_PARTITION_SIZE = 25000
# As we disable commit log, we flush memory data to disk every so
# often to make sure they are persisted.
_FLUSH_MEMORY_EVERY_S = 15 * MINUTE
# Can one of "DateTieredCompactionStrategy" or "TimeWindowCompactionStrategy".
# Support for TWCS is still experimental and require Cassandra >=3.8.
_COMPACTION_STRATEGY = "DateTieredCompactionStrategy"
_COMPONENTS_MAX_LEN = 64
_LAST_COMPONENT = "__END__"
_METADATA_CREATION_CQL_PATH_COMPONENTS = ", ".join(
"component_%d text" % n for n in range(_COMPONENTS_MAX_LEN)
)
_METADATA_CREATION_CQL_METRICS = str(
"CREATE TABLE IF NOT EXISTS \"%(keyspace)s\".metrics ("
" name text,"
" id uuid,"
" config map<text, text>,"
" " + _METADATA_CREATION_CQL_PATH_COMPONENTS + ","
" PRIMARY KEY (name)"
");"
)
_METADATA_CREATION_CQL_DIRECTORIES = str(
"CREATE TABLE IF NOT EXISTS \"%(keyspace)s\".directories ("
" name text,"
" " + _METADATA_CREATION_CQL_PATH_COMPONENTS + ","
" PRIMARY KEY (name)"
");"
)
_METADATA_CREATION_CQL_PATH_INDEXES = [
"CREATE CUSTOM INDEX IF NOT EXISTS ON \"%%(keyspace)s\".%(table)s (component_%(component)d)"
" USING 'org.apache.cassandra.index.sasi.SASIIndex'"
" WITH OPTIONS = {"
" 'analyzer_class': 'org.apache.cassandra.index.sasi.analyzer.NonTokenizingAnalyzer',"
" 'case_sensitive': 'true'"
" };" % {"table": t, "component": n}
for t in 'metrics', 'directories'
for n in range(_COMPONENTS_MAX_LEN)
]
_METADATA_CREATION_CQL = [
_METADATA_CREATION_CQL_METRICS,
_METADATA_CREATION_CQL_DIRECTORIES,
] + _METADATA_CREATION_CQL_PATH_INDEXES
_DATAPOINTS_CREATION_CQL_TEMPLATE = str(
"CREATE TABLE IF NOT EXISTS %(table)s ("
" metric uuid," # Metric UUID.
" time_start_ms bigint," # Lower bound for this row.
" offset smallint," # time_start_ms + offset * precision = timestamp
" value double," # Value for the point.
" count int," # If value is sum, divide by count to get the avg.
" PRIMARY KEY ((metric, time_start_ms), offset)"
")"
" WITH CLUSTERING ORDER BY (offset DESC)"
" AND default_time_to_live = %(default_time_to_live)d"
" AND compaction = {"
" 'class': '%(compaction_strategy)s',"
" 'timestamp_resolution': 'MICROSECONDS',"
" %(compaction_options)s"
" };"
)
_DATAPOINTS_CREATION_CQL_CS_TEMPLATE = {
"DateTieredCompactionStrategy": str(
" 'base_time_seconds': '%(base_time_seconds)d',"
" 'max_window_size_seconds': %(max_window_size_seconds)d"
),
"TimeWindowCompactionStrategy": str(
" 'compaction_window_unit': '%(compaction_window_unit)s',"
" 'compaction_window_size': %(compaction_window_size)d"
)
}
def _row_size_ms(stage):
    """Number of milliseconds to put in one Cassandra row.

    Args:
      stage: The stage the table stores.

    Returns:
      An integer, the duration in milliseconds.
    """
    # Aim for enough points per partition to serve a typical read, but
    # never less than the minimum partition span...
    preferred_ms = max(
        stage.precision_ms * _EXPECTED_POINTS_PER_READ,
        _MIN_PARTITION_SIZE_MS,
    )
    # ...and never more rows per partition than Cassandra handles well.
    capped_ms = min(stage.precision_ms * _MAX_PARTITION_SIZE, preferred_ms)
    return bg_accessor.round_up(capped_ms, _ROW_SIZE_PRECISION_MS)
class _CappedConnection(c_asyncorereactor.AsyncoreConnection):
    """A connection with a cap on the number of in-flight requests per host."""

    # 300 is the minimum with protocol version 3, default is 65536
    max_in_flight = 300
class _CountDown(_utils.CountDown):
    """Cassandra specific version of CountDown."""

    def on_cassandra_result(self, result):
        # Forward the driver's success callback to the generic counter.
        self.on_result(result)

    def on_cassandra_failure(self, exc):
        # Forward the driver's failure callback to the generic counter.
        self.on_failure(exc)
class _LazyPreparedStatements(object):
    """On demand factory of prepared statements and tables.

    As per design (CASSANDRA_DESIGN.md) we have one table per retention stage.
    This creates tables and corresponding prepared statements once they are needed.

    When bulkimport is True this class will instead write the files necessary to
    bulkimport data.
    """

    def __init__(self, session, keyspace, bulkimport=False):
        self._keyspace = keyspace
        self._session = session
        self._bulkimport = bulkimport
        # Per-stage caches of prepared INSERT/SELECT statements.
        self.__stage_to_insert = {}
        self.__stage_to_select = {}
        # Per-stage open CSV file handles used in bulkimport mode.
        self.__data_files = {}

    def __bulkimport_filename(self, filename):
        # Files are namespaced per worker process: data/<worker-id>/<name>.
        current = multiprocessing.current_process()
        uid = str(current._identity[0]) if len(current._identity) else "0"
        filename = os_path.join("data", uid, filename)
        dirname = os_path.dirname(filename)
        if not os_path.exists(dirname):
            os.makedirs(dirname)
        return filename

    def _bulkimport_write_schema(self, stage, statement_str):
        """Write the CREATE TABLE statement for ``stage`` to a .cql file."""
        filename = self.__bulkimport_filename(stage.as_string + ".cql")
        log.info("Writing schema for '%s' in '%s'" % (stage.as_string, filename))
        open(filename, "w").write(statement_str)

    def _bulkimport_write_datapoint(self, stage, args):
        """Append one datapoint row to the stage's CSV file (bulkimport mode)."""
        stage_str = stage.as_string
        if stage_str not in self.__data_files:
            # First datapoint for this stage: emit the schema and open the CSV.
            statement_str = self._create_datapoints_table_stmt(stage)
            self._bulkimport_write_schema(stage, statement_str)

            filename = self.__bulkimport_filename(stage.as_string + ".csv")
            log.info("Writing data for '%s' in '%s'" % (stage.as_string, filename))
            fp = open(filename, "w")
            # NOTE(review): these handles are never explicitly closed; they
            # live for the worker's lifetime and flush on process exit.
            self.__data_files[stage_str] = fp
        else:
            fp = self.__data_files[stage_str]

        fp.write(",".join([str(a) for a in args]) + "\n")

    def _create_datapoints_table_stmt(self, stage):
        """Build the CREATE TABLE CQL for ``stage``, tuned for its precision."""
        # Time after which data expire.
        time_to_live = stage.duration + _OUT_OF_ORDER_S

        # Estimate the age of the oldest data we still expect to read.
        fresh_time = stage.precision * _EXPECTED_POINTS_PER_READ

        cs_template = _DATAPOINTS_CREATION_CQL_CS_TEMPLATE.get(_COMPACTION_STRATEGY)
        if not cs_template:
            raise InvalidArgumentError(
                "Unknown compaction strategy '%s'" % _COMPACTION_STRATEGY)
        cs_kwargs = {}

        if _COMPACTION_STRATEGY == "DateTieredCompactionStrategy":
            # Time it takes to receive a step
            arrival_time = stage.precision + _OUT_OF_ORDER_S

            # See http://www.datastax.com/dev/blog/datetieredcompactionstrategy
            #  - If too small: Reads need to touch many sstables
            #  - If too big: We pay compaction overhead for data that are
            #    never accessed anymore and get huge sstables
            # We set a minimum of arrival_time so that data are in order
            max_window_size_seconds = max(fresh_time, arrival_time + 1)

            cs_kwargs["base_time_seconds"] = arrival_time
            cs_kwargs["max_window_size_seconds"] = max_window_size_seconds
        elif _COMPACTION_STRATEGY == "TimeWindowCompactionStrategy":
            # TODO(c.chary): Tweak this once we have an actual 3.9 setup.

            window_size = max(
                # Documentation says that we should no more than 50 buckets.
                time_to_live / 50,
                # But we don't want multiple sstables per hour.
                HOUR,
                # Also try to optimize for reads
                fresh_time
            )

            # Make it readable.
            if window_size > DAY:
                unit = 'DAYS'
                window_size /= DAY
            else:
                unit = 'HOURS'
                window_size /= HOUR

            cs_kwargs["compaction_window_unit"] = unit
            cs_kwargs["compaction_window_size"] = max(1, window_size)

        compaction_options = cs_template % cs_kwargs
        kwargs = {
            "table": self._get_table_name(stage),
            "default_time_to_live": time_to_live,
            "memtable_flush_period_in_ms": _FLUSH_MEMORY_EVERY_S * 1000,
            "comment": "{\"created_by\": \"biggraphite\", \"schema_version\": 0}",
            "compaction_strategy": _COMPACTION_STRATEGY,
            "compaction_options": compaction_options,
        }

        return _DATAPOINTS_CREATION_CQL_TEMPLATE % kwargs

    def _create_datapoints_table(self, stage):
        # The statement is idempotent
        statement_str = self._create_datapoints_table_stmt(stage)
        self._session.execute(statement_str)

    def _get_table_name(self, stage):
        # One table per (points, precision) pair, e.g. datapoints_2000p_60s.
        return "\"{}\".\"datapoints_{}p_{}s\"".format(
            self._keyspace, stage.points, stage.precision)

    def prepare_insert(self, stage, metric_id, time_start_ms, offset, value, count):
        """Return (statement, args) for inserting one point; statement is
        None in bulkimport mode (the point is written to CSV instead)."""
        statement = self.__stage_to_insert.get(stage)
        args = (metric_id, time_start_ms, offset, value, count)

        if self._bulkimport:
            self._bulkimport_write_datapoint(stage, args)
            return None, args

        if statement:
            return statement, args

        # First use of this stage: make sure the table exists, then prepare.
        self._create_datapoints_table(stage)
        statement_str = (
            "INSERT INTO %(table)s"
            " (metric, time_start_ms, offset, value, count)"
            " VALUES (?, ?, ?, ?, ?);"
        ) % {"table": self._get_table_name(stage)}
        statement = self._session.prepare(statement_str)
        statement.consistency_level = cassandra.ConsistencyLevel.ONE
        self.__stage_to_insert[stage] = statement
        return statement, args

    def prepare_select(self, stage, metric_id, row_start_ms, row_min_offset, row_max_offset):
        """Return (statement, args) for reading one partition's offset range."""
        statement = self.__stage_to_select.get(stage)
        args = (metric_id, row_start_ms, row_min_offset, row_max_offset)
        if statement:
            return statement, args

        # First use of this stage: make sure the table exists, then prepare.
        self._create_datapoints_table(stage)
        statement_str = (
            "SELECT time_start_ms, offset, value, count FROM %(table)s"
            " WHERE metric=? AND time_start_ms=?"
            " AND offset >= ? AND offset < ? "
            " ORDER BY offset;"
        ) % {"table": self._get_table_name(stage)}
        statement = self._session.prepare(statement_str)
        statement.consistency_level = cassandra.ConsistencyLevel.ONE
        self.__stage_to_select[stage] = statement
        return statement, args
class _CassandraAccessor(bg_accessor.Accessor):
"""Provides Read/Write accessors to Cassandra.
Please refer to bg_accessor.Accessor.
"""
_UUID_NAMESPACE = uuid.UUID('{00000000-1111-2222-3333-444444444444}')
    def __init__(self,
                 keyspace='biggraphite',
                 contact_points=DEFAULT_CONTACT_POINTS,
                 port=DEFAULT_PORT,
                 connections=DEFAULT_CONNECTIONS,
                 timeout=DEFAULT_TIMEOUT,
                 compression=DEFAULT_COMPRESSION,
                 max_metrics_per_glob=DEFAULT_MAX_METRICS_PER_GLOB,
                 trace=DEFAULT_TRACE,
                 bulkimport=DEFAULT_BULKIMPORT):
        """Record parameters needed to connect.

        Args:
          keyspace: Base names of Cassandra keyspaces dedicated to BigGraphite.
          contact_points: list of strings, the hostnames or IP to use to discover Cassandra.
          port: The port to connect to, as an int.
          connections: How many worker threads to use.
          timeout: Default timeout for operations in seconds.
          compression: One of False, True, "lz4", "snappy"
          max_metrics_per_glob: int, Maximum number of metrics per glob.
          trace: bool, Enabling query tracing.
          bulkimport: bool, Configure the accessor to generate files necessary for
            bulk import.
        """
        backend_name = "cassandra:" + keyspace
        super(_CassandraAccessor, self).__init__(backend_name)
        self.keyspace = keyspace
        # Metadata (metric names, directories) live in a separate keyspace.
        self.keyspace_metadata = keyspace + "_metadata"
        self.contact_points = contact_points
        self.port = port
        self.max_metrics_per_glob = max_metrics_per_glob
        self.__connections = connections
        self.__compression = compression
        self.__trace = trace
        self.__bulkimport = bulkimport
        # For some reason this isn't enabled yet for pypy, even if it seems to
        # be working properly.
        # See https://github.com/datastax/python-driver/blob/master/cassandra/cluster.py#L188
        self.__load_balancing_policy = (
            c_policies.TokenAwarePolicy(c_policies.DCAwareRoundRobinPolicy()))
        self.__downsampler = _downsampling.Downsampler()
        self.__cluster = None  # setup by connect()
        self.__lazy_statements = None  # setup by connect()
        self.__timeout = timeout
        self.__insert_metrics_statement = None  # setup by connect()
        self.__select_metric_statement = None  # setup by connect()
        self.__session = None  # setup by connect()
    def connect(self, skip_schema_upgrade=False):
        """See bg_accessor.Accessor.

        Establishes the cluster connection, optionally upgrades the schema,
        and prepares the metadata (metrics/directories) statements.
        """
        super(_CassandraAccessor, self).connect(skip_schema_upgrade=skip_schema_upgrade)
        self.__cluster = c_cluster.Cluster(
            self.contact_points, self.port,
            executor_threads=self.__connections,
            compression=self.__compression,
            load_balancing_policy=self.__load_balancing_policy,
        )
        self.__cluster.connection_class = _CappedConnection  # Limits in flight requests
        self.__cluster.row_factory = c_query.tuple_factory  # Saves 2% CPU
        self.__session = self.__cluster.connect()
        if self.__timeout:
            self.__session.default_timeout = self.__timeout
        if not skip_schema_upgrade:
            self._upgrade_schema()
        self.__lazy_statements = _LazyPreparedStatements(
            self.__session, self.keyspace, self.__bulkimport)

        # Metadata (metrics and directories)
        components_names = ", ".join("component_%d" % n for n in range(_COMPONENTS_MAX_LEN))
        components_marks = ", ".join("?" for n in range(_COMPONENTS_MAX_LEN))
        self.__insert_metrics_statement = self.__session.prepare(
            "INSERT INTO \"%s\".metrics (name, id, config, %s) VALUES (?, ?, ?, %s);"
            % (self.keyspace_metadata, components_names, components_marks)
        )
        self.__insert_directories_statement = self.__session.prepare(
            "INSERT INTO \"%s\".directories (name, %s) VALUES (?, %s) IF NOT EXISTS;"
            % (self.keyspace_metadata, components_names, components_marks)
        )
        self.__select_metric_statement = self.__session.prepare(
            "SELECT id, config FROM \"%s\".metrics WHERE name = ?;" % self.keyspace_metadata
        )

        self.is_connected = True
    def _execute(self, *args, **kwargs):
        """Wrapper for __session.execute().

        Logs the statement, optionally records and logs the query trace,
        and short-circuits to an empty result in bulkimport mode.
        """
        if self.__bulkimport:
            return []

        if self.__trace:
            kwargs["trace"] = True

        log.debug(' '.join([str(arg) for arg in args]))
        result = self.__session.execute(*args, **kwargs)

        if self.__trace:
            trace = result.get_query_trace()
            for e in trace.events:
                log.debug("%s: %s" % (e.source_elapsed, str(e)))
        return result
def _execute_async(self, *args, **kwargs):
"""Wrapper for __session.execute_async()."""
if self.__bulkimport:
class _FakeFuture(object):
def add_callbacks(self, on_result, on_failure):
on_result(None)
return _FakeFuture()
if self.__trace:
kwargs["trace"] = True
log.debug(' '.join([str(arg) for arg in args]))
future = self.__session.execute_async(*args, **kwargs)
if self.__trace:
trace = future.get_query_trace()
for e in trace.events:
log.debug(e.source_elapsed, e.description)
return future
    def _execute_concurrent(self, session, statements_and_parameters, **kwargs):
        """Wrapper for concurrent.execute_concurrent().

        With tracing enabled the statements run sequentially through
        ``_execute`` so each one's trace gets logged; the return value
        mimics execute_concurrent's list of (success, result_or_exc).
        """
        if self.__bulkimport:
            return []

        if not self.__trace:
            return c_concurrent.execute_concurrent(
                session, statements_and_parameters, **kwargs)

        query_results = []
        for statement, params in statements_and_parameters:
            try:
                result = self._execute(statement, params, trace=True)
                success = True
            except Exception as e:
                result = e
                success = False
            query_results.append((success, result))
        return query_results
def make_metric(self, name, metadata):
"""See bg_accessor.Accessor."""
encoded_name = bg_accessor.encode_metric_name(name)
id = uuid.uuid5(self._UUID_NAMESPACE, encoded_name)
return bg_accessor.Metric(name, id, metadata)
    def create_metric(self, metric):
        """See bg_accessor.Accessor.

        Creates any missing parent directories first, then inserts the
        metric row.  The queries deliberately run one at a time (see the
        comment before the final loop).
        """
        super(_CassandraAccessor, self).create_metric(metric)
        if self.__bulkimport:
            return
        components = self._components_from_name(metric.name)
        queries = []
        # Check if parent dir exists. This is one round-trip but worthwile since
        # otherwise creating each parent directory requires a round-trip and the
        # vast majority of metrics have siblings.
        parent_dir = metric.name.rpartition(".")[0]
        if parent_dir and not self.glob_directory_names(parent_dir):
            queries.extend(self._create_parent_dirs_queries(components))
        # Finally, create the metric
        padding = [None] * (_COMPONENTS_MAX_LEN - len(components))
        metadata_dict = metric.metadata.as_string_dict()
        queries.append((
            self.__insert_metrics_statement,
            [metric.name, metric.id, metadata_dict] + components + padding,
        ))
        # We have to run queries in sequence as:
        # - we want them to have IF NOT EXISTS ease the hotspot on root directories
        # - we do not want directories or metrics without parents (not handled by callee)
        # - batch queries cannot contain IF NOT EXISTS and involve multiple primary keys
        # We can still end up with empty directories, which will need a reaper job to clean them.
        for statement, args in queries:
            self._execute(statement, args)
def _create_parent_dirs_queries(self, components):
queries = []
directory_path = []
for component in components[:-2]: # -1 for _LAST_COMPONENT, -1 for metric
directory_path.append(component)
directory_name = ".".join(directory_path)
directory_components = directory_path + [_LAST_COMPONENT]
directory_padding = [None] * (_COMPONENTS_MAX_LEN - len(directory_components))
queries.append((
self.__insert_directories_statement,
[directory_name] + directory_components + directory_padding,
))
return queries
@staticmethod
def _components_from_name(metric_name):
res = metric_name.split(".")
res.append(_LAST_COMPONENT)
return res
def drop_all_metrics(self):
"""See bg_accessor.Accessor."""
super(_CassandraAccessor, self).drop_all_metrics()
for keyspace in self.keyspace, self.keyspace_metadata:
statement_str = "SELECT table_name FROM system_schema.tables WHERE keyspace_name = %s;"
tables = [r[0] for r in self._execute(statement_str, (keyspace, ))]
for table in tables:
self._execute("TRUNCATE \"%s\".\"%s\";" % (keyspace, table))
    def fetch_points(self, metric, time_start, time_end, stage):
        """See bg_accessor.Accessor.

        Issues one SELECT per storage row covering [time_start, time_end]
        for the given retention stage and returns a PointGrouper that
        merges the result sets.
        """
        super(_CassandraAccessor, self).fetch_points(
            metric, time_start, time_end, stage)
        log.debug(
            "fetch: [%s, start=%d, end=%d, stage=%s]",
            metric.name, time_start, time_end, stage)
        time_start_ms = int(time_start) * 1000
        time_end_ms = int(time_end) * 1000
        # Never look back further than the stage's retention duration.
        time_start_ms = max(time_end_ms - stage.duration_ms, time_start_ms)
        statements_and_args = self._fetch_points_make_selects(
            metric.id, time_start_ms, time_end_ms, stage)
        query_results = self._execute_concurrent(
            self.__session,
            statements_and_args,
            results_generator=True,
        )
        return bg_accessor.PointGrouper(
            metric, time_start_ms, time_end_ms, stage, query_results)
    def _fetch_points_make_selects(self, metric_id, time_start_ms,
                                   time_end_ms, stage):
        """Build one (statement, params) SELECT per storage row in range."""
        # We fetch with ms precision, even though we only store with second
        # precision.
        row_size_ms_stage = _row_size_ms(stage)
        first_row = bg_accessor.round_down(time_start_ms, row_size_ms_stage)
        last_row = bg_accessor.round_down(time_end_ms, row_size_ms_stage)
        res = []
        # xrange(a,b) does not contain b, so we use last_row+1
        for row_start_ms in xrange(first_row, last_row + 1, row_size_ms_stage):
            # adjust min/max offsets to select everything
            row_min_offset_ms = 0
            row_max_offset_ms = row_size_ms_stage
            if row_start_ms == first_row:
                # The requested range may begin mid-row.
                row_min_offset_ms = time_start_ms - row_start_ms
            if row_start_ms == last_row:
                # The requested range may end mid-row.
                row_max_offset_ms = time_end_ms - row_start_ms
            row_min_offset = stage.step_ms(row_min_offset_ms)
            row_max_offset = stage.step_ms(row_max_offset_ms)
            select = self.__lazy_statements.prepare_select(
                stage=stage, metric_id=metric_id, row_start_ms=row_start_ms,
                row_min_offset=row_min_offset, row_max_offset=row_max_offset,
            )
            res.append(select)
        return res
def has_metric(self, metric_name):
"""See bg_accessor.Accessor."""
super(_CassandraAccessor, self).has_metric(metric_name)
encoded_metric_name = bg_accessor.encode_metric_name(metric_name)
result = list(self._execute(
self.__select_metric_statement, (encoded_metric_name, )))
if not result:
return False
# Small trick here: we also check that the parent directory
# exists because that's what we check to create the directory
# hierarchy.
parent_dir = metric_name.rpartition(".")[0]
if parent_dir and not self.glob_directory_names(parent_dir):
return False
return True
def get_metric(self, metric_name):
"""See bg_accessor.Accessor."""
super(_CassandraAccessor, self).get_metric(metric_name)
metric_name = bg_accessor.encode_metric_name(metric_name)
result = list(self._execute(
self.__select_metric_statement, (metric_name, )))
if not result:
return None
id = result[0][0]
config = result[0][1]
metadata = bg_accessor.MetricMetadata.from_string_dict(config)
return bg_accessor.Metric(metric_name, id, metadata)
    def glob_directory_names(self, glob):
        """Return a sorted list of metric directories matching this glob."""
        super(_CassandraAccessor, self).glob_directory_names(glob)
        # Shares the component-based lookup with glob_metric_names().
        return self.__glob_names("directories", glob)
    def glob_metric_names(self, glob):
        """Return a sorted list of metric names matching this glob."""
        super(_CassandraAccessor, self).glob_metric_names(glob)
        # Shares the component-based lookup with glob_directory_names().
        return self.__glob_names("metrics", glob)
def __glob_names(self, table, glob):
components = self._components_from_name(glob)
if len(components) > _COMPONENTS_MAX_LEN:
msg = "Metric globs can have a maximum of %d dots" % _COMPONENTS_MAX_LEN - 2
raise bg_accessor.InvalidGlobError(msg)
where_parts = [
"component_%d = %s" % (n, c_encoder.cql_quote(s))
for n, s in enumerate(components)
if s != "*"
]
if len(where_parts) == len(components):
# No wildcard, skip indexes
where = "name = " + c_encoder.cql_quote(glob)
else:
where = " AND ".join(where_parts)
query = (
"SELECT name FROM \"%(keyspace)s\".\"%(table)s\""
" WHERE %(where)s LIMIT %(limit)d ALLOW FILTERING;"
) % {
"keyspace": self.keyspace_metadata, "table": table, "where": where,
"limit": self.max_metrics_per_glob + 1,
}
try:
metrics_names = [r[0] for r in self._execute(query)]
except Exception as e:
raise RetryableCassandraError(e)
if len(metrics_names) > self.max_metrics_per_glob:
msg = "%s yields more than %d results" % (glob, self.max_metrics_per_glob)
raise TooManyMetrics(msg)
metrics_names.sort()
return metrics_names
    def insert_points_async(self, metric, datapoints, on_done=None):
        """See bg_accessor.Accessor."""
        super(_CassandraAccessor, self).insert_points_async(
            metric, datapoints, on_done)
        log.debug("insert: [%s, %s]", metric.name, datapoints)
        # Raw points are aggregated per retention stage first; only the
        # downsampled values are written to Cassandra.
        downsampled = self.__downsampler.feed(metric, datapoints)
        return self.insert_downsampled_points_async(metric, downsampled, on_done)
    def insert_downsampled_points_async(self, metric, downsampled, on_done=None):
        """See bg_accessor.Accessor.

        Writes already-downsampled (timestamp, value, count, stage)
        tuples; *on_done* (if given) fires exactly once, after the last
        write has resolved.
        """
        if not downsampled and on_done:
            on_done(None)
            return
        count_down = None
        if on_done:
            # Invoke on_done only after every per-point write completes.
            count_down = _CountDown(count=len(downsampled), on_zero=on_done)
        for timestamp, value, count, stage in downsampled:
            timestamp_ms = int(timestamp) * 1000
            # Locate the partition row and the offset inside that row.
            time_offset_ms = timestamp_ms % _row_size_ms(stage)
            time_start_ms = timestamp_ms - time_offset_ms
            offset = stage.step_ms(time_offset_ms)
            statement, args = self.__lazy_statements.prepare_insert(
                stage=stage, metric_id=metric.id, time_start_ms=time_start_ms,
                offset=offset, value=value, count=count,
            )
            future = self._execute_async(query=statement, parameters=args)
            if count_down:
                future.add_callbacks(
                    count_down.on_cassandra_result,
                    count_down.on_cassandra_failure,
                )
def repair(self, start_key=None, end_key=None, shard=0, nshards=1):
"""See bg_accessor.Accessor.
Slight change for start_key and end_key, they are intrepreted as
tokens directly.
"""
super(_CassandraAccessor, self).repair()
partitioner = self.__cluster.metadata.partitioner
if partitioner != "org.apache.cassandra.dht.Murmur3Partitioner":
log.warn("Partitioner '%s' not supported for repairs" % partitioner)
return
start_token = murmur3.INT64_MIN
stop_token = murmur3.INT64_MAX
if nshards > 1:
tokens = murmur3.INT64_MAX - murmur3.INT64_MIN
my_tokens = tokens / nshards
start_token += my_tokens * shard
stop_token = start_token + my_tokens
if start_key is not None:
start_token = int(start_key)
if end_key is not None:
end_key = int(end_key)
# Step 1: Create missing parent directories.
statement_str = (
"SELECT name, token(name) FROM \"%s\".directories "
"WHERE token(name) > ? LIMIT 1;" %
self.keyspace_metadata)
statement = self.__session.prepare(statement_str)
statement.consistency_level = cassandra.ConsistencyLevel.QUORUM
statement.retry_policy = cassandra.policies.DowngradingConsistencyRetryPolicy
statement.request_timeout = 60
token = start_token
while token < stop_token:
args = (token,)
result = self._execute(statement, args)
if len(result.current_rows) == 0:
break
for row in result:
parent_dir = row.name.rpartition(".")[0]
if parent_dir and not self.glob_directory_names(parent_dir):
log.warning("Creating missing parent dir '%s'" % parent_dir)
components = self._components_from_name(row.name)
queries = self._create_parent_dirs_queries(components)
for statement, args in queries:
self._execute(statement, args)
token = row.system_token_name
def shutdown(self):
"""See bg_accessor.Accessor."""
super(_CassandraAccessor, self).shutdown()
if self.is_connected:
try:
self.__cluster.shutdown()
except Exception as exc:
raise CassandraError(exc)
self.__cluster = None
self.is_connected = False
    def _upgrade_schema(self):
        """Create the metadata tables when missing.

        Keyspaces themselves must already exist; only tables are created.
        """
        # Currently no change, so only upgrade operation is to setup
        self.__cluster.refresh_schema_metadata()
        keyspaces = self.__cluster.metadata.keyspaces.keys()
        for keyspace in [self.keyspace, self.keyspace_metadata]:
            if keyspace not in keyspaces:
                raise CassandraError("Missing keyspace '%s'." % keyspace)
        tables = self.__cluster.metadata.keyspaces[self.keyspace_metadata].tables
        if 'metrics' in tables and 'directories' in tables:
            # Schema already in place; nothing to do.
            return
        for cql in _METADATA_CREATION_CQL:
            self._execute(cql % {"keyspace": self.keyspace_metadata})
def build(*args, **kwargs):
    """Return a bg_accessor.Accessor using Cassandra.

    Args:
      keyspace: Base name of Cassandra keyspaces dedicated to BigGraphite.
      contact_points: list of strings, the hostnames or IP to use to discover Cassandra.
      port: The port to connect to, as an int.
      connections: How many worker threads to use.
    """
    return _CassandraAccessor(*args, **kwargs)
| {
"content_hash": "ad697185f31301e98d8d190fd8b3bd8a",
"timestamp": "",
"source": "github",
"line_count": 885,
"max_line_length": 99,
"avg_line_length": 39.454237288135594,
"alnum_prop": 0.6144857805653406,
"repo_name": "natbraun/biggraphite",
"id": "837cd7d53eb528355f20dd786ab162c98ff2fb35",
"size": "35510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biggraphite/drivers/cassandra.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "245802"
}
],
"symlink_target": ""
} |
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$May 18, 2015 16:46:39 EDT$"
# Expose the package version from the generated _version module
# (presumably versioneer output — confirm), then drop the helper so it
# does not leak into the public namespace.
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| {
"content_hash": "0bf9ddd89c1d6f104f239c09256f1f9b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 55,
"avg_line_length": 27.285714285714285,
"alnum_prop": 0.6701570680628273,
"repo_name": "nanshe-org/splauncher",
"id": "11435039ed5de234efaf098456a367671a4e7885",
"size": "191",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "splauncher/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "95894"
},
{
"name": "Shell",
"bytes": "3526"
}
],
"symlink_target": ""
} |
import uuid
from cfgm_common import exceptions as vnc_exc
from vnc_api import vnc_api
import contrail_res_handler as res_handler
import sgrule_res_handler as sgrule_handler
class SecurityGroupMixin(object):
    """Shared conversion and bootstrap helpers for security-group handlers."""
    def _security_group_vnc_to_neutron(self, sg_obj,
                                       contrail_extensions_enabled=False,
                                       fields=None):
        """Convert a vnc_api SecurityGroup object to a neutron-style dict."""
        sg_q_dict = {}
        extra_dict = {}
        extra_dict['contrail:fq_name'] = sg_obj.get_fq_name()
        # replace field names
        sg_q_dict['id'] = sg_obj.uuid
        sg_q_dict['tenant_id'] = self._project_id_vnc_to_neutron(
            sg_obj.parent_uuid)
        if not sg_obj.display_name:
            # for security groups created directly via vnc_api
            sg_q_dict['name'] = sg_obj.get_fq_name()[-1]
        else:
            sg_q_dict['name'] = sg_obj.display_name
        sg_q_dict['description'] = sg_obj.get_id_perms().get_description()
        # get security group rules
        sg_q_dict['security_group_rules'] = []
        rule_list = sgrule_handler.SecurityGroupRuleHandler(
            self._vnc_lib).security_group_rules_read(sg_obj)
        if rule_list:
            for rule in rule_list:
                sg_q_dict['security_group_rules'].append(rule)
        if contrail_extensions_enabled:
            # Expose contrail-specific keys such as 'contrail:fq_name'.
            sg_q_dict.update(extra_dict)
        if fields:
            # Trim the response to the caller-requested fields.
            sg_q_dict = self._filter_res_dict(sg_q_dict, fields)
        return sg_q_dict
    # end _security_group_vnc_to_neutron
    def _security_group_neutron_to_vnc(self, sg_q, sg_vnc):
        """Copy the updatable neutron fields (name, description) onto *sg_vnc*."""
        if 'name' in sg_q and sg_q['name']:
            sg_vnc.display_name = sg_q['name']
        if 'description' in sg_q:
            id_perms = sg_vnc.get_id_perms()
            id_perms.set_description(sg_q['description'])
            sg_vnc.set_id_perms(id_perms)
        return sg_vnc
    # end _security_group_neutron_to_vnc
    def _create_default_security_group(self, proj_obj):
        """Create the project's 'default' security group and return its uuid."""
        def _get_rule(ingress, sg, prefix, ethertype):
            # NOTE(review): exactly one of sg/prefix must be truthy; if
            # both are falsy, 'addr' below would be unbound.  All current
            # callers satisfy this.
            sgr_uuid = str(uuid.uuid4())
            if sg:
                addr = vnc_api.AddressType(
                    security_group=proj_obj.get_fq_name_str() + ':' + sg)
            elif prefix:
                addr = vnc_api.AddressType(
                    subnet=vnc_api.SubnetType(prefix, 0))
            local_addr = vnc_api.AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = vnc_api.PolicyRuleType(
                rule_uuid=sgr_uuid, direction='>', protocol='any',
                src_addresses=[src_addr],
                src_ports=[vnc_api.PortType(0, 65535)],
                dst_addresses=[dst_addr],
                dst_ports=[vnc_api.PortType(0, 65535)],
                ethertype=ethertype)
            return rule
        # Ingress allowed from the group itself, egress allowed to
        # anywhere — for both IPv4 and IPv6.
        rules = [_get_rule(True, 'default', None, 'IPv4'),
                 _get_rule(True, 'default', None, 'IPv6'),
                 _get_rule(False, None, '0.0.0.0', 'IPv4'),
                 _get_rule(False, None, '::', 'IPv6')]
        sg_rules = vnc_api.PolicyEntriesType(rules)
        # create security group
        id_perms = vnc_api.IdPermsType(enable=True,
                                       description='Default security group')
        sg_obj = vnc_api.SecurityGroup(
            name='default', parent_obj=proj_obj,
            id_perms=id_perms,
            security_group_entries=sg_rules)
        self._vnc_lib.security_group_create(sg_obj)
        return sg_obj.uuid
    def _ensure_default_security_group_exists(self, proj_id):
        """Ensure a 'default' group exists for *proj_id* (all projects if None)."""
        if proj_id is None:
            # Recurse over every known project.
            projects = self._vnc_lib.projects_list()['projects']
            for project in projects:
                self._ensure_default_security_group_exists(project['uuid'])
            return
        proj_id = self._project_id_neutron_to_vnc(proj_id)
        proj_obj = self._vnc_lib.project_read(id=proj_id,
                                              fields=['security_groups'])
        sg_groups = proj_obj.get_security_groups()
        for sg_group in sg_groups or []:
            if sg_group['to'][-1] == 'default':
                # Already present; return its uuid.
                return sg_group['uuid']
        return self._create_default_security_group(proj_obj)
    # end _ensure_default_security_group_exists
class SecurityGroupBaseGet(res_handler.ResourceGetHandler):
    """Base handler binding reads to the VNC security_group_read API."""
    resource_get_method = "security_group_read"
class SecurityGroupGetHandler(SecurityGroupBaseGet, SecurityGroupMixin):
    """Read/list handler translating VNC security groups to neutron dicts."""
    resource_list_method = "security_groups_list"
    def get_sg_obj(self, id=None, fq_name_str=None):
        # Thin accessor used by sibling handlers to fetch the raw vnc object.
        return self._resource_get(id=id, fq_name_str=fq_name_str)
    def resource_get(self, context, sg_id, fields=None):
        """Return one security group as a neutron dict, or raise NotFound."""
        contrail_extensions_enabled = self._kwargs.get(
            'contrail_extensions_enabled', False)
        try:
            sg_obj = self._resource_get(id=sg_id)
        except vnc_exc.NoIdError:
            self._raise_contrail_exception(
                'SecurityGroupNotFound', id=sg_id, resource='security_group')
        return self._security_group_vnc_to_neutron(
            sg_obj, contrail_extensions_enabled, fields=fields)
    def resource_list_by_project(self, project_id, filters=None):
        """Return raw vnc SecurityGroup objects for one project (all if None)."""
        if project_id:
            try:
                project_uuid = self._project_id_neutron_to_vnc(project_id)
                # Trigger a project read to ensure project sync
                self._project_read(proj_id=project_uuid)
            except vnc_exc.NoIdError:
                # Unknown project: nothing to list.
                return []
        else:
            project_uuid = None
        obj_uuids=None
        if filters and 'id' in filters:
            # Narrow the VNC list call to the requested uuids.
            obj_uuids = filters['id']
        sg_objs = self._resource_list(parent_id=project_uuid,
                                      detail=True, obj_uuids=obj_uuids)
        return sg_objs
    def resource_list(self, context, filters=None, fields=None):
        """List security groups visible in *context*, honouring *filters*."""
        ret_list = []
        contrail_extensions_enabled = self._kwargs.get(
            'contrail_extensions_enabled', False)
        # collect phase
        self._ensure_default_security_group_exists(context['tenant'])
        all_sgs = []  # all sgs in all projects
        if context and not context['is_admin']:
            # Non-admin callers only ever see their own project's groups.
            project_sgs = self.resource_list_by_project(
                self._project_id_neutron_to_vnc(context['tenant']),
                filters=filters)
            all_sgs.append(project_sgs)
        else:  # admin context
            if filters and 'tenant_id' in filters:
                project_ids = self._validate_project_ids(
                    context, filters['tenant_id'])
                for p_id in project_ids:
                    project_sgs = self.resource_list_by_project(p_id,
                                                                filters=filters)
                    all_sgs.append(project_sgs)
            else:  # no tenant id filter
                all_sgs.append(self.resource_list_by_project(None,
                                                             filters=filters))
        # prune phase
        no_rule = res_handler.SGHandler(
            self._vnc_lib).get_no_rule_security_group(create=False)
        for project_sgs in all_sgs:
            for sg_obj in project_sgs:
                if no_rule and sg_obj.uuid == no_rule.uuid:
                    # Hide the internal "no rule" placeholder group.
                    continue
                if not self._filters_is_present(
                        filters, 'name',
                        sg_obj.get_display_name() or sg_obj.name):
                    continue
                if not self._filters_is_present(
                        filters, 'description',
                        sg_obj.get_id_perms().get_description()):
                    continue
                sg_info = self._security_group_vnc_to_neutron(
                    sg_obj, contrail_extensions_enabled, fields=fields)
                ret_list.append(sg_info)
        return ret_list
class SecurityGroupDeleteHandler(SecurityGroupBaseGet,
                                 res_handler.ResourceDeleteHandler):
    """Deletes a security group; a tenant cannot delete its own 'default'."""
    resource_delete_method = "security_group_delete"

    def resource_delete(self, context, sg_id):
        """Delete *sg_id*; silently succeed when it no longer exists."""
        try:
            sg_obj = self._resource_get(id=sg_id)
            is_default = sg_obj.name == 'default'
            if is_default and (
                    sg_obj.parent_uuid ==
                    self._project_id_neutron_to_vnc(context['tenant'])):
                # Deny deletion of the caller's own default group.
                self._raise_contrail_exception(
                    'SecurityGroupCannotRemoveDefault')
        except vnc_exc.NoIdError:
            return
        try:
            self._resource_delete(sg_id)
        except vnc_exc.RefsExistError:
            # Still referenced (e.g. by ports): report as "in use".
            self._raise_contrail_exception(
                'SecurityGroupInUse', id=sg_id, resource='security_group')
class SecurityGroupUpdateHandler(res_handler.ResourceUpdateHandler,
                                 SecurityGroupBaseGet,
                                 SecurityGroupMixin):
    """Update handler for security groups."""
    resource_update_method = "security_group_update"
    def resource_update_obj(self, sg_obj):
        # Persist an already-converted vnc object (used by sibling handlers).
        self._resource_update(sg_obj)
    def resource_update(self, context, sg_id, sg_q):
        """Apply the neutron update dict *sg_q* to an existing group."""
        sg_q['id'] = sg_id
        contrail_extensions_enabled = self._kwargs.get(
            'contrail_extensions_enabled', False)
        try:
            sg_obj = self._security_group_neutron_to_vnc(
                sg_q,
                self._resource_get(id=sg_id))
        except vnc_exc.NoIdError:
            self._raise_contrail_exception(
                'SecurityGroupNotFound', id=sg_id, resource='security_group')
        self._resource_update(sg_obj)
        ret_sg_q = self._security_group_vnc_to_neutron(
            sg_obj, contrail_extensions_enabled)
        return ret_sg_q
class SecurityGroupCreateHandler(res_handler.ResourceCreateHandler,
                                 SecurityGroupMixin):
    """Creates a security group plus its default allow-all egress rules."""
    resource_create_method = "security_group_create"

    def _create_security_group(self, sg_q):
        """Build (but do not persist) a vnc SecurityGroup from a neutron dict."""
        project_id = self._project_id_neutron_to_vnc(sg_q['tenant_id'])
        try:
            project_obj = self._project_read(proj_id=project_id)
        except vnc_exc.NoIdError:
            # Consistency fix: _raise_contrail_exception() raises by
            # itself everywhere else in this module; re-raising its
            # (None) return value would mask the real exception with a
            # TypeError.
            self._raise_contrail_exception(
                'ProjectNotFound', project_id=project_id,
                resource='security_group')
        id_perms = vnc_api.IdPermsType(enable=True,
                                       description=sg_q.get('description'))
        return vnc_api.SecurityGroup(name=sg_q['name'],
                                     parent_obj=project_obj,
                                     id_perms=id_perms)

    def _add_default_egress_rules(self, context, sg_uuid):
        """Allow all IPv4 and IPv6 egress traffic for a freshly created group."""
        # Deduplicated: both rules are identical apart from ethertype and
        # remote prefix.
        for ethertype, prefix in (('IPv4', '0.0.0.0/0'), ('IPv6', None)):
            def_rule = {
                'port_range_min': 0,
                'port_range_max': 65535,
                'direction': 'egress',
                'remote_ip_prefix': prefix,
                'remote_group_id': None,
                'protocol': 'any',
                'ethertype': ethertype,
                'security_group_id': sg_uuid,
            }
            sgrule_handler.SecurityGroupRuleHandler(
                self._vnc_lib).resource_create(context, def_rule)

    def resource_create(self, context, sg_q):
        """Create a security group and return its neutron representation."""
        contrail_extensions_enabled = self._kwargs.get(
            'contrail_extensions_enabled', False)
        sg_obj = self._security_group_neutron_to_vnc(
            sg_q,
            self._create_security_group(sg_q))

        # ensure default SG and deny create if the group name is default
        if sg_q['name'] == 'default':
            self._ensure_default_security_group_exists(sg_q['tenant_id'])
            self._raise_contrail_exception(
                "SecurityGroupAlreadyExists", resource='security_group')

        sg_uuid = self._resource_create(sg_obj)
        # allow all egress traffic
        self._add_default_egress_rules(context, sg_uuid)
        ret_sg_q = self._security_group_vnc_to_neutron(
            sg_obj, contrail_extensions_enabled)
        return ret_sg_q
class SecurityGroupHandler(SecurityGroupGetHandler,
                           SecurityGroupCreateHandler,
                           SecurityGroupUpdateHandler,
                           SecurityGroupDeleteHandler):
    """Facade combining the get/create/update/delete handlers."""
    pass
| {
"content_hash": "192485ba9c586ef173bf048679e3c49f",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 80,
"avg_line_length": 39.171779141104295,
"alnum_prop": 0.5534847298355521,
"repo_name": "tcpcloud/contrail-neutron-plugin",
"id": "060f721edd0194d5e67c6ce8effcb4cebf2c9bbd",
"size": "13385",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron_plugin_contrail/plugins/opencontrail/vnc_client/sg_res_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "468387"
}
],
"symlink_target": ""
} |
def can_build(env, platform):
    """The UPNP module builds on every platform."""
    return True
def configure(env):
    """No build-environment configuration is needed for this module."""
    pass
def get_doc_classes():
    """Names of the classes documented by this module."""
    return ["UPNP", "UPNPDevice"]
def get_doc_path():
    """Directory (relative to the module) holding the class documentation."""
    return "doc_classes"
| {
"content_hash": "d950911fb61849f01deab903cfabeb1b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 29,
"avg_line_length": 14.428571428571429,
"alnum_prop": 0.5742574257425742,
"repo_name": "mcanders/godot",
"id": "8724ff1a511c6c2f21b6e8685479bb2a81e5dc4c",
"size": "202",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "modules/upnp/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "50004"
},
{
"name": "C++",
"bytes": "16781438"
},
{
"name": "HTML",
"bytes": "10302"
},
{
"name": "Java",
"bytes": "497034"
},
{
"name": "Makefile",
"bytes": "451"
},
{
"name": "Objective-C",
"bytes": "2644"
},
{
"name": "Objective-C++",
"bytes": "146786"
},
{
"name": "Python",
"bytes": "266116"
},
{
"name": "Shell",
"bytes": "11105"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the Job <-> Training M2M join table by hand.

        South does not auto-create through-tables for M2M fields added to
        an existing model, so the table and its uniqueness constraint are
        built explicitly here.
        """
        # Adding M2M table for field required_courses on 'Job'
        m2m_table_name = db.shorten_name(u'core_job_required_courses')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('job', models.ForeignKey(orm[u'core.job'], null=False)),
            ('training', models.ForeignKey(orm[u'training.training'], null=False))
        ))
        # One (job, training) pair at most once.
        db.create_unique(m2m_table_name, ['job_id', 'training_id'])
    def backwards(self, orm):
        """Reverse the migration: drop the M2M join table again."""
        # Removing M2M table for field required_courses on 'Job'
        db.delete_table(db.shorten_name(u'core_job_required_courses'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.aoi': {
'Meta': {'object_name': 'AOI'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'analyst': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'assignee_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'assignee_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aois'", 'to': u"orm['core.Job']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'polygon': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '5', 'max_length': '1'}),
'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aoi_reviewers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Unassigned'", 'max_length': '15'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.comment': {
'Meta': {'object_name': 'Comment'},
'aoi': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AOI']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'core.job': {
'Meta': {'ordering': "('-created_at',)", 'object_name': 'Job'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'analysts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'analysts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'assignee_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'assignee_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'feature_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['maps.FeatureType']", 'null': 'True', 'blank': 'True'}),
'grid': ('django.db.models.fields.CharField', [], {'default': "'usng'", 'max_length': '5'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.Map']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'progress': ('django.db.models.fields.SmallIntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project'", 'to': u"orm['core.Project']"}),
'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'required_courses': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['training.Training']", 'null': 'True', 'blank': 'True'}),
'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reviewers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.project': {
'Meta': {'ordering': "('-created_at',)", 'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'contributors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project_admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'project_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'project_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.setting': {
'Meta': {'object_name': 'Setting'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'})
},
u'maps.featuretype': {
'Meta': {'ordering': "['-category', 'order', 'name', 'id']", 'object_name': 'FeatureType'},
'category': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '25', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'style': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'maps.map': {
'Meta': {'object_name': 'Map'},
'center_x': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'center_y': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'projection': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'default': '5', 'null': 'True', 'blank': 'True'})
},
u'training.training': {
'Meta': {'object_name': 'Training'},
'category': ('django.db.models.fields.CharField', [], {'default': "'Uncategorized'", 'max_length': '120', 'null': 'True', 'blank': 'True'}),
'content_link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'gamification_signals': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'primary_contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'quiz_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users_completed': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'users_completed'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['core'] | {
"content_hash": "cfc5b65f30c07bfc936b0cb52e744066",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 209,
"avg_line_length": 83.70238095238095,
"alnum_prop": 0.550561797752809,
"repo_name": "jdaigneau/geoq",
"id": "577010d7c7048824bc2c3365ba56ffb4ef4999ac",
"size": "14086",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "geoq/core/migrations/0014_auto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "129228"
},
{
"name": "HTML",
"bytes": "266523"
},
{
"name": "JavaScript",
"bytes": "2011748"
},
{
"name": "Python",
"bytes": "882988"
}
],
"symlink_target": ""
} |
from functional.common import test
class QuotaTests(test.TestCase):
"""Functional tests for quota. """
# Test quota information for compute, network and volume.
EXPECTED_FIELDS = ['instances', 'networks', 'volumes']
PROJECT_NAME = None
@classmethod
def setUpClass(cls):
cls.PROJECT_NAME =\
cls.get_openstack_configuration_value('auth.project_name')
def test_quota_set(self):
self.openstack('quota set --instances 11 --volumes 11 --networks 11 '
+ self.PROJECT_NAME)
opts = self.get_show_opts(self.EXPECTED_FIELDS)
raw_output = self.openstack('quota show ' + self.PROJECT_NAME + opts)
self.assertEqual("11\n11\n11\n", raw_output)
def test_quota_show(self):
raw_output = self.openstack('quota show ' + self.PROJECT_NAME)
for expected_field in self.EXPECTED_FIELDS:
self.assertIn(expected_field, raw_output)
def test_quota_show_default_project(self):
raw_output = self.openstack('quota show')
for expected_field in self.EXPECTED_FIELDS:
self.assertIn(expected_field, raw_output)
| {
"content_hash": "a31f784b9e67cf85bc237b07dacc6843",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 38.3,
"alnum_prop": 0.6475195822454308,
"repo_name": "redhat-openstack/python-openstackclient",
"id": "0bc93db283172a48a51d6c726ecabb315093ade5",
"size": "1722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-patches",
"path": "functional/tests/common/test_quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2229284"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
} |
import argparse
import subprocess
import os
import time

# Command line: class/source paths, a report destination directory and the
# project name shown in the report header.
parser = argparse.ArgumentParser(description='A tool to produce a findbugs report for browsing')
parser.add_argument('-c', '--classpath', nargs='*', type=str,
                    help='Specify path(s) to .class files')
parser.add_argument('-s', '--sourcepath', nargs='*', type=str,
                    help='Specify path(s) to .java files')
parser.add_argument('-d', '--destination', nargs=1, type=str,
                    help='Specify destination for the report.')
parser.add_argument('-p', '--projectname', nargs=1, type=str,
                    help='Provide the name of the project to be analysed.')
args = parser.parse_args()  # Parse arguments

# All four options are mandatory.
if not (args.sourcepath and args.destination and args.classpath and args.projectname):
    parser.error('Not enough arguments supplied. Please use --help to enquire about usage.')

# Build space-prefixed strings of absolute paths; libJXR might break on
# relative paths.
sourcepath = ''.join(' ' + os.path.abspath(p) for p in args.sourcepath)
classpath = ''.join(' ' + os.path.abspath(p) for p in args.classpath)
destination = os.path.abspath(args.destination[0])
projectname = args.projectname[0]

# Lay out the report skeleton: static html plus jxr/ and xml/ folders.
print("Creating files.")
subprocess.check_call(["cp -a html " + destination], shell=True)
subprocess.check_call(["mkdir " + destination + "/jxr"], shell=True)
subprocess.check_call(["mkdir " + destination + "/xml"], shell=True)

# Run the findbugs analysis, producing XML with human-readable messages.
findbugscommand = "findbugs -textui -xml:withMessages -output " + destination +\
    "/xml/findbugs.xml " + classpath
print("Creating findbugs report")
print("Command is " + findbugscommand)
subprocess.check_call([findbugscommand], shell=True)

# Fill the project-specific placeholders in the report intro page in place.
introfile = open(destination + '/intro.html', 'r+')
html = introfile.read()
html = html.replace('<p>Project: </p>', '<p>Project: ' + projectname + '</p>')
analysistime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
html = html.replace('<p>Date: </p>', '<p>Date: ' + analysistime + '</p>')
count_proc = subprocess.Popen(['find '+ sourcepath + ' -name "*.java" | wc -l'], shell=True, stdout=subprocess.PIPE)
count = count_proc.stdout.read()
number = (count.decode('utf-8')).replace('\n', '')
html = html.replace('<p>Number of java files: </p>', '<p>Number of java files: ' + number +'</p>')
introfile.seek(0)
introfile.write(html)
introfile.truncate()
introfile.close()

# Generate the Java cross-reference pages next to the findbugs output.
jxrcommand = "java -jar jxr-er-1.0-jar-with-dependencies.jar -d " +\
    destination + "/jxr -s " + sourcepath
print("Creating Java Cross Reference using jxr-er")
print("Command is " + jxrcommand)
subprocess.check_call([jxrcommand], shell=True)

print("Done! Start an http server in order to view the report.")
print("One way to do that is by using python: ")
print("cd " + destination)
print("python2 -m SimpleHTTPServer")
print("Now go on http://127.0.0.1:8000")
| {
"content_hash": "0079c275f16c8defea0c93ac1b745e0e",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 116,
"avg_line_length": 38.67948717948718,
"alnum_prop": 0.6794829300629764,
"repo_name": "ContemplateLtd/findbugs-reporter",
"id": "d409f868be5a697313269401f0eafe4d2018176f",
"size": "3041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "findbugs-reporter/src/deploy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27695"
},
{
"name": "Java",
"bytes": "6952"
},
{
"name": "JavaScript",
"bytes": "242577"
},
{
"name": "Python",
"bytes": "3041"
}
],
"symlink_target": ""
} |
"""Any function from n inputs to m outputs"""
import logging
from itertools import zip_longest
import pypes.component
log = logging.getLogger(__name__)
def default_function(*args):
    """Identity pass-through: return the received inputs as a tuple."""
    return args
class NMFunction(pypes.component.Component):
    """
    Apply an arbitrary function from N inputs to M outputs.

    mandatory input packet attributes:
    - data: for each of the input ports

    parameters:
    - function: [default: merge the inputs into a list if more than one
                 input, then replicate over all the outputs]

    output packet attributes:
    - data: each of the M outputs goes to an output port
    """

    # defines the type of component we're creating.
    __metatype__ = 'TRANSFORMER'

    def __init__(self, n=1, m=1):
        """Create the component with ``n`` input and ``m`` output ports."""
        # initialize parent class
        pypes.component.Component.__init__(self)
        self._n = n
        self._m = m
        # Port naming convention: the first pair is "in"/"out", extras are
        # "in1".."in{n-1}" and "out1".."out{m-1}".
        self._in_ports = ["in"]
        self._out_ports = ["out"]
        if n > 1:
            self._in_ports += ["in{0}".format(i)
                               for i in range(1, n)]
        for port in self._in_ports:
            self.add_input(port, 'input')
        if m > 1:
            self._out_ports += ["out{0}".format(i)
                                for i in range(1, m)]
        for port in self._out_ports:
            self.add_output(port, 'output')
        # Setup any user parameters required by this component
        # 2nd arg is the default value, 3rd arg is optional list of choices
        self.set_parameter('function', default_function)
        # log successful initialization message
        log.debug('Component Initialized: %s', self.__class__.__name__)

    def run(self):
        # Define our components entry point
        while True:
            function = self.get_parameter('function')
            name = function.__name__
            # One (blocking) receive per input port.
            packets = [self.receive(port)
                       for port in self._in_ports]
            try:
                args = [packet.get("data")
                        for packet in packets]
                log.debug("%s: args %s", name, args)
                results = function(*args)
                log.debug("%s: results %s", name, results)
                if self._m == 1:
                    # Single output: reuse the first input packet so its
                    # other attributes travel along with the result.
                    packet = packets[0]
                    packet.set("data", results[0])
                    self.send("out", packet)
                elif self._m > 1 and len(results) <= self._m:
                    # Fan out over the output ports; when there are fewer
                    # results than ports, replicate the last result.
                    for result, port in zip_longest(results,
                                                    self._out_ports,
                                                    fillvalue=results[-1]):
                        packet = pypes.packet.Packet()
                        for key, value in packets[0]:
                            packet.set(key, value)
                        packet.set("data", result)
                        log.debug("%s: sending %s to %s",
                                  name, packet.get("data"), port)
                        self.send(port, packet)
                else:
                    raise ValueError("too many results!")
            except Exception:
                # BUGFIX: was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt (and GeneratorExit raised
                # when the scheduler closes this task). Narrowed to
                # ordinary exceptions; failures are logged and the
                # component keeps running.
                log.error('Component Failed: %s',
                          name, exc_info=True)
            # yield the CPU, allowing another component to run
            self.yield_ctrl()
| {
"content_hash": "e5ad550afa64a91fc381b8ffe7f98b08",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 75,
"avg_line_length": 34.244897959183675,
"alnum_prop": 0.4940405244338498,
"repo_name": "Enucatl/pypes",
"id": "34745d31e8528b90201cd45a66d04fa04c2adb86",
"size": "3356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypes/plugins/nm_function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168335"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import jsl
from .._compat import iteritems, iterkeys
def _indent(line, spaces=4):
return ' ' * spaces + line
def _get_type_hint(field, class_to_helpers):
    """Return a PyCharm type-hint string for *field*, or ``None``.

    NOTE(review): the isinstance order is preserved from the original;
    it may be significant if jsl field classes subclass one another
    (e.g. IntField vs. NumberField) -- do not reorder without checking.
    """
    if isinstance(field, jsl.BooleanField):
        return 'bool'
    if isinstance(field, jsl.StringField):
        return 'str'
    if isinstance(field, jsl.IntField):
        return 'int'
    if isinstance(field, jsl.NumberField):
        return 'numbers.Number'
    if isinstance(field, jsl.ArrayField):
        item_hint = _get_type_hint(field.items, class_to_helpers)
        return 'list[{}]'.format(item_hint)
    if isinstance(field, jsl.DictField):
        return 'dict'
    if isinstance(field, jsl.OneOfField):
        variants = [_get_type_hint(f, class_to_helpers) for f in field.fields]
        return ' | '.join(variants)
    if isinstance(field, jsl.DocumentField):
        return class_to_helpers.get(field.document_cls)
    return None
def _get_type_declaration(name, field, class_to_helpers):
    """Return the ``:type name: hint`` docstring line(s) for a field.

    :param name: field name
    :type name: str
    :param field: field to get a hint for
    :type field: instance of :class:`jsl.BaseField`
    :rtype: list of strings
    """
    hint = _get_type_hint(field, class_to_helpers)
    if not hint:
        return []
    return [':type {0}: {1}'.format(name, hint)]
def _get_cls_docstring(document_cls, class_to_helpers):
    """Build the helper-class docstring lines with ``:type`` declarations.

    :type document_cls: :class:`jsl.Document`
    :rtype: list of strings
    """
    lines = ['"""']
    for field_name, field in iteritems(document_cls._fields):
        lines.extend(
            _get_type_declaration(field_name, field, class_to_helpers))
    lines.append('"""')
    return lines
def _get_cls_fields(document_cls):
    """Produce one ``name = Attribute('name')`` line per document field.

    :type document_cls: :class:`jsl.Document`
    :rtype: list of strings
    """
    return ['{0} = Attribute(\'{0}\')'.format(field_name)
            for field_name in iterkeys(document_cls._fields)]
def _get_class(name, document_cls, class_to_helpers):
    """Render one helper class definition as a list of source lines.

    :type name: str
    :type document_cls: :class:`jsl.Document`
    :rtype: list of strings
    """
    body = _get_cls_docstring(document_cls, class_to_helpers)
    body += _get_cls_fields(document_cls)
    lines = ['class {0}(DotExpandedDict):'.format(name)]
    lines.extend(_indent(line) for line in body)
    return lines
def generate_module(classes):
    """Generate the source lines of a PyCharm helper module.

    :param classes: a dictionary mapping helper names to their JSL-documents
    :type classes: dict from str to :class:`jsl.Document`
    :rtype: list of strings
    """
    lines = ['from flask.ext.tuktuk.helpers import DotExpandedDict, Attribute', '', '']
    # Reverse mapping, used to resolve DocumentField references back to
    # their helper-class names.
    class_to_helpers = dict((v, k) for k, v in iteritems(classes))
    # BUGFIX: removed a leftover ``print class_to_helpers`` debug statement;
    # it was Python-2-only syntax (a SyntaxError under Python 3) and
    # polluted stdout on every call.
    for helper_name, document_cls in iteritems(classes):
        lines.extend(_get_class(helper_name, document_cls, class_to_helpers))
        lines.extend(['', ''])
    return lines
| {
"content_hash": "1c555d271fb0931b16019bd307fbb274",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 94,
"avg_line_length": 31.705263157894738,
"alnum_prop": 0.6394422310756972,
"repo_name": "aromanovich/flask-tuktuk",
"id": "adcf58caaaa1dfceb9ebd61009fe437fb67c709a",
"size": "3028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_tuktuk/helpers/pycharm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26688"
},
{
"name": "Shell",
"bytes": "164"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    # Factory for this medpack tangible template. ``kernel`` is supplied
    # by the engine's template loader and is unused here.
    result = Tangible()
    result.template = "object/tangible/medicine/crafted/shared_medpack_disease_willpower_a.iff"
    result.attribute_template_id = 7
    result.stfName("medicine_name","medic_disease_willpower_a")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
return result | {
"content_hash": "47eeb882310de3bba6673b5061ea6bcf",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 26.23076923076923,
"alnum_prop": 0.718475073313783,
"repo_name": "obi-two/Rebelion",
"id": "8ecb1dd9910cda980a1a9938e6fcbcdb7a00444f",
"size": "486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_disease_willpower_a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import time
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
def time_it(func):
    """Call *func* once and print how long the call took.

    BUGFIX: use ``func.__name__`` instead of ``func.func_name`` -- the
    latter is Python-2-only (removed in Python 3), while ``__name__``
    works on both.
    """
    start = time.time()
    func()
    print('%s took %.3fs' % (func.__name__, time.time() - start))
class SomeObject(object):
    """Plain object hung off the actor as a traversable attribute."""

    baz = 'bar.baz'

    def func(self):
        """No-op method; exists only to measure call overhead."""
        return None
class AnActor(ThreadingActor):
    # ``bar`` is marked pykka_traversable so proxy lookups such as
    # ``actor.bar.baz`` reach into the plain object's attributes.
    bar = SomeObject()
    bar.pykka_traversable = True
    # Plain attribute exercised by the direct-access benchmarks.
    foo = 'foo'
    def __init__(self):
        self.baz = 'quox'
    def func(self):
        # No-op target for the direct-callable benchmark.
        pass
def test_direct_plain_attribute_access():
    """Time 10k proxy reads of a plain attribute on the actor itself."""
    proxy = AnActor.start().proxy()
    for _ in range(10000):
        proxy.foo.get()
def test_direct_callable_attribute_access():
    """Time 10k proxy calls of a method on the actor itself."""
    proxy = AnActor.start().proxy()
    for _ in range(10000):
        proxy.func().get()
def test_traversible_plain_attribute_access():
    """Time 10k proxy reads of an attribute on a traversable object."""
    proxy = AnActor.start().proxy()
    for _ in range(10000):
        proxy.bar.baz.get()
def test_traversible_callable_attribute_access():
    """Time 10k proxy calls of a method on a traversable object."""
    proxy = AnActor.start().proxy()
    for _ in range(10000):
        proxy.bar.func().get()
if __name__ == '__main__':
    try:
        # Run the four benchmarks in order of increasing traversal depth.
        for benchmark in (test_direct_plain_attribute_access,
                          test_direct_callable_attribute_access,
                          test_traversible_plain_attribute_access,
                          test_traversible_callable_attribute_access):
            time_it(benchmark)
    finally:
        # Always tear the started actors down, even if a benchmark fails.
        ActorRegistry.stop_all()
| {
"content_hash": "7aa0d584253af25133e74f228b6a41bc",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 66,
"avg_line_length": 21.109375,
"alnum_prop": 0.6291635825314582,
"repo_name": "PabloTunnon/pykka-deb",
"id": "3f03c310c171625c7e4bce209389dbdd527f8d87",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/performance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "284"
},
{
"name": "Python",
"bytes": "84620"
}
],
"symlink_target": ""
} |
import warnings as _warnings
# Escalate every warning into an error so this example fails loudly.
_warnings.resetwarnings()
_warnings.filterwarnings('error')
from tdi import text
# Compile a text-mode TDI template; [[name]] and [item]...[/item] are the
# placeholders filled in by the Model below.
template = text.from_string("""
Hello [[name]]!
Look, hey, this is a text []template]. And here's a list:
[item]* some stuff[/item][tdi=":item"]
[/]
Thanks for [[+listening]].
""".lstrip())
class Model(object):
    """Render model: fills in the greeting name and expands the list."""

    def render_name(self, node):
        """Set the greeting name (deliberately non-ASCII)."""
        node.content = u"Andr\xe9"

    def render_item(self, node):
        """Repeat the item node once per fruit."""
        fruits = (u'apple', u'pear', u'cherry')
        for subnode, fruit in node.iterate(fruits):
            subnode.content = u'* %s' % fruit
template.render(Model())
| {
"content_hash": "a69b87cd1f2f29606271932175c2e1a2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 73,
"avg_line_length": 22.03846153846154,
"alnum_prop": 0.6387434554973822,
"repo_name": "ndparker/tdi",
"id": "1fb347af1470667942575d7beaa68a6ebdf6d451",
"size": "595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/rendering/text.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "567028"
},
{
"name": "C++",
"bytes": "6510"
},
{
"name": "HTML",
"bytes": "998"
},
{
"name": "Python",
"bytes": "1032169"
},
{
"name": "Shell",
"bytes": "425"
}
],
"symlink_target": ""
} |
"""Layers using cuDNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cudnn_rnn as contrib_cudnn_rnn
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
def _single_lstm(input_emb, input_len, hidden_size, is_fwd, use_cudnn):
  """Compute the outputs of a single LSTM (subroutine of stacked_bilstm).

  Be careful if used anywhere outside of stacked_bilstm, which converts the
  sequences to the time-major format expected by this function.

  Args:
    input_emb: <float32> [sequence_length, batch_size, emb]
    input_len: <int32> [batch_size]
    hidden_size: Number of units in the LSTM cell.
    is_fwd: Boolean indicator the directionality of the LSTM.
    use_cudnn: Boolean indicating the use of cudnn.

  Returns:
    output_emb: <float32> [sequence_length, batch_size, emb]
  """
  if not is_fwd:
    # A backward LSTM is emulated by reversing each sequence up to its true
    # length, running a forward LSTM, and reversing the outputs back below.
    input_emb = tf.reverse_sequence(
        input_emb,
        input_len,
        seq_axis=0,
        batch_axis=1)
  if use_cudnn:
    lstm = contrib_cudnn_rnn.CudnnLSTM(
        num_layers=1,
        num_units=hidden_size,
        input_mode=cudnn_rnn_ops.CUDNN_INPUT_LINEAR_MODE,
        direction=cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION)
    lstm.build(input_emb.shape)
    # NOTE(review): input_len is not passed here, so cuDNN also processes
    # padded timesteps; outputs at padded positions may differ from the
    # non-cudnn branch below -- confirm callers mask appropriately.
    output_emb, _ = lstm(input_emb)
  else:
    # CudnnCompatibleLSTMCell is designed to be checkpoint-compatible with
    # CudnnLSTM, so the two branches can share trained weights.
    cell = contrib_cudnn_rnn.CudnnCompatibleLSTMCell(hidden_size)
    cell = contrib_rnn.MultiRNNCell([cell])
    output_emb, _ = tf.nn.dynamic_rnn(
        cell=cell,
        inputs=input_emb,
        sequence_length=input_len,
        dtype=tf.float32,
        time_major=True)
  if not is_fwd:
    # Undo the initial reversal so outputs line up with the original order.
    output_emb = tf.reverse_sequence(
        output_emb,
        input_len,
        seq_axis=0,
        batch_axis=1)
  return output_emb
def stacked_bilstm(input_emb, input_len, hidden_size, num_layers, dropout_ratio,
                   mode, use_cudnn=None):
  """Encode inputs via stacked bidirectional LSTMs with residual connections.

  Args:
    input_emb: <float32> [batch_size, sequence_length, emb]
    input_len: <int32> [batch_size]
    hidden_size: Size of each LSTM layer.
    num_layers: Number of LSTM layers.
    dropout_ratio: Probability of dropout out dimensions of each hidden layer.
    mode: One of the keys from tf.estimator.ModeKeys.
    use_cudnn: Specify the use of cudnn. `None` denotes automatic selection.

  Returns:
    output_emb: <float32> [batch_size, sequence_length, emb]
  """
  # cuDNN expects time-major inputs, so we transpose before and after.
  input_emb = tf.transpose(input_emb, [1, 0, 2])
  if use_cudnn is None:
    # Automatic selection: use the cuDNN kernels iff a CUDA GPU is present.
    use_cudnn = tf.test.is_gpu_available(cuda_only=True)
  for i in range(num_layers):
    with tf.variable_scope("lstm_{}".format(i)):
      if mode == tf_estimator.ModeKeys.TRAIN:
        # Dropout on the layer inputs, applied only during training.
        input_emb = tf.nn.dropout(input_emb, 1.0 - dropout_ratio)
      output_emb = []
      for is_fwd in (True, False):
        # Separate variable scopes give the two directions distinct weights.
        with tf.variable_scope("fw" if is_fwd else "bw"):
          output_emb.append(_single_lstm(
              input_emb=input_emb,
              input_len=input_len,
              hidden_size=hidden_size,
              is_fwd=is_fwd,
              use_cudnn=use_cudnn))
      # Concatenate forward and backward outputs along the feature axis.
      output_emb = tf.concat(output_emb, -1)
      if i == 0:
        input_emb = output_emb
      else:
        # Add residual connection after the first layer.
        input_emb += output_emb
  # cuDNN expects time-major inputs, so we transpose before and after.
  output_emb = tf.transpose(input_emb, [1, 0, 2])
  return output_emb
| {
"content_hash": "6e85473bba399306f609c16288cab0fd",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 80,
"avg_line_length": 34.41121495327103,
"alnum_prop": 0.6613253666485606,
"repo_name": "google-research/language",
"id": "d348990fad6568114c92c1a4aa1114232547e067",
"size": "4297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/common/layers/cudnn_layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals,print_function
import six
import subprocess
import sys
import re
import h5py
import time
import numpy as np
from vmc_postproc import load
def vmc_exec(**kwargs):
    """
    Run the ``vmc`` binary (through mpiexec, or through SLURM when
    ``slurmqueue`` is set) and load the requested measurements.

    arguments:
        - nprocs (default 4)
        - hosts (default localhost)
        - slurmqueue (default False)
        - partition (default qwfall)
        - rundir (default .)
    plus all those given by running vmc -h.
    returns a dictionary with all measurements as
    asked using the kwargs.
    """
    # Options understood by the vmc binary itself...
    opts=get_vmc_args()
    # ...plus wrapper options that may legitimately appear in kwargs.
    opts+=['nprocs','hosts']
    kwargs.setdefault('nprocs',4)
    kwargs.setdefault('hosts','localhost')
    kwargs.setdefault('dir','.')
    kwargs.setdefault('partition','qwfall')
    kwargs.setdefault('rundir','.')
    kwargs.setdefault('slurmqueue',False)
    # Split the requested total number of samples across the MPI processes.
    nsamp=kwargs.setdefault('samples',10)
    if nsamp<kwargs['nprocs']:
        kwargs['samples']=1
    else:
        kwargs['samples']=int(nsamp/kwargs['nprocs'])+1
    # Keys consumed by this wrapper only; never forwarded to vmc.
    add_keys=['nprocs','hosts','slurmqueue','partition','rundir']
    # Maps measurement flags to the basename of the HDF5 files vmc writes.
    meas_trans={'meas_magnetization':'Magnetization',\
                'meas_projheis':'ProjHeis',\
                'meas_stagmagn':'StagMagn',\
                'meas_statspinstruct':'StatSpinStruct'}
    if kwargs.setdefault('stagflux_wav',False):
        # Staggered-flux wavefunction: output file carries a 'Z' suffix
        # (presumably only the z component is measured -- verify in vmc).
        meas_trans['meas_stagmagn']='StagMagnZ'
    # Build the vmc command line: booleans become bare flags (when True),
    # everything else becomes --key=value.
    vmcargs=[]
    for key,value in kwargs.items():
        if not key in add_keys:
            if not key in opts:
                raise RuntimeError('Unrecognized option \'{}\'.'.format(key))
            if type(value)==bool:
                if value:
                    vmcargs+=['--{}'.format(key)]
            else:
                vmcargs+=['--{key}={val}'.format(key=key,val=value)]
    if kwargs['slurmqueue']:
        # SLURM path: write a batch script, submit with sbatch, then poll
        # squeue until the job leaves the queue.
        vmcargsinline=''
        for a in vmcargs:
            vmcargsinline+=' '+a
        batchscript="""#!/bin/bash
#SBATCH -n {nprocs}
#SBATCH -c 1
#SBATCH -J vmc_exec
#SBATCH -p {partition}
#SBATCH --error=vmc_exec-%j.stderr
#SBATCH --output=vmc_exec-%j.stdout
cd {rundir}
mpirun ./vmc --prefix=$SLURM_JOB_ID""".format(nprocs=kwargs['nprocs'],partition=kwargs['partition'],rundir=kwargs['rundir'])+vmcargsinline
        batch_file=open('vmc_exec_batch.sh','w')
        batch_file.write(batchscript)
        batch_file.close()
        slurmproc=subprocess.Popen(['sbatch','vmc_exec_batch.sh'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        stdout,stderr=slurmproc.communicate()
        # Popen output is bytes; decode the same way on Python 2 and 3.
        if six.PY3:
            stdout=str(stdout,encoding='utf-8')
            stderr=str(stderr,encoding='utf-8')
        else:
            stdout=unicode(stdout,encoding='utf-8')
            stderr=unicode(stderr,encoding='utf-8')
        print(stderr)
        print(stdout)
        # The SLURM job id doubles as the output-file prefix (see script).
        prefix=re.findall(r'Submitted batch job ([0-9]+)',stdout)
        prefix=prefix[0]
        done=False
        while not done:
            # Poll every 10 s; squeue -h prints nothing once the job ended.
            squeueproc=subprocess.Popen(['squeue','-h','-j',str(prefix)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
            stdout,stderr=squeueproc.communicate()
            done=(len(stdout)==0)
            time.sleep(10)
        print('Finished calculation '+str(prefix))
    else:
        # Local path: run vmc under mpiexec on the configured hosts.
        vmcproc=subprocess.Popen(['mpiexec','-np',str(kwargs['nprocs']),'--host',kwargs['hosts'],'vmc']+vmcargs,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        stdout,stderr=vmcproc.communicate()
        if six.PY3:
            stdout=str(stdout,encoding='utf-8')
            stderr=str(stderr,encoding='utf-8')
        else:
            stdout=unicode(stdout,encoding='utf-8')
            stderr=unicode(stderr,encoding='utf-8')
        print(stderr)
        # vmc announces its own output prefix on stdout.
        prefix=re.findall(r'output prefix=([0-9]+)',stdout)
        prefix=prefix[0]
    # Load each requested measurement from '<dir>/<prefix>-<name>.h5'.
    outq=dict()
    for meas in meas_trans.keys():
        if kwargs.setdefault(meas,False):
            outq[meas_trans[meas]]=load.get_quantity(kwargs['dir']+'/{prefix}-{measname}.h5'.format(prefix=prefix,measname=meas_trans[meas]),nsamp)
    return outq
def get_vmc_args():
    """
    get all arguments from the vmc command
    """
    # Running vmc with no arguments makes it print its usage text.
    vmc_help = subprocess.Popen(['vmc'], stdout=subprocess.PIPE)
    help_text, _ = vmc_help.communicate()
    if six.PY3:
        help_text = str(help_text, encoding='utf-8')
    else:
        help_text = unicode(help_text, encoding='utf-8')
    # Scrape every '--option' name out of the usage text.
    option_names = []
    for usage_line in help_text.split('\n'):
        option_names += re.findall(r'--([a-zA-Z0-9_]*)=?.*', usage_line)
    return option_names
| {
"content_hash": "8a2464a52b6a9e3fcfcc925891700639",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 158,
"avg_line_length": 35.98347107438016,
"alnum_prop": 0.6093247588424437,
"repo_name": "EPFL-LQM/gpvmc",
"id": "c13acb219dec3778a0f4db0a5d1c9dd392fa154f",
"size": "4372",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/vmc_postproc/execute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6274"
},
{
"name": "C++",
"bytes": "195434"
},
{
"name": "FORTRAN",
"bytes": "235"
},
{
"name": "Python",
"bytes": "3010"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
"""
This script is used to run tests, create a coverage report and output the
statistics at the end of the tox run.
To run this script just execute ``tox``
"""
import re
from fabric.api import local, warn
from fabric.colors import green, red
if __name__ == '__main__':
    # Lint first; E126/W391 are tolerated and generated dirs are excluded.
    local('flake8 --ignore=E126 --ignore=W391 --statistics'
          ' --exclude=submodules,migrations,south_migrations,build .')
    # Run the app's test suite under coverage measurement.
    local('coverage run --source="currency_history" manage.py test -v 2'
          ' --traceback --failfast --settings=currency_history.tests.settings'
          ' --pattern="*_tests.py"')
    # Build the HTML report, omitting boilerplate modules.
    local('coverage html -d coverage --omit="*__init__*,*/settings/*,'
          '*/migrations/*,*/south_migrations/*,*/tests/*,*admin*"')
    # Scrape the total coverage percentage out of the generated HTML.
    total_line = local('grep -n pc_cov coverage/index.html', capture=True)
    percentage = float(re.findall(r'(\d+)%', total_line)[-1])
    if percentage < 100:
        warn(red('Coverage is {0}%'.format(percentage)))
    # The green total line is always printed, even when below 100%.
    print(green('Coverage is {0}%'.format(percentage)))
| {
"content_hash": "5b7c03a36c5e079bc62af84be13e5126",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 41.12,
"alnum_prop": 0.646887159533074,
"repo_name": "bitmazk/django-currency-history",
"id": "4731ccc872b1c6ecdd458d44ab5484112b8448d5",
"size": "1028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22863"
}
],
"symlink_target": ""
} |
"""plane3.py
Representation of a directed plane in 3D.
License:
http://www.apache.org/licenses/LICENSE-2.0"""
from .vector3 import Vector3
class Plane3(object):
    """A directed plane, stored as a normal vector and an offset ``w``.

    ``w`` is the dot product of the normal with any point on the plane
    (see :meth:`from_points`), i.e. the plane's offset along its normal.
    """

    def __init__(self, normal: Vector3, w: float) -> None:
        self.__normal = normal
        self.__w = w

    @property
    def normal(self) -> Vector3:
        """The plane's (directed) normal vector."""
        return self.__normal

    @property
    def w(self) -> float:
        """The plane's offset along its normal."""
        return self.__w

    def clone(self) -> 'Plane3':
        """Return an independent copy of this plane."""
        return Plane3(self.__normal.clone(), self.__w)

    def flipped(self) -> 'Plane3':
        """Return the same plane with the opposite orientation."""
        return Plane3(self.__normal.negated(), -self.__w)

    def distance(self, vector: Vector3) -> float:
        """Signed distance of *vector* from the plane (negative behind)."""
        return self.__normal.dot(vector) - self.__w

    def coplanar(self, vector: Vector3, epsilon: float = 0.000001) -> bool:
        """Return True when *vector* lies within *epsilon* of the plane.

        BUGFIX: the original compared the *signed* distance against
        epsilon, so every point far on the negative side of the plane
        was reported as coplanar. The absolute distance is compared now.
        """
        return abs(self.distance(vector)) < epsilon

    def equivalent(self, other: 'Plane3', epsilon: float = 0.000001) -> bool:
        """True when both planes have equivalent normals and close offsets."""
        if not self.__normal.equivalent(other.__normal):
            return False
        return abs(self.__w - other.__w) < epsilon

    @staticmethod
    def from_points(a: Vector3, b: Vector3, c: Vector3) -> 'Plane3':
        """Build the plane through *a*, *b*, *c* (normal by right-hand rule)."""
        normal = b.minus(a).cross(c.minus(a)).unit()
        return Plane3(normal, normal.dot(a))
| {
"content_hash": "06a82ec36dea926687c5ce5b3f292768",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 95,
"avg_line_length": 27.790697674418606,
"alnum_prop": 0.6092050209205021,
"repo_name": "GarrettGutierrez1/FFG_Geo",
"id": "83fecec6012394604e419fd8cc7ab0774c1bd158",
"size": "1219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ffg_geo/plane3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "99606"
}
],
"symlink_target": ""
} |
import logging
import apache_beam as beam
from apache_beam import CombineFn
from apache_beam import CombineGlobally
from apache_beam import Create
from apache_beam import Map
class AverageFn(CombineFn):
    """
    This is the same as the Mean PTransform, but it's used as an example
    to show the combiner interface
    """

    def create_accumulator(self):
        # Accumulator is a (running_sum, element_count) pair.
        return 0, 0

    def add_input(self, accumulator, element):
        # Fold one element into the accumulator.
        running_sum, count = accumulator
        return running_sum + element, count + 1

    def merge_accumulators(self, accumulators):
        # Partial accumulators may have been produced in parallel; merge
        # them by summing the sums and the counts separately.
        totals = tuple(acc[0] for acc in accumulators)
        counts = tuple(acc[1] for acc in accumulators)
        return sum(totals), sum(counts)

    def extract_output(self, accumulator):
        # Final result: the mean of all accumulated elements.
        running_sum, count = accumulator
        return running_sum / count
def run(args=None):
    """Build and run a pipeline that averages 0..99 and logs the result."""
    elements = range(100)
    with beam.Pipeline() as pipeline:
        _ = (pipeline
             | Create(elements)
             | "Global Average" >> CombineGlobally(AverageFn())
             | "Log" >> Map(logging.info))
if __name__ == "__main__":
    # INFO level so the value emitted by Map(logging.info) is visible.
    logging.getLogger().setLevel(logging.INFO)
    run()
| {
"content_hash": "009c02713663d5b6244212bb12fcf885",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 70,
"avg_line_length": 28.56,
"alnum_prop": 0.6967787114845938,
"repo_name": "GoogleCloudPlatform/dataflow-cookbook",
"id": "fe5fef264bea74ca9d831c56a32f0991b2301694",
"size": "2014",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "Python/basics/combine_interface.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "330379"
},
{
"name": "Python",
"bytes": "123817"
},
{
"name": "Scala",
"bytes": "88960"
}
],
"symlink_target": ""
} |
import binascii
import uuid
from castellan.common.objects import symmetric_key as key
import mock
from oslo_concurrency import processutils
from nova.tests.unit.volume.encryptors import test_cryptsetup
from nova.volume.encryptors import luks
class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
def _create(self, connection_info):
return luks.LuksEncryptor(connection_info)
@mock.patch('nova.utils.execute')
def test_is_luks(self, mock_execute):
luks.is_luks(self.dev_path)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(1, mock_execute.call_count)
    @mock.patch('nova.volume.encryptors.luks.LOG')
    @mock.patch('nova.utils.execute')
    def test_is_luks_with_error(self, mock_execute, mock_log):
        # A non-LUKS device makes cryptsetup exit non-zero; is_luks must
        # swallow the error and log a warning rather than raise.
        error_msg = "Device %s is not a valid LUKS device." % self.dev_path
        mock_execute.side_effect = \
            processutils.ProcessExecutionError(exit_code=1,
                                               stderr=error_msg)
        luks.is_luks(self.dev_path)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                      run_as_root=True, check_exit_code=True),
        ])
        self.assertEqual(1, mock_execute.call_count)
        self.assertEqual(1, mock_log.warning.call_count)  # warning logged
@mock.patch('nova.utils.execute')
def test__format_volume(self, mock_execute):
self.encryptor._format_volume("passphrase")
mock_execute.assert_has_calls([
mock.call('cryptsetup', '--batch-mode', 'luksFormat',
'--key-file=-', self.dev_path,
process_input='passphrase',
run_as_root=True, check_exit_code=True, attempts=3),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test__open_volume(self, mock_execute):
self.encryptor._open_volume("passphrase")
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input='passphrase',
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
    @mock.patch('nova.utils.execute')
    def test_attach_volume(self, mock_execute):
        fake_key = uuid.uuid4().hex
        # Stub key retrieval so no key manager is needed.
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
            None, fake_key)
        self.encryptor.attach_volume(None)
        # Expected flow: luksOpen the device, then symlink the dm-mapper
        # device over the original path.
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True),
        ])
        self.assertEqual(2, mock_execute.call_count)
    @mock.patch('nova.utils.execute')
    def test_attach_volume_not_formatted(self, mock_execute):
        fake_key = uuid.uuid4().hex
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
            None, fake_key)
        # First luksOpen fails and the isLuks probe confirms the volume is
        # not yet LUKS: attach_volume must format it and retry the open.
        mock_execute.side_effect = [
            processutils.ProcessExecutionError(exit_code=1),  # luksOpen
            processutils.ProcessExecutionError(exit_code=1),  # isLuks
            mock.DEFAULT,  # luksFormat
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # ln
        ]
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                      '--key-file=-', self.dev_path, process_input=fake_key,
                      run_as_root=True, check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True),
        ], any_order=False)
        self.assertEqual(5, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume_fail(self, mock_execute):
fake_key = uuid.uuid4().hex
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
None, fake_key)
mock_execute.side_effect = [
processutils.ProcessExecutionError(exit_code=1), # luksOpen
mock.DEFAULT, # isLuks
]
self.assertRaises(processutils.ProcessExecutionError,
self.encryptor.attach_volume, None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test__close_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksClose', self.dev_name,
attempts=3, run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_detach_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksClose', self.dev_name,
attempts=3, run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
    def test_get_mangled_passphrase(self):
        """_get_mangled_passphrase reproduces the legacy bug#1633518 form.

        The "mangled" passphrase drops the zero padding of each key byte
        (07 25 23 0b -> 7 25 23 b, i.e. '0725230b' -> '72523b'), matching
        keys written by the previously buggy encoding.
        """
        # Confirm that a mangled passphrase is provided as per bug#1633518
        unmangled_raw_key = bytes(binascii.unhexlify('0725230b'))
        # Key length is given in bits, hence len(...) * 8.
        symmetric_key = key.SymmetricKey('AES', len(unmangled_raw_key) * 8,
                                         unmangled_raw_key)
        unmangled_encoded_key = symmetric_key.get_encoded()
        encryptor = luks.LuksEncryptor(self.connection_info)
        self.assertEqual(encryptor._get_mangled_passphrase(
            unmangled_encoded_key), '72523b')
    @mock.patch('nova.utils.execute')
    def test_attach_volume_unmangle_passphrase(self, mock_execute):
        """A volume encrypted with a mangled key is migrated to the real key.

        The first luksOpen with the correct passphrase fails (exit code 2 =
        bad passphrase), so the encryptor retries with the mangled form,
        then adds the correct key, removes the mangled one, and finally
        opens the volume with the correct passphrase.
        """
        fake_key = '0725230b'
        # Mangled form of fake_key (zero padding of each byte dropped).
        fake_key_mangled = '72523b'
        self.encryptor._get_key = mock.MagicMock(name='mock_execute')
        self.encryptor._get_key.return_value = \
            test_cryptsetup.fake__get_key(None, fake_key)

        # Scripted outcomes for the nine expected cryptsetup/ln executions.
        mock_execute.side_effect = [
            processutils.ProcessExecutionError(exit_code=2),  # luksOpen
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # luksClose
            mock.DEFAULT,  # luksAddKey
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # luksClose
            mock.DEFAULT,  # luksRemoveKey
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # ln
        ]

        self.encryptor.attach_volume(None)

        # The exact call order matters: open with the mangled key, swap the
        # keys (add correct, remove mangled), then open with the real key.
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key_mangled,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                      run_as_root=True, check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksAddKey', self.dev_path,
                      process_input=''.join([fake_key_mangled,
                                             '\n', fake_key,
                                             '\n', fake_key]),
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                      run_as_root=True, check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksRemoveKey', self.dev_path,
                      process_input=fake_key_mangled, run_as_root=True,
                      check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True),
        ], any_order=False)
        self.assertEqual(9, mock_execute.call_count)
| {
"content_hash": "74a2c260f58e67750b6da99bb1dd54d6",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 45.63274336283186,
"alnum_prop": 0.5567730049452148,
"repo_name": "vmturbo/nova",
"id": "dea67176dc240e51edbf7b67f942440b55c242c4",
"size": "10989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/volume/encryptors/test_luks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "18983608"
},
{
"name": "Shell",
"bytes": "31813"
},
{
"name": "Smarty",
"bytes": "307089"
}
],
"symlink_target": ""
} |
from eclcli.common import command
from eclcli.common import exceptions
from eclcli.common import utils
from ..rcaclient.common.utils import objectify
class ListVersion(command.Lister):
    """List the available API versions of the RCA service."""

    def get_parser(self, prog_name):
        # No extra CLI arguments; the base parser is sufficient.
        return super(ListVersion, self).get_parser(prog_name)

    def take_action(self, parsed_args):
        client = self.app.client_manager.rca

        columns = ('status', 'id', 'links')
        column_headers = ('Status', 'ID', 'Links')
        formatters = {'links': utils.format_list_of_dicts}

        rows = (utils.get_item_properties(item, columns,
                                          formatters=formatters)
                for item in client.versions.list())
        return column_headers, rows
class ShowVersion(command.ShowOne):
    """Show details of a single RCA API version."""

    def get_parser(self, prog_name):
        parser = super(ShowVersion, self).get_parser(prog_name)
        parser.add_argument(
            'version',
            metavar='<version>',
            help="API version for Inter Connect Gateway Service"
        )
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.rca

        try:
            printout = client.versions.get(parsed_args.version)._info
        except exceptions.ClientException as exc:
            # Present API errors as a (code, message) row instead of failing.
            printout = {"code": exc.code,
                        "message": exc.message}

        columns = utils.get_columns(printout)
        data = utils.get_item_properties(
            objectify(printout),
            columns,
            formatters={'links': utils.format_list_of_dicts})
        return columns, data
| {
"content_hash": "00e2a0ad314fdda4d9fee7c2d20cbf78",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 30.56896551724138,
"alnum_prop": 0.5764241398759166,
"repo_name": "anythingrandom/eclcli",
"id": "6a36792e3118bbd728395934afc2fb88b528edb3",
"size": "1798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eclcli/rca/v2/version.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1647657"
}
],
"symlink_target": ""
} |
""" Resource references. """
# Enthought library imports.
from enthought.traits.api import Any, HasTraits, Instance
# Local imports.
from resource_factory import ResourceFactory
class ResourceReference(HasTraits):
    """ Abstract base class for resource references.

    A resource reference is the handle returned by 'locate_reference' on
    the resource manager; calling 'load' materializes the underlying
    resource.

    """

    # Factory used to turn the referenced data into a resource (read-only).
    resource_factory = Instance(ResourceFactory)

    ###########################################################################
    # 'ResourceReference' interface.
    ###########################################################################

    def load(self):
        """ Loads the resource.  Concrete subclasses must override this. """

        raise NotImplementedError
class ImageReference(ResourceReference):
    """ A reference to an image resource.

    Exactly one of 'filename' or 'data' is expected to be set; 'load'
    raises ValueError when neither is available.
    """

    # Iff the image was found in a file then this is the name of that file.
    filename = Any  # ReadOnly

    # Iff the image was found in a zip file then this is the image data that
    # was read from the zip file.
    data = Any  # ReadOnly

    def __init__(self, resource_factory, filename=None, data=None):
        """ Creates a new image reference.

        resource_factory -- factory used to build the image object.
        filename -- path of the image file, if it came from the filesystem.
        data -- raw image bytes, if it came from an archive.
        """
        self.resource_factory = resource_factory
        self.filename = filename
        self.data = data
        # (Fixed: removed a redundant bare 'return' that ended __init__.)

    ###########################################################################
    # 'ResourceReference' interface.
    ###########################################################################

    def load(self):
        """ Loads the image via the resource factory.

        Raises ValueError if the reference holds neither a filename nor
        raw image data.
        """
        if self.filename is not None:
            image = self.resource_factory.image_from_file(self.filename)

        elif self.data is not None:
            image = self.resource_factory.image_from_data(self.data)

        else:
            raise ValueError("Image reference has no filename OR data")

        return image
#### EOF ######################################################################
| {
"content_hash": "831d994169a1443fd7b90c74112d4290",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 29.1,
"alnum_prop": 0.5316642120765832,
"repo_name": "enthought/traitsgui",
"id": "34f0f357f72e0a406be9fd706ef38f142a52e3e3",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/resource/resource_reference.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1196658"
}
],
"symlink_target": ""
} |
import httplib2
import json
from six.moves import urllib
from . import config
class request:
    """Small HTTP client for the simpbot JSON API.

    Every call sends a JSON body containing the configured credentials
    (username/password) and the target network, via httplib2.
    """

    def __init__(self, host, port, network, username, password):
        self.host = host
        self.port = port
        self.network = network
        self.http = httplib2.Http()
        self.username = username
        self.password = password

    def basejoin(self, url):
        """Resolve *url* against the server's base URL."""
        return urllib.parse.urljoin(self.get_url(), url)

    def get_url(self):
        """Return the server base URL, e.g. "http://host:port"."""
        return "http://%s:%s" % (self.host, self.port)

    def request(self, url, method, data):
        """Send *data* as a JSON body to *url*; return the httplib2 result."""
        return self.http.request(
            self.basejoin(url),
            method.upper(),
            json.dumps(data),
            headers={'Content-Type': "application/json"})

    def post(self, url, extra={}):
        return self.request(url, 'POST', self.basedata(extra))

    def get(self, url, extra={}):
        # NOTE(review): this deliberately(?) issues POST as well -- the API
        # appears to accept JSON bodies only; confirm before changing.
        return self.request(url, 'POST', self.basedata(extra))

    def basedata(self, extra={}):
        """Build the request payload: credentials plus *extra* fields.

        Bug fix: this previously built the dict but never returned it, so
        every request carried a JSON body of "null".
        """
        data = {
            'username': self.username,
            'password': self.password,
            'network': self.network}
        data.update(extra)
        return data

    def connections(self):
        return self.get(config.URL_CONNECTIONS)

    def reconfigure(self):
        return self.post(config.URL_RECONFIGURE)

    def connect(self, servername):
        return self.post(config.URL_CONNECT, {'network': servername})

    def reconnect(self, servername):
        # Bug fix: was "self.port(...)" -- self.port is the port number,
        # not a method, so reconnect always raised TypeError.
        return self.post(config.URL_RECONNECT, {'network': servername})

    def disconnect(self, servername):
        return self.post(config.URL_DISCONNECT, {'network': servername})
def command(self, query):
return self.post(config.URL_COMMANDS, {'action': query}) | {
"content_hash": "e613f0f35ff3683da6c56e5ed21106ca",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 72,
"avg_line_length": 28.383333333333333,
"alnum_prop": 0.6100998238402818,
"repo_name": "IsmaelRLG/simpbot",
"id": "2ecae62033484c2493e73176cc8c8fa10b629b63",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simpbot/api/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "310649"
}
],
"symlink_target": ""
} |
from sppy.linalg.GeneralLinearOperator import GeneralLinearOperator
from sppy.linalg.core import rsvd, norm, biCGSTAB | {
"content_hash": "b0d524f37d125b04d94e64afb007f8b2",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 68,
"avg_line_length": 59,
"alnum_prop": 0.864406779661017,
"repo_name": "ICML14MoMCompare/MoMs-for-StochasticLanguages",
"id": "6e731d6e2681aedfbde5b1d234eba52694bcade0",
"size": "119",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "code/spectral/build/sppy/build/lib.linux-x86_64-2.7/sppy/linalg/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14902045"
},
{
"name": "C++",
"bytes": "110685913"
},
{
"name": "Cuda",
"bytes": "29867"
},
{
"name": "EmberScript",
"bytes": "6466967"
},
{
"name": "Makefile",
"bytes": "1692178"
},
{
"name": "Matlab",
"bytes": "3361"
},
{
"name": "Objective-C",
"bytes": "17070"
},
{
"name": "Perl",
"bytes": "6080"
},
{
"name": "Python",
"bytes": "1123728"
},
{
"name": "Shell",
"bytes": "977479"
},
{
"name": "TeX",
"bytes": "2118884"
}
],
"symlink_target": ""
} |
from django.db import models
class Skill(models.Model):
    """A skill entry with an attached comma-separated problem list."""

    name = models.CharField(max_length=64)
    description = models.TextField(blank=True)
    # Default -1 presumably means "no parent" (root skill) -- confirm.
    parent_id = models.IntegerField(default=-1)
    # Comma-separated problem ids, e.g. "1,2,3"; empty segments are allowed.
    problem_list = models.TextField(blank=True)
    priority = models.IntegerField(default=0)

    @property
    def parsed_problem_list(self):
        """Return problem_list as a list of ints, skipping empty segments."""
        # Comprehension replaces the old list(map(int, filter(...))) chain.
        return [int(pk) for pk in self.problem_list.split(',') if pk]
| {
"content_hash": "5cd816877571408f04669caf221daaeb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 31,
"alnum_prop": 0.7320099255583127,
"repo_name": "ultmaster/eoj3",
"id": "435d8f33117f36d0b0072b53b5919db38e698389",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem/models/skill.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "34134"
},
{
"name": "CSS",
"bytes": "16519"
},
{
"name": "HTML",
"bytes": "257689"
},
{
"name": "JavaScript",
"bytes": "60776"
},
{
"name": "Python",
"bytes": "767732"
},
{
"name": "TeX",
"bytes": "21976"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from django_comments.forms import CommentForm as CommentFormBase
from bearded_comments.models import TComment
from bearded_comments.models import TCommentNode
from bearded_comments import settings
class TCommentForm(CommentFormBase):
    """Comment form with an optional title and threaded (parent) support."""

    title = forms.CharField(label=_('Title'), required=False,
                            max_length=settings.COMMENTS_TITLE_MAX_LENGTH)
    parent = forms.IntegerField(required=False, widget=forms.HiddenInput)

    def clean_parent(self):
        """Validate that a referenced parent node exists (root comments pass)."""
        parent = self.cleaned_data['parent']
        if not parent:
            # Root comment: nothing to validate.
            return parent
        try:
            TCommentNode.objects.get(pk=parent)
        except TCommentNode.DoesNotExist:
            raise ValidationError(
                _('Parent comment node %s does not exist') % parent,
                code='invalid')
        return parent

    def get_comment_model(self):
        """Use the project's TComment model instead of the default."""
        return TComment

    def get_comment_create_data(self):
        """
        Extends base method adding 'title' data to the result.
        """
        data = super().get_comment_create_data()
        data['title'] = self.cleaned_data['title']
return data | {
"content_hash": "f266587c0c4b5ac69c76e1b64e0d4c7e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 108,
"avg_line_length": 37.6875,
"alnum_prop": 0.6907131011608624,
"repo_name": "gooslap/django-bearded-comments",
"id": "30ea59de2559fb64212d66fd801db5a4f625a179",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bearded_comments/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "505"
},
{
"name": "Python",
"bytes": "17695"
}
],
"symlink_target": ""
} |
import OSIM.Simulation.Utils as u
from OSIM.Modeling.Components.Charge import Charge
from OSIM.Modeling.Components.NPN_Vertical_Bipolar_Intercompany_Model.VBIC_ParasitPNP.VBIC_ParasitTransportCurrent import ParasitTransportCurrent
import numpy as np
from numba import jit
class QDBE(Charge):
    # Diffusion charge element named QDBE (base-emitter) of the VBIC
    # transistor model.  The charge is the bias-dependent transit time
    # TFF(V) times the forward transport current itf, normalized by qb.

    def __init__(self, nodes, name, value, superComponent, **kwargs):
        super(QDBE, self).__init__(nodes, name, value, superComponent,**kwargs)

        # Evaluate user-supplied variable assignments; NOTE(review): on
        # Python 3 exec cannot create function locals, so the eval calls
        # below only see these names under Python 2 -- confirm intent.
        for v in self.variableDict:
            variableExpr = "".join((v, "=", self.variableDict[v]))
            exec (variableExpr)

        # VBIC parameters (defaults in the strings): thermal voltage,
        # forward transit time and its modulation coefficients.
        self.UT = eval(self.paramDict.get("ut", "0.026"))
        self.TF = eval(self.paramDict.get("tf", "2.67E-13"))
        self.QTF = eval(self.paramDict.get("qtf", "1E-18"))
        self.XTF = eval(self.paramDict.get("xtf", "20"))
        self.VTF = eval(self.paramDict.get("vtf", "10"))
        self.ITF = eval(self.paramDict.get("itf", "0.1"))
        # Step size for the numerical derivative in dQdU_A.
        self.diffh = 0.000000001

    def TFF(self,V):
        # Bias-dependent transit time: TF scaled by the charge factor q1
        # and a current/voltage-dependent term b.
        itf = self.superComponent.IT.itf
        q1 = self.superComponent.IT.getq1()
        b = 1+self.XTF*(itf/(itf+self.ITF))**2*u.exp(V,1/(1.44*self.VTF), 1.5)
        return self.TF*(1+self.QTF*q1)*b

    def getCharge(self):
        # V is the voltage across this element's two nodes (real part of
        # the current solution vector).
        ufrom = self.sys.getSolutionAt(self.nodes[0]).real
        uto = self.sys.getSolutionAt(self.nodes[1]).real
        V = (ufrom-uto)
        qb = self.superComponent.IT.getqb()
        return self.TFF(V)*self.superComponent.IT.itf/qb

    @jit
    def dQdU_A(self):
        # Magnitude of dQ/dV via a one-sided finite difference with step
        # diffh around the present operating point.
        ufrom = self.sys.getSolutionAt(self.nodes[0]).real
        uto = self.sys.getSolutionAt(self.nodes[1]).real
        V = (ufrom-uto)
        return np.abs((self.TFF(V+self.diffh)*self.superComponent.IT.itf/self.superComponent.IT.getqb()-self.getCharge())/self.diffh)

    def reloadParams(self):
        # Re-evaluate parameters; mirrors the __init__ parameter block.
        for v in self.variableDict:
            variableExpr = "".join((v, "=", self.variableDict[v]))
            exec(variableExpr)

        self.UT = eval(self.paramDict.get("ut", "0.026"))
        self.TF = eval(self.paramDict.get("tf", "2.67E-13"))
        self.QTF = eval(self.paramDict.get("qtf", "1E-18"))
        self.XTF = eval(self.paramDict.get("xtf", "20"))
        self.VTF = eval(self.paramDict.get("vtf", "10"))
        self.ITF = eval(self.paramDict.get("itf", "0.1"))
class QDBC(Charge):
    """Diffusion charge element QDBC (base-collector) of the VBIC model.

    The charge is the reverse transit time TR times the reverse transport
    current itr supplied by the enclosing transport-current source.
    """

    def __init__(self, nodes, name, value, superComponent, **kwargs):
        super(QDBC, self).__init__(nodes, name, value, superComponent, **kwargs)
        # Parameter evaluation is delegated to reloadParams() so the
        # exec/eval block is no longer duplicated in both methods.
        self.reloadParams()

    def getCharge(self):
        """Return the diffusion charge TR * itr."""
        return self.TR*self.superComponent.IT.itr

    @jit
    def dQdU_A(self):
        """Magnitude of dQ/dU via the derivative of itr."""
        return np.abs(self.TR*self.superComponent.ditr_A())

    def reloadParams(self):
        """(Re-)evaluate model parameters from the param/variable dicts.

        NOTE(review): on Python 3 the exec below cannot create function
        locals, so the eval only sees these names under Python 2 --
        confirm the intended semantics before relying on variableDict.
        """
        for v in self.variableDict:
            variableExpr = "".join((v, "=", self.variableDict[v]))
            exec(variableExpr)

        # Reverse transit time; default 5 ps.
        self.TR = eval(self.paramDict.get("tr", "5E-12"))
class QDBEP(Charge):
    # Diffusion charge element QDBEP of the parasitic PNP in the VBIC
    # model; its charge follows the parasitic current source (PCS).

    def __init__(self, nodes, name, value, superComponent, **kwargs):
        # Placeholder PCS with name "0"; the real source is expected to be
        # injected via the 'ParasitCurSource' kwarg handled in parseArgs
        # (which the base __init__ presumably invokes -- TODO confirm).
        self.PCS = ParasitTransportCurrent(['0','0','0'], "0", "0", None,**kwargs)
        super(QDBEP, self).__init__(nodes, name, value, superComponent,**kwargs)
        # Still the placeholder -> the caller forgot to pass a source.
        if self.PCS.name == "0":
            print (name+" ERROR: ParasitCurrentSource has to be set!")

        # NOTE(review): on Python 3 exec cannot create function locals, so
        # the eval below only sees these names under Python 2 -- confirm.
        for v in self.variableDict:
            variableExpr = "".join((v, "=", self.variableDict[v]))
            exec(variableExpr)

        # Reverse transit time; default 5 ps.
        self.TR = eval(self.paramDict.get("tr", "5E-12"))

    def getCharge(self):
        # Charge tied to the parasitic forward transport current itfp.
        return [self.TR*self.PCS.itfp]

    def dQdU_A(self):
        # Derivative via the parasitic source's ditrp_A.
        return self.TR*self.PCS.ditrp_A()

    def parseArgs(self, **kwargs):
        super(QDBEP,self).parseArgs(**kwargs)
        # Pick up the real parasitic current source if supplied.
        for name, value in kwargs.items():
            if name == 'ParasitCurSource':
                self.PCS = value

    def reloadParams(self):
        # Re-evaluate variable assignments (same caveat as in __init__).
        for v in self.variableDict:
            variableExpr = "".join((v, "=", self.variableDict[v]))
            exec(variableExpr)
self.TR = eval(self.paramDict.get("tr", "5E-12")) | {
"content_hash": "180714e3e250561196abd5d37a196c17",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 145,
"avg_line_length": 35.16101694915254,
"alnum_prop": 0.5994215473608099,
"repo_name": "tmaiwald/OSIM",
"id": "445b6ed4cd6754303b1ea18b4d46f6b508b54383",
"size": "4149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OSIM/Modeling/Components/NPN_Vertical_Bipolar_Intercompany_Model/VBIC_Charges/VBIC_DiffusionCharges.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "222991"
}
],
"symlink_target": ""
} |
"""
# Relative Path Markdown Extension
During the MkDocs build we rewrite URLs that link to local
Markdown or media files. Using the following pages configuration
we can look at how the output is changed.
pages:
- ['index.md']
- ['tutorial/install.md']
- ['tutorial/intro.md']
## Markdown URLs
When linking from `install.md` to `intro.md` the link would
simply be `[intro](intro.md)`. However, when we build
`install.md` we place it in a directory to create nicer URLs.
This means that the path to `intro.md` becomes `../intro/`
## Media URLs
To make it easier to work with media files and store them all
under one directory we re-write those to all be based on the
root. So, with the following markdown to add an image.

The output would depend on the location of the Markdown file it
was added too.
Source file | Generated Path | Image Path |
------------------- | ----------------- | ---------------------------- |
index.md | / | ./img/initial-layout.png |
tutorial/install.md | tutorial/install/ | ../img/initial-layout.png |
tutorial/intro.md | tutorial/intro/ | ../../img/initial-layout.png |
"""
from __future__ import print_function
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
from mkdocs import utils
from mkdocs.compat import urlparse, urlunparse
from mkdocs.exceptions import MarkdownNotFound
def _iter(node):
# TODO: Remove when dropping Python 2.6. Replace this
# function call with note.iter()
return [node] + node.findall('.//*')
def path_to_url(url, nav, strict):
    """Rewrite a relative Markdown/media URL for the generated site.

    External URLs (with a scheme or netloc) and empty paths are returned
    unchanged.  Media paths are rebased relative to the site root; links
    to Markdown files are validated against the "pages" configuration and
    converted to the corresponding HTML page URL.  In strict mode an
    unknown target raises MarkdownNotFound; otherwise a warning is printed
    and the URL is left as-is.
    """
    scheme, netloc, path, params, query, fragment = urlparse(url)

    if scheme or netloc or not path:
        # Ignore URLs unless they are a relative link to a markdown file.
        return url

    if nav and not utils.is_markdown_file(path):
        # Media file: rebase it relative to the current page's depth.
        path = utils.create_relative_media_url(nav, path)
    elif nav:
        # If the site navigation has been provided, then validate
        # the internal hyperlink, making sure the target actually exists.
        target_file = nav.file_context.make_absolute(path)
        if target_file not in nav.source_files:
            source_file = nav.file_context.current_file
            msg = (
                'The page "%s" contained a hyperlink to "%s" which '
                'is not listed in the "pages" configuration.'
            ) % (source_file, target_file)

            # In strict mode raise an error at this point.
            if strict:
                raise MarkdownNotFound(msg)
            # Otherwise, when strict mode isn't enabled, print out a warning
            # to the user and leave the URL as it is.
            print(msg)
            return url
        # Known page: translate the .md path into its output URL, relative
        # to the page currently being rendered.
        path = utils.get_url_path(target_file, nav.use_directory_urls)
        path = nav.url_context.make_relative(path)
    else:
        # No navigation context: just root-relative, with no leading slash.
        path = utils.get_url_path(path).lstrip('/')

    # Convert the .md hyperlink to a relative hyperlink to the HTML page.
    url = urlunparse((scheme, netloc, path, params, query, fragment))
    return url
class RelativePathTreeprocessor(Treeprocessor):

    def __init__(self, site_navigation, strict):
        self.site_navigation = site_navigation
        self.strict = strict

    def run(self, root):
        """Update urls on anchors and images to make them relative.

        Walks every element of the document tree and rewrites the URL
        attribute of links (href) and images (src) via path_to_url, based
        on the site navigation.
        """
        url_attr = {'a': 'href', 'img': 'src'}
        for element in _iter(root):
            key = url_attr.get(element.tag)
            if key is None:
                continue
            new_url = path_to_url(element.get(key), self.site_navigation,
                                  self.strict)
            element.set(key, new_url)

        return root
class RelativePathExtension(Extension):
    """
    Markdown Extension that registers the relative-path Treeprocessor
    with the markdown instance.
    """

    def __init__(self, site_navigation, strict):
        self.site_navigation = site_navigation
        self.strict = strict

    def extendMarkdown(self, md, md_globals):
        processor = RelativePathTreeprocessor(self.site_navigation,
                                              self.strict)
        md.treeprocessors.add("relpath", processor, "_end")
| {
"content_hash": "c3d6079a5c50e552e0afb8ad8e1f70ba",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 78,
"avg_line_length": 33.515151515151516,
"alnum_prop": 0.6313291139240507,
"repo_name": "cazzerson/mkdocs",
"id": "248ea7bf866cfb5fbc1551983cb518b7e23498ac",
"size": "4424",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mkdocs/relative_path_ext.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "347102"
},
{
"name": "HTML",
"bytes": "103188"
},
{
"name": "JavaScript",
"bytes": "20166"
},
{
"name": "Python",
"bytes": "75741"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
} |
"""Use multiprocess to perform COCO metric evaluation.
"""
# copybara:insert import multiprocessing
from REDACTED.mask_rcnn import mask_rcnn_params
from REDACTED.mask_rcnn import segm_utils
# copybara:strip_begin
from REDACTED.REDACTED.multiprocessing import REDACTEDprocess
# copybara:strip_end
# copybara:strip_begin
def REDACTED_post_processing():
  """Worker entry point: runs post_processing on the REDACTED user queues."""
  input_queue, output_queue = REDACTEDprocess.get_user_data()
  post_processing(input_queue, output_queue)
def post_processing(q_in, q_out):
  """Batch-processes the predictions.

  Consumes (boxes, masks, image_info) tuples from q_in until a
  (None, None, None) sentinel arrives, converts each batch into
  detections and segmentation results, and puts them on q_out.
  A final (None, None) item on q_out tells the parent this worker
  has finished.
  """
  boxes, masks, image_info = q_in.get()
  while boxes is not None:
    detections = []
    segmentations = []
    for i, box in enumerate(boxes):
      # Slice out the padding data where score is zero
      # (column 5 holds the score; keep at least one row).
      num = max(1, sum(box[:, 5] > 0))
      box = box[:num, :]
      # Columns 1:5 are the box coordinates; image_info[i][3:5] hold the
      # original image height/width used to scale the masks.
      segms = segm_utils.segm_results(
          masks[i], box[:, 1:5], int(image_info[i][3]), int(image_info[i][4]))
      detections.extend(box)
      segmentations.append(segms)
    q_out.put((detections, segmentations))
    boxes, masks, image_info = q_in.get()
  # signal the parent process that we have completed all work.
  q_out.put((None, None))
def update_eval_metric(q_out, eval_metric, exited_process):
  """Consume one result from q_out and fold it into eval_metric.

  A (None, None) item is the sentinel a worker sends when it exits; it
  increments the exited-worker count instead of updating the metric.

  Args:
    q_out: queue of (detections, segmentations) tuples.
    eval_metric: object exposing update(detections, segmentations).
    exited_process: number of workers known to have exited so far.

  Returns:
    The (possibly incremented) count of exited workers.
  """
  detections, segmentations = q_out.get()
  if detections is None and segmentations is None:
    return exited_process + 1
  eval_metric.update(detections, segmentations)
  return exited_process
def eval_multiprocessing(predictions,
                         eval_metric,
                         eval_worker_count,
                         queue_size=mask_rcnn_params.QUEUE_SIZE):
  """Enables multiprocessing to update eval metrics.

  Fans the prediction batches out to eval_worker_count worker processes,
  drains their results into eval_metric while feeding, then sends a
  sentinel per worker and waits for all of them to finish.
  """
  # copybara:strip_begin
  q_in, q_out = REDACTEDprocess.get_user_data()
  processes = [
      REDACTEDprocess.Process(target=REDACTED_post_processing)
      for _ in range(eval_worker_count)
  ]
  # copybara:strip_end_and_replace_begin
  # q_in = multiprocessing.Queue(maxsize=queue_size)
  # q_out = multiprocessing.Queue(maxsize=queue_size)
  # processes = [
  #     multiprocessing.Process(target=post_processing, args=(q_in, q_out))
  #     for _ in range(eval_worker_count)
  # ]
  # copybara:replace_end
  for p in processes:
    p.start()

  # TODO(b/129410706): investigate whether threading improves speed.
  # Every predictor.next() gets a batch of prediction (a dictionary).
  # Each worker receives an equal slice of the prediction arrays.
  exited_process = 0
  samples = len(predictions['detections']) // eval_worker_count
  for i in range(eval_worker_count):
    # Drain q_out while the queues are getting full so neither side blocks.
    while q_in.full() or q_out.qsize() > queue_size // 4:
      exited_process = update_eval_metric(q_out, eval_metric, exited_process)
    q_in.put((predictions['detections'][i * samples:(i + 1) * samples],
              predictions['mask_outputs'][i * samples:(i + 1) * samples],
              predictions['image_info'][i * samples:(i + 1) * samples]))

  # Adds empty items to signal the children to quit.
  for _ in processes:
    q_in.put((None, None, None))

  # Cleans up q_out and waits for all the processes to finish work.
  while not q_out.empty() or exited_process < eval_worker_count:
    exited_process = update_eval_metric(q_out, eval_metric, exited_process)
  for p in processes:
    # actively terminate all processes (to work around the multiprocessing
    # deadlock issue in Cloud)
    # copybara:insert p.terminate()
    p.join()
| {
"content_hash": "681e95e77f505e84636c093dd70b34d3",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 78,
"avg_line_length": 35.819148936170215,
"alnum_prop": 0.6721116721116721,
"repo_name": "mlperf/training_results_v0.7",
"id": "b4d8d03b60d64f6d92a14bd5d4169e3822ec1a39",
"size": "4040",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v3-1024/eval_multiprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
"""Push Jenkins job definitions via jenkins-jobs, injecting a ghprb
user white-list into the job template before updating."""
import argparse
import subprocess
# NOTE(review): yaml appears unused in this script -- confirm before removing.
import yaml
import tempfile

parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default="",
                    dest='configfile', required=True,
                    help='Jenkins configuration file')
parser.add_argument('--whitelist', type=str, default="",
                    dest='whitelist', required=True,
                    help='List of users to put in the ghprb white list')
parser.add_argument('--jobs', type=str, default="",
                    dest='jobs', required=True,
                    help='Jobs definitions file')
args = parser.parse_args()

# Whitespace-separated user names from the whitelist file.
whitelist = open(args.whitelist).read().split()

# Substitute the empty white-list placeholder with the actual users;
# repr() of a list of strings is valid YAML flow-sequence syntax here.
jobs = open(args.jobs).read()
jobs = jobs.replace("white-list: []", "white-list: " + repr(whitelist))

# Write the patched definitions to a temp file that jenkins-jobs can read;
# the NamedTemporaryFile must stay open (and thus on disk) for the call.
tmpjobs = tempfile.NamedTemporaryFile(mode="wt")
tmpjobs.write(jobs)
tmpjobs.flush()

subprocess.call(["jenkins-jobs", "--conf", args.configfile, "update", tmpjobs.name])
| {
"content_hash": "7b200ade58c57860d5d921055d683877",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 84,
"avg_line_length": 36.23076923076923,
"alnum_prop": 0.6369426751592356,
"repo_name": "lebauce/skydive",
"id": "b2141836df7efe46f1a10e743d962e7beb0fef0b",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/ci/jobs/update-jobs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "41340"
},
{
"name": "CSS",
"bytes": "61274"
},
{
"name": "Dockerfile",
"bytes": "424"
},
{
"name": "Go",
"bytes": "2405459"
},
{
"name": "HTML",
"bytes": "7593"
},
{
"name": "JavaScript",
"bytes": "281449"
},
{
"name": "Makefile",
"bytes": "22315"
},
{
"name": "Nix",
"bytes": "818"
},
{
"name": "Python",
"bytes": "113941"
},
{
"name": "Roff",
"bytes": "6623"
},
{
"name": "Ruby",
"bytes": "21247"
},
{
"name": "Shell",
"bytes": "107456"
},
{
"name": "Smarty",
"bytes": "3289"
},
{
"name": "TypeScript",
"bytes": "26297"
}
],
"symlink_target": ""
} |
import rospy
import time
import bson
from rosbridge_library.internal.exceptions import InvalidArgumentException
from rosbridge_library.internal.exceptions import MissingArgumentException
#from rosbridge_library.internal.pngcompression import encode
from rosbridge_library.capabilities.fragmentation import Fragmentation
from rosbridge_library.util import json
def is_number(s):
    """Return True if *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def has_binary(d):
    """Return True if *d* is a bson.Binary or a dict containing one.

    Only dict values are searched (recursively); binaries nested inside
    lists or other containers are not detected, matching the original
    behavior.
    """
    if type(d) == bson.Binary:
        return True
    if type(d) == dict:
        # Only the values matter; iterating values() (instead of the
        # Python-2-only iteritems()) works on both Python 2 and 3.
        return any(has_binary(v) for v in d.values())
    return False
class Protocol:
""" The interface for a single client to interact with ROS.
See rosbridge_protocol for the default protocol used by rosbridge
The lifecycle for a Protocol instance is as follows:
- Pass incoming messages from the client to incoming
- Propagate outgoing messages to the client by overriding outgoing
- Call finish to clean up resources when the client is finished
"""
# fragment_size can be set per client (each client has its own instance of protocol)
# ..same for other parameters
fragment_size = None
png = None
# buffer used to gather partial JSON-objects (could be caused by small tcp-buffers or similar..)
buffer = ""
old_buffer = ""
busy = False
# if this is too low, ("simple")clients network stacks will get flooded (when sending fragments of a huge message..)
# .. depends on message_size/bandwidth/performance/client_limits/...
# !! this might be related to (or even be avoided by using) throttle_rate !!
delay_between_messages = 0
# global list of non-ros advertised services
external_service_list = {}
parameters = None
    def __init__(self, client_id):
        """ Initialize a protocol instance for a single client.

        Keyword arguments:
        client_id -- a unique ID for this client to take. Uniqueness is
        important otherwise there will be conflicts between multiple clients
        with shared resources

        """
        self.client_id = client_id
        # Registered capability instances and op-name -> handler mapping;
        # both start empty and are populated elsewhere.
        self.capabilities = []
        self.operations = {}
        # If a global parameter dict was set on the class, honour the
        # per-client fragment size and inter-message delay settings.
        if self.parameters:
            self.fragment_size = self.parameters["max_message_size"]
            self.delay_between_messages = self.parameters["delay_between_messages"]
# added default message_string="" to allow recalling incoming until buffer is empty without giving a parameter
# --> allows to get rid of (..or minimize) delay between client-side sends
def incoming(self, message_string=""):
""" Process an incoming message from the client
Keyword arguments:
message_string -- the wire-level message sent by the client
"""
self.buffer = self.buffer + message_string
msg = None
# take care of having multiple JSON-objects in receiving buffer
# ..first, try to load the whole buffer as a JSON-object
try:
msg = self.deserialize(self.buffer)
self.buffer = ""
# if loading whole object fails try to load part of it (from first opening bracket "{" to next closing bracket "}"
# .. this causes Exceptions on "inner" closing brackets --> so I suppressed logging of deserialization errors
except Exception, e:
# TODO: handling of partial/multiple/broken json data in incoming buffer
# this way is problematic when json contains nested json-objects ( e.g. { ... { "config": [0,1,2,3] } ... } )
# .. if outer json is not fully received, stepping through opening brackets will find { "config" : ... } as a valid json object
# .. and pass this "inner" object to rosbridge and throw away the leading part of the "outer" object..
# solution for now:
# .. check for "op"-field. i can still imagine cases where a nested message ( e.g. complete service_response fits into the data field of a fragment..)
# .. would cause trouble, but if a response fits as a whole into a fragment, simply do not pack it into a fragment.
#
# --> from that follows current limitiation:
# fragment data must NOT (!) contain a complete json-object that has an "op-field"
#
# an alternative solution would be to only check from first opening bracket and have a time out on data in input buffer.. (to handle broken data)
opening_brackets = [i for i, letter in enumerate(self.buffer) if letter == '{']
closing_brackets = [i for i, letter in enumerate(self.buffer) if letter == '}']
for start in opening_brackets:
for end in closing_brackets:
try:
msg = self.deserialize(self.buffer[start:end+1])
if msg.get("op",None) != None:
# TODO: check if throwing away leading data like this is okay.. loops look okay..
self.buffer = self.buffer[end+1:len(self.buffer)]
# jump out of inner loop if json-decode succeeded
break
except Exception,e:
# debug json-decode errors with this line
#print e
pass
# if load was successfull --> break outer loop, too.. -> no need to check if json begins at a "later" opening bracket..
if msg != None:
break
# if decoding of buffer failed .. simply return
if msg is None:
return
# process fields JSON-message object that "control" rosbridge
mid = None
if "id" in msg:
mid = msg["id"]
if "op" not in msg:
if "receiver" in msg:
self.log("error", "Received a rosbridge v1.0 message. Please refer to rosbridge.org for the correct format of rosbridge v2.0 messages. Original message was: %s" % message_string)
else:
self.log("error", "Received a message without an op. All messages require 'op' field with value one of: %s. Original message was: %s" % (self.operations.keys(), message_string), mid)
return
op = msg["op"]
if op not in self.operations:
self.log("error", "Unknown operation: %s. Allowed operations: %s" % (op, self.operations.keys()), mid)
return
# this way a client can change/overwrite it's active values anytime by just including parameter field in any message sent to rosbridge
# maybe need to be improved to bind parameter values to specific operation..
if "fragment_size" in msg.keys():
self.fragment_size = msg["fragment_size"]
#print "fragment size set to:", self.fragment_size
if "message_intervall" in msg.keys() and is_number(msg["message_intervall"]):
self.delay_between_messages = msg["message_intervall"]
if "png" in msg.keys():
self.png = msg["msg"]
# now try to pass message to according operation
try:
self.operations[op](msg)
except Exception as exc:
self.log("error", "%s: %s" % (op, str(exc)), mid)
# if anything left in buffer .. re-call self.incoming
# TODO: check what happens if we have "garbage" on tcp-stack --> infinite loop might be triggered! .. might get out of it when next valid JSON arrives since only data after last 'valid' closing bracket is kept
if len(self.buffer) > 0:
# try to avoid infinite loop..
if self.old_buffer != self.buffer:
self.old_buffer = self.buffer
self.incoming()
def outgoing(self, message):
""" Pass an outgoing message to the client. This method should be
overridden.
Keyword arguments:
message -- the wire-level message to send to the client
"""
pass
def send(self, message, cid=None):
""" Called internally in preparation for sending messages to the client
This method pre-processes the message then passes it to the overridden
outgoing method.
Keyword arguments:
message -- a dict of message values to be marshalled and sent
cid -- (optional) an associated id
"""
serialized = self.serialize(message, cid)
if serialized is not None:
if self.png == "png":
# TODO: png compression on outgoing messages
# encode message
pass
fragment_list = None
if self.fragment_size != None and len(serialized) > self.fragment_size:
mid = message.get("id", None)
# TODO: think about splitting into fragments that have specified size including header-fields!
# --> estimate header size --> split content into fragments that have the requested overall size, rather than requested content size
fragment_list = Fragmentation(self).fragment(message, self.fragment_size, mid )
# fragment list not empty -> send fragments
if fragment_list != None:
for fragment in fragment_list:
self.outgoing(json.dumps(fragment))
# okay to use delay here (sender's send()-function) because rosbridge is sending next request only to service provider when last one had finished)
# --> if this was not the case this delay needed to be implemented in service-provider's (meaning message receiver's) send_message()-function in rosbridge_tcp.py)
time.sleep(self.delay_between_messages)
# else send message as it is
else:
self.outgoing(serialized)
time.sleep(self.delay_between_messages)
def finish(self):
""" Indicate that the client is finished and clean up resources.
All clients should call this method after disconnecting.
"""
for capability in self.capabilities:
capability.finish()
def serialize(self, msg, cid=None):
""" Turns a dictionary of values into the appropriate wire-level
representation.
Default behaviour uses JSON. Override to use a different container.
Keyword arguments:
msg -- the dictionary of values to serialize
cid -- (optional) an ID associated with this. Will be logged on err.
Returns a JSON string representing the dictionary
"""
try:
# if has_binary(msg):
# return bson.BSON.encode(msg)
# else:
# return json.dumps(msg)
return bson.BSON.encode(msg)
except:
if cid is not None:
# Only bother sending the log message if there's an id
self.log("error", "Unable to serialize %s message to client"
% msg["op"], cid)
return None
def deserialize(self, msg, cid=None):
""" Turns the wire-level representation into a dictionary of values
Default behaviour assumes JSON. Override to use a different container.
Keyword arguments:
msg -- the wire-level message to deserialize
cid -- (optional) an ID associated with this. Is logged on error
Returns a dictionary of values
"""
try:
return bson.BSON.decode(bson.BSON(msg))
#return json.loads(msg)
except Exception, e:
# if we did try to deserialize whole buffer .. first try to let self.incoming check for multiple/partial json-decodes before logging error
# .. this means, if buffer is not == msg --> we tried to decode part of buffer
# TODO: implement a way to have a final Exception when nothing works out to decode (multiple/broken/partial JSON..)
# supressed logging of exception on json-decode to keep rosbridge-logs "clean", otherwise console logs would get spammed for every failed json-decode try
# if msg != self.buffer:
# error_msg = "Unable to deserialize message from client: %s" % msg
# error_msg += "\nException was: " +str(e)
#
# self.log("error", error_msg, cid)
# re-raise Exception to allow handling outside of deserialize function instead of returning None
raise
#return None
def register_operation(self, opcode, handler):
""" Register a handler for an opcode
Keyword arguments:
opcode -- the opcode to register this handler for
handler -- a callback function to call for messages with this opcode
"""
self.operations[opcode] = handler
def unregister_operation(self, opcode):
""" Unregister a handler for an opcode
Keyword arguments:
opcode -- the opcode to unregister the handler for
"""
if opcode in self.operations:
del self.operations[opcode]
def add_capability(self, capability_class):
""" Add a capability to the protocol.
This method is for convenience; assumes the default capability
constructor
Keyword arguments:
capability_class -- the class of the capability to add
"""
self.capabilities.append(capability_class(self))
def log(self, level, message, lid=None):
""" Log a message to the client. By default just sends to stdout
Keyword arguments:
level -- the logger level of this message
message -- the string message to send to the user
lid -- an associated for this log message
"""
stdout_formatted_msg = None
if lid is not None:
stdout_formatted_msg = "[Client %s] [id: %s] %s" % (self.client_id, lid, message)
else:
stdout_formatted_msg = "[Client %s] %s" % (self.client_id, message)
if level == "error" or level == "err":
rospy.logerr(stdout_formatted_msg)
elif level == "warning" or level == "warn":
rospy.logwarn(stdout_formatted_msg)
elif level == "info" or level == "information":
rospy.loginfo(stdout_formatted_msg)
else:
rospy.logdebug(stdout_formatted_msg)
| {
"content_hash": "932107e0874d28723bba22fd4a590930",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 217,
"avg_line_length": 42.58702064896755,
"alnum_prop": 0.609752718708873,
"repo_name": "SNU-Sigma/rosbridge_suite",
"id": "fadf587fbfdfdc64a5de06a20e8cdecf43b941dd",
"size": "16042",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "rosbridge_library/src/rosbridge_library/protocol.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1070"
},
{
"name": "CMake",
"bytes": "2773"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1508982"
},
{
"name": "Shell",
"bytes": "379"
}
],
"symlink_target": ""
} |
import logging
from fuel_upgrade.actions import ActionManager
from fuel_upgrade.engines.base import UpgradeEngine
from fuel_upgrade.utils import get_required_size_for_actions
logger = logging.getLogger(__name__)
class BootstrapUpgrader(UpgradeEngine):
    """Upgrade engine for the bootstrap image.

    All real work is delegated to an ActionManager built from the
    "actions" list of the bootstrap section of the config.
    """

    def __init__(self, *args, **kwargs):
        super(BootstrapUpgrader, self).__init__(*args, **kwargs)

        #: an action manager instance
        self._action_manager = ActionManager(self.config.bootstrap['actions'])

    def upgrade(self):
        logger.info('bootstrap upgrader: starting...')
        self._action_manager.do()
        logger.info('bootstrap upgrader: done')

    def rollback(self):
        logger.info('bootstrap upgrader: rollbacking...')
        self._action_manager.undo()
        logger.info('bootstrap upgrader: rollbacked')

    @property
    def required_free_space(self):
        actions = self.config.bootstrap['actions']
        return get_required_size_for_actions(actions, self.config.update_path)

    def on_success(self):
        """Do nothing for this engine
        """
| {
"content_hash": "f0ddbb6cb3bf12fd41ffc4cab88b546b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.6596906278434941,
"repo_name": "Axam/nsx-web",
"id": "c32acb24b2571213b1848c814d9e7ebe8856486a",
"size": "1734",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fuel_upgrade_system/fuel_upgrade/fuel_upgrade/engines/bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99402"
},
{
"name": "JavaScript",
"bytes": "553275"
},
{
"name": "Python",
"bytes": "2623980"
},
{
"name": "Ruby",
"bytes": "33345"
},
{
"name": "Shell",
"bytes": "29681"
}
],
"symlink_target": ""
} |
from django.forms import forms
from django.utils.translation import ugettext as _
class SupervisionDocumentForm(forms.Form):
    """Single-field form for uploading a supervision document.

    NOTE(review): the module imports ``forms`` via
    ``from django.forms import forms``; confirm that ``FileField`` is
    actually exposed on that submodule in the targeted Django version
    (the usual import is ``from django import forms``).
    """

    document = forms.FileField(label=_('Upload a file'))
| {
"content_hash": "81ce43624fe6556894c4458d0bc7b06c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 50,
"avg_line_length": 24.875,
"alnum_prop": 0.7185929648241206,
"repo_name": "puttarajubr/commcare-hq",
"id": "f8d7c78888988e07760ff3ed344c318dd8d13f43",
"size": "199",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "custom/ilsgateway/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
"""
Convert an SVG to an EPS file
http://stackoverflow.com/questions/9793120/how-to-covert-svg-to-eps-in-ghostscript
"""
import optparse
import os
import re
import tempfile
if __name__ == "__main__":
    # Python 2 script: converts each .svg argument to .eps by shelling out
    # to Ghostscript's `gsvg` (svg -> ps) and then `ps2eps` (ps -> eps).
    parser=optparse.OptionParser()
    parser.add_option("--remove","-r",help="Remove .ps file after conversion?",default=False,action="store_true")
#    parser.add_option("--multipage","-m",help="Convert to multiple png files?",default=False,action="store_true")
    parser.add_option("--silent","-s",help="Be quiet? Default False",default=False,action="store_true")
    parser.add_option("--verbose","-v",help="Be loud? Default True",default=1)
    parser.add_option("--silence_gs",help="Silence Ghostscript? Default True",default=True)
#    parser.add_option("--resolution",help="Resolution (pixels per inch) of png. Default 300",default=300)
    parser.add_option("--noepscrop",help="No EPS crop? Default False",default=False,action='store_true')
    parser.add_option("--outfile",help="Outfile name?",default=None)
    options,args = parser.parse_args()
    # --silent wins over --verbose; verbose stays truthy only when not silent.
    verbose = not(options.silent) and options.verbose
    if verbose > 1:
        print "Args: ",args
        print "Options: ",options
    for filename in args:
        # NOTE(review): epscrop is computed here but only referenced by the
        # commented-out gs command below -- unused by the active pipeline.
        if options.noepscrop: epscrop=""
        else: epscrop = "-dEPSCrop"
#        if options.multipage:
#            outfile = re.sub("\.e?ps","_%d.png",filename)
#        else:
#            outfile = re.sub("\.e?ps",".png",filename)
        # Derive the .eps name from the .svg name unless --outfile was given.
        if options.outfile is None:
            outfile = re.sub("\.svg$",".eps",filename)
        else:
            outfile = options.outfile
        # Intermediate PostScript file produced by gsvg, consumed by ps2eps.
        ps_outfile = re.sub("\.eps",".ps",outfile)
        print "outfile: %s ps_outfile: %s" % (outfile,ps_outfile)
        command1 = "gsvg -dNOPAUSE -sDEVICE=ps2write -sOutputFile=%s" % (ps_outfile)
        command2 = "ps2eps -f %s" % (ps_outfile)
        #command2 = "gs -dBATCH -dNOPAUSE -sDEVICE=epswrite %s" % (epscrop)
#    command = "gs -dBATCH -sDEVICE=png16m -r%i %s -dNOPAUSE" % (options.resolution,epscrop)
        command1 += " %s" % filename
        #command2 += " -sOutputFile=%s %s" % (outfile, ps_outfile)
        if options.silence_gs:
            command1 += " > /dev/null"
            command2 += " > /dev/null"
        if verbose:
            print command1
            print command2
        # NOTE(review): exit statuses are captured but never checked, so a
        # failed conversion is silently ignored (and --remove may still
        # delete the source below).
        status = os.system(command1)
        status = os.system(command2)
        if options.remove:
            if verbose > 1: print "rm %s" % filename
            os.remove(filename)
#gsvg \
# -dNOPAUSE \
# -sDEVICE=ps2write \
# -sOutputFile=my.ps \
# my.svg
#
#gs \
# -dNOPAUSE \
# -sDEVICE=epswrite \
# -sOutputFile=my.eps \
# my.ps
| {
"content_hash": "7511b54dbaaac3e712aa3e6b017e9438",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 114,
"avg_line_length": 31.83529411764706,
"alnum_prop": 0.6016260162601627,
"repo_name": "tectronics/agpy",
"id": "20291a283b5cc9b3a39647b1cced7297c8ede799",
"size": "2728",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "agpy/svg2eps.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11150"
},
{
"name": "Common Lisp",
"bytes": "6644"
},
{
"name": "Python",
"bytes": "882914"
},
{
"name": "Shell",
"bytes": "11548"
},
{
"name": "TeX",
"bytes": "1462"
},
{
"name": "VimL",
"bytes": "1189"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import compas
from .bbox import * # noqa: F401 F403
# The numpy-backed variants are skipped under IronPython (compas.IPY),
# presumably because numpy is unavailable there -- confirm against compas docs.
if not compas.IPY:
    from .bbox_numpy import *  # noqa: F401 F403

# Re-export every public name pulled in by the star imports above.
__all__ = [name for name in dir() if not name.startswith("_")]
| {
"content_hash": "e989a1794245826e8aa7eadf68fa139c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 22.923076923076923,
"alnum_prop": 0.6845637583892618,
"repo_name": "compas-dev/compas",
"id": "57c9d4303daa08887c2b933e8b3d9e2d65036465",
"size": "298",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/compas/geometry/bbox/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
} |
import webapp2
from pages import author_list
class AuthorPage(webapp2.RequestHandler):
    """The jlindber author home page of the GiR App Labs at AAMU app."""

    def get(self):
        """HTTP GET handler: serve the jlindber author page as plain text."""
        response = self.response
        response.headers['Content-Type'] = 'text/plain'
        response.write("Wassup, it's Justin!")
# Register this handler under the author's username with the shared author list.
author_list.Page.add_author('jlindber', AuthorPage)
| {
"content_hash": "00a4dcaf01bcd5bc02bc8d862d3d8973",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 24.176470588235293,
"alnum_prop": 0.683698296836983,
"repo_name": "GIR-at-AAMU/gir_app_labs_at_aamu",
"id": "1a086612c1548ddc138ca2dee8f37a8e0a300b4e",
"size": "1020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pages/authors/jlindber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "957"
},
{
"name": "HTML",
"bytes": "6033"
},
{
"name": "Python",
"bytes": "43441"
}
],
"symlink_target": ""
} |
from multiverse.msgsys import *
from multiverse.server.plugins import *
from multiverse.server.engine import *
# NOTE(review): rebinding False/True as names suggests this config script
# targets an old Jython/Python interpreter without boolean literals --
# confirm before modernizing (`False=0` is a SyntaxError on Python 3).
False=0
True=1
# Instantiate the voice plugin and register it with the server engine.
voicePlugin = VoicePlugin();
Engine.registerPlugin(voicePlugin);
| {
"content_hash": "6852948d0fe6db0efad208595576aa6b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 39,
"avg_line_length": 19.4,
"alnum_prop": 0.7989690721649485,
"repo_name": "longde123/MultiversePlatform",
"id": "1df337507a8d5eacba222abb667f1c6b28c6625b",
"size": "1412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/config/common/voice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1148"
},
{
"name": "Batchfile",
"bytes": "56002"
},
{
"name": "C",
"bytes": "2958956"
},
{
"name": "C#",
"bytes": "11292123"
},
{
"name": "C++",
"bytes": "428039"
},
{
"name": "CSS",
"bytes": "107446"
},
{
"name": "Groff",
"bytes": "3653"
},
{
"name": "HTML",
"bytes": "767415"
},
{
"name": "Inno Setup",
"bytes": "2093"
},
{
"name": "Java",
"bytes": "4444010"
},
{
"name": "JavaScript",
"bytes": "115349"
},
{
"name": "Makefile",
"bytes": "35639"
},
{
"name": "Matlab",
"bytes": "2076"
},
{
"name": "Objective-C",
"bytes": "44581"
},
{
"name": "Perl",
"bytes": "6299"
},
{
"name": "Python",
"bytes": "4648545"
},
{
"name": "Scheme",
"bytes": "48864"
},
{
"name": "Shell",
"bytes": "880494"
},
{
"name": "XSLT",
"bytes": "1834"
}
],
"symlink_target": ""
} |
from scrapy.item import Item, Field
class KrakItem(Item):
    """Scrapy item holding one company listing scraped from Krak."""

    company_name = Field()
    company_site_url = Field()
    short_description = Field()
    address = Field()
    phone = Field()
    phone_type = Field()
    gen_description = Field()
    description_headers = Field()
    description_paragraphs = Field()
    tags = Field()
    category = Field()
| {
"content_hash": "260a4e2fb4d695380cf06206e8af0290",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 48,
"avg_line_length": 24.77777777777778,
"alnum_prop": 0.6165919282511211,
"repo_name": "0--key/lib",
"id": "888a09b848fdd84015221d8d652297a6bccb8e05",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portfolio/2011_krakDK/krak/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28210"
},
{
"name": "Emacs Lisp",
"bytes": "76390"
},
{
"name": "HTML",
"bytes": "1136671"
},
{
"name": "JavaScript",
"bytes": "27718"
},
{
"name": "PHP",
"bytes": "378537"
},
{
"name": "Python",
"bytes": "1892998"
},
{
"name": "Shell",
"bytes": "4030"
}
],
"symlink_target": ""
} |
"""Test listing users.
"""
import datetime
import logging
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests import api as tests_api
from ceilometer.tests import db as tests_db
LOG = logging.getLogger(__name__)
class TestListEmptyProjects(tests_api.TestBase,
                            tests_db.MixinTestsWithBackendScenarios):
    """Listing projects against an empty backend yields an empty list."""

    def test_empty(self):
        response = self.get('/projects')
        self.assertEqual({'projects': []}, response)
class TestListProjects(tests_api.TestBase,
                       tests_db.MixinTestsWithBackendScenarios):
    """Project listing against a backend seeded with two metering samples."""

    def setUp(self):
        super(TestListProjects, self).setUp()
        # (user, project, resource, timestamp, metadata tag, source)
        sample_specs = [
            ('user-id', 'project-id', 'resource-id',
             datetime.datetime(2012, 7, 2, 10, 40), 'self.sample',
             'test_list_projects'),
            ('user-id2', 'project-id2', 'resource-id-alternate',
             datetime.datetime(2012, 7, 2, 10, 41), 'self.sample2',
             'test_list_users'),
        ]
        for user, project, resource, stamp, tag, source in sample_specs:
            counter = sample.Sample(
                'instance',
                'cumulative',
                'instance',
                1,
                user,
                project,
                resource,
                timestamp=stamp,
                resource_metadata={'display_name': 'test-server',
                                   'tag': tag},
                source=source,
            )
            msg = utils.meter_message_from_counter(
                counter,
                self.CONF.publisher.metering_secret,
            )
            self.conn.record_metering_data(msg)

    def test_projects(self):
        data = self.get('/projects')
        self.assertEqual(['project-id', 'project-id2'], data['projects'])

    def test_projects_non_admin(self):
        headers = {"X-Roles": "Member", "X-Project-Id": "project-id"}
        data = self.get('/projects', headers=headers)
        self.assertEqual(['project-id'], data['projects'])

    def test_with_source(self):
        data = self.get('/sources/test_list_users/projects')
        self.assertEqual(['project-id2'], data['projects'])

    def test_with_source_non_admin(self):
        headers = {"X-Roles": "Member", "X-Project-Id": "project-id2"}
        data = self.get('/sources/test_list_users/projects', headers=headers)
        self.assertEqual(['project-id2'], data['projects'])
| {
"content_hash": "e1d662e4aba8a730e8cfcbf2d16cf200",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 73,
"avg_line_length": 31.847058823529412,
"alnum_prop": 0.5411895086811969,
"repo_name": "nttdata-osscloud/ceilometer",
"id": "572cc99fd6e7e932337e492af794c0d4ce0d02f8",
"size": "3438",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ceilometer/tests/api/v1/test_list_projects_scenarios.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import email.mime.multipart
import email.mime.text
import logging
import os.path
import pickle
import re
import smtplib
import subprocess
import sys
from datetime import datetime, timedelta
from phabricator import Phabricator
# Setting up a virtualenv to run this script can be done by running the
# following commands:
# $ virtualenv venv
# $ . ./venv/bin/activate
# $ pip install Phabricator
# (name, clone URL) pairs for the git repositories this script knows about.
GIT_REPO_METADATA = (("llvm", "https://llvm.org/git/llvm.git"), )
# The below PhabXXX classes represent objects as modelled by Phabricator.
# The classes can be serialized to disk, to try and make sure that we don't
# needlessly have to re-fetch lots of data from Phabricator, as that would
# make this script unusably slow.
class PhabObject:
    """Base class for locally cached Phabricator objects.

    Subclasses set OBJECT_KIND to their Phabricator object kind name.
    """

    OBJECT_KIND = None

    def __init__(self, id):
        self.id = id
class PhabObjectCache:
    """An id-keyed, pickle-backed cache of one kind of PhabObject.

    Tracks the newest/oldest dateModified timestamps the cache covers so
    fetch loops know when they have caught up.
    """

    def __init__(self, PhabObjectClass):
        self.PhabObjectClass = PhabObjectClass
        self.most_recent_info = None
        self.oldest_info = None
        self.id2PhabObjects = {}

    def get_name(self):
        """E.g. "ReviewsCache" for a cache of PhabReview objects."""
        return self.PhabObjectClass.OBJECT_KIND + "sCache"

    def get(self, id):
        """Return the cached object for *id*, creating it on first access."""
        try:
            return self.id2PhabObjects[id]
        except KeyError:
            obj = self.PhabObjectClass(id)
            self.id2PhabObjects[id] = obj
            return obj

    def get_ids_in_cache(self):
        return list(self.id2PhabObjects.keys())

    def get_objects(self):
        return list(self.id2PhabObjects.values())

    DEFAULT_DIRECTORY = "PhabObjectCache"

    def _get_pickle_name(self, directory):
        """Path of the pickle file backing this cache, e.g. PhabReviews.pickle."""
        return os.path.join(
            directory, "Phab" + self.PhabObjectClass.OBJECT_KIND + "s.pickle")

    def populate_cache_from_disk(self, directory=DEFAULT_DIRECTORY):
        """
        FIXME: consider if serializing to JSON would bring interoperability
        advantages over serializing to pickle.
        """
        try:
            f = open(self._get_pickle_name(directory), "rb")
        except IOError as err:
            # Missing cache is fine -- we just start from scratch.
            print("Could not find cache. Error message: {0}. Continuing..."
                  .format(err))
            return
        with f:
            try:
                self.__dict__.update(pickle.load(f))
            except EOFError as err:
                print("Cache seems to be corrupt. " +
                      "Not using cache. Error message: {0}".format(err))

    def write_cache_to_disk(self, directory=DEFAULT_DIRECTORY):
        if not os.path.exists(directory):
            os.makedirs(directory)
        most_recent = None
        if self.most_recent_info is not None:
            most_recent = datetime.fromtimestamp(self.most_recent_info)
        with open(self._get_pickle_name(directory), "wb") as f:
            pickle.dump(self.__dict__, f)
        print("wrote cache to disk, most_recent_info= {0}".format(most_recent))
class PhabReview(PhabObject):
    """A Differential review together with its uploaded diffs."""

    OBJECT_KIND = "Review"

    def __init__(self, id):
        PhabObject.__init__(self, id)

    def update(self, title, dateCreated, dateModified, author):
        """Record the review metadata fetched from Phabricator."""
        self.title, self.author = title, author
        self.dateCreated, self.dateModified = dateCreated, dateModified

    def setPhabDiffs(self, phabDiffs):
        self.phabDiffs = phabDiffs
class PhabUser(PhabObject):
    """A Phabricator user account (PHID plus display name)."""

    OBJECT_KIND = "User"

    def __init__(self, id):
        PhabObject.__init__(self, id)

    def update(self, phid, realName):
        self.phid, self.realName = phid, realName
class PhabHunk:
    """One hunk of a diff, reduced to the pre-image line ranges it touches.

    Parses the unified-diff "corpus" text of a Phabricator REST API hunk
    and records in self.actual_lines_changed_offset a merged list of
    (start, end) pairs -- line numbers in the OLD file, padded with 3
    context lines on each side -- covering the changed lines.
    """

    def __init__(self, rest_api_hunk):
        self.oldOffset = int(rest_api_hunk["oldOffset"])
        self.oldLength = int(rest_api_hunk["oldLength"])
        # self.actual_lines_changed_offset will contain the offsets of the
        # lines that were changed in this hunk.
        self.actual_lines_changed_offset = []
        # offset tracks the current line number in the OLD file: it advances
        # on "-" and context lines, but not on "+" lines (which do not exist
        # in the old file).
        offset = self.oldOffset
        inHunk = False
        hunkStart = -1
        contextLines = 3
        for line in rest_api_hunk["corpus"].split("\n"):
            if line.startswith("+"):
                # line is a new line that got introduced in this patch.
                # Do not record it as a changed line.
                if inHunk is False:
                    inHunk = True
                    hunkStart = max(self.oldOffset, offset - contextLines)
                continue
            if line.startswith("-"):
                # line was changed or removed from the older version of the
                # code. Record it as a changed line.
                if inHunk is False:
                    inHunk = True
                    hunkStart = max(self.oldOffset, offset - contextLines)
                offset += 1
                continue
            # line is a context line: it closes any open sub-hunk.
            if inHunk is True:
                inHunk = False
                hunkEnd = offset + contextLines
                self.actual_lines_changed_offset.append((hunkStart, hunkEnd))
            offset += 1
        # A sub-hunk that runs to the end of the corpus is closed here.
        if inHunk is True:
            hunkEnd = offset + contextLines
            self.actual_lines_changed_offset.append((hunkStart, hunkEnd))
        # The above algorithm could result in adjacent or overlapping ranges
        # being recorded into self.actual_lines_changed_offset.
        # Merge the adjacent and overlapping ranges in there:
        # (the (maxsize, maxsize) sentinel forces the final real range to be
        # flushed into t; the sentinel itself is never appended)
        t = []
        lastRange = None
        for start, end in self.actual_lines_changed_offset + \
                [(sys.maxsize, sys.maxsize)]:
            if lastRange is None:
                lastRange = (start, end)
            else:
                if lastRange[1] >= start:
                    lastRange = (lastRange[0], end)
                else:
                    t.append(lastRange)
                    lastRange = (start, end)
        self.actual_lines_changed_offset = t
class PhabChange:
    """The per-file part of a diff: old path plus its hunks."""

    def __init__(self, rest_api_change):
        self.oldPath = rest_api_change["oldPath"]
        self.hunks = []
        for hunk in rest_api_change["hunks"]:
            self.hunks.append(PhabHunk(hunk))
class PhabDiff(PhabObject):
    """A single uploaded diff belonging to a Differential revision."""

    OBJECT_KIND = "Diff"

    def __init__(self, id):
        PhabObject.__init__(self, id)

    def update(self, rest_api_results):
        """Populate this diff from a differential.querydiffs result entry."""
        fields = rest_api_results
        self.revisionID = fields["revisionID"]
        self.dateModified = int(fields["dateModified"])
        self.dateCreated = int(fields["dateCreated"])
        self.changes = [PhabChange(change) for change in fields["changes"]]
class ReviewsCache(PhabObjectCache):
    """PhabObjectCache specialized for PhabReview objects."""

    def __init__(self):
        super(ReviewsCache, self).__init__(PhabReview)
class UsersCache(PhabObjectCache):
    """PhabObjectCache specialized for PhabUser objects."""

    def __init__(self):
        super(UsersCache, self).__init__(PhabUser)
# Module-level singleton caches shared by the helper functions below.
reviews_cache = ReviewsCache()
users_cache = UsersCache()
def init_phab_connection():
    """Create a Phabricator API client and refresh its conduit interfaces."""
    connection = Phabricator()
    connection.update_interfaces()
    return connection
def update_cached_info(phab, cache, phab_query, order, record_results,
                       max_nr_entries_per_fetch, max_nr_days_to_cache):
    """Page through a conduit search endpoint and fold results into *cache*.

    phab_query is an attribute path on the phab client (e.g.
    ("differential", "revision", "search")); record_results is a callback
    (cache, results, phab) -> (most_recent, oldest) timestamps. Fetching
    stops once the cursor is exhausted or the page is older than
    max_nr_days_to_cache before the newest record and the cache already
    covers both ends. The cache is written to disk after every page.
    """
    q = phab
    LIMIT = max_nr_entries_per_fetch
    # Resolve e.g. phab.differential.revision.search from the tuple path.
    for query_step in phab_query:
        q = getattr(q, query_step)
    results = q(order=order, limit=LIMIT)
    most_recent_info, oldest_info = record_results(cache, results, phab)
    # Never fetch further back than this many days before the newest record.
    oldest_info_to_fetch = datetime.fromtimestamp(most_recent_info) - \
        timedelta(days=max_nr_days_to_cache)
    most_recent_info_overall = most_recent_info
    cache.write_cache_to_disk()
    after = results["cursor"]["after"]
    print("after: {0!r}".format(after))
    print("most_recent_info: {0}".format(
        datetime.fromtimestamp(most_recent_info)))
    while (after is not None
           and datetime.fromtimestamp(oldest_info) > oldest_info_to_fetch):
        # Keep paging only while the on-disk cache is missing data on at
        # least one end (older than what it has, or newer than what it has).
        need_more_older_data = \
            (cache.oldest_info is None or
             datetime.fromtimestamp(cache.oldest_info) > oldest_info_to_fetch)
        print(("need_more_older_data={0} cache.oldest_info={1} " +
               "oldest_info_to_fetch={2}").format(
                   need_more_older_data,
                   datetime.fromtimestamp(cache.oldest_info)
                   if cache.oldest_info is not None else None,
                   oldest_info_to_fetch))
        need_more_newer_data = \
            (cache.most_recent_info is None or
             cache.most_recent_info < most_recent_info)
        print(("need_more_newer_data={0} cache.most_recent_info={1} " +
               "most_recent_info={2}")
              .format(need_more_newer_data, cache.most_recent_info,
                      most_recent_info))
        if not need_more_older_data and not need_more_newer_data:
            break
        results = q(order=order, after=after, limit=LIMIT)
        most_recent_info, oldest_info = record_results(cache, results, phab)
        after = results["cursor"]["after"]
        print("after: {0!r}".format(after))
        print("most_recent_info: {0}".format(
            datetime.fromtimestamp(most_recent_info)))
        cache.write_cache_to_disk()
    cache.most_recent_info = most_recent_info_overall
    if after is None:
        # We did fetch all records. Mark the cache to contain all info since
        # the start of time.
        oldest_info = 0
    cache.oldest_info = oldest_info
    cache.write_cache_to_disk()
def record_reviews(cache, reviews, phab):
    """Fold one differential.revision.search result page into *cache*.

    For each DREV entry that is newer than the cached copy (or not cached
    yet), the associated diffs are fetched via differential.querydiffs and
    stored on the review. Returns the (newest, oldest) dateModified
    timestamps seen on this page, or (None, None) for a page without DREVs.
    """
    most_recent_info = None
    oldest_info = None
    for reviewInfo in reviews["data"]:
        if reviewInfo["type"] != "DREV":
            continue
        id = reviewInfo["id"]
        # phid = reviewInfo["phid"]
        dateModified = int(reviewInfo["fields"]["dateModified"])
        dateCreated = int(reviewInfo["fields"]["dateCreated"])
        title = reviewInfo["fields"]["title"]
        author = reviewInfo["fields"]["authorPHID"]
        phabReview = cache.get(id)
        # Only hit the (slow) querydiffs endpoint when the cached review is
        # absent or stale.
        if "dateModified" not in phabReview.__dict__ or \
                dateModified > phabReview.dateModified:
            diff_results = phab.differential.querydiffs(revisionIDs=[id])
            diff_ids = sorted(diff_results.keys())
            phabDiffs = []
            for diff_id in diff_ids:
                diffInfo = diff_results[diff_id]
                d = PhabDiff(diff_id)
                d.update(diffInfo)
                phabDiffs.append(d)
            phabReview.update(title, dateCreated, dateModified, author)
            phabReview.setPhabDiffs(phabDiffs)
            print("Updated D{0} modified on {1} ({2} diffs)".format(
                id, datetime.fromtimestamp(dateModified), len(phabDiffs)))

        # Track the newest/oldest timestamps across ALL DREVs on the page,
        # whether or not the cached entry was refreshed.
        if most_recent_info is None:
            most_recent_info = dateModified
        elif most_recent_info < dateModified:
            most_recent_info = dateModified

        if oldest_info is None:
            oldest_info = dateModified
        elif oldest_info > dateModified:
            oldest_info = dateModified
    return most_recent_info, oldest_info
def record_users(cache, users, phab):
    """Fold one user.search result page into *cache*.

    Returns the (newest, oldest) dateModified timestamps seen among USER
    entries on this page, or (None, None) when there were none.
    """
    most_recent_info = None
    oldest_info = None
    for info in users["data"]:
        if info["type"] != "USER":
            continue
        phabUser = cache.get(info["id"])
        phabUser.update(info["phid"], info["fields"]["realName"])
        # dateCreated is available in info["fields"] but currently unused.
        dateModified = int(info["fields"]["dateModified"])
        if most_recent_info is None or dateModified > most_recent_info:
            most_recent_info = dateModified
        if oldest_info is None or dateModified < oldest_info:
            oldest_info = dateModified
    return most_recent_info, oldest_info
# One entry per cached object kind:
# (cache, conduit query path, result ordering, record callback,
#  entries per fetch, max nr of days to keep fetching back).
PHABCACHESINFO = ((reviews_cache, ("differential", "revision", "search"),
                   "updated", record_reviews, 5, 7),
                  (users_cache, ("user", "search"), "newest", record_users,
                   100, 1000))
def load_cache():
    """Load every Phabricator object cache from disk and print basic stats."""
    for cache_entry in PHABCACHESINFO:
        cache = cache_entry[0]
        cache.populate_cache_from_disk()
        print("Loaded {0} nr entries: {1}".format(
            cache.get_name(), len(cache.get_ids_in_cache())))
        most_recent = cache.most_recent_info
        print("Loaded {0} has most recent info: {1}".format(
            cache.get_name(),
            datetime.fromtimestamp(most_recent)
            if most_recent is not None else None))
def update_cache(phab):
    """Refresh every cache from the Phabricator API and persist it to disk.

    Loads the on-disk state first so only newer objects need fetching.
    """
    load_cache()
    for (cache, phab_query, order, record_results,
         max_per_fetch, max_days) in PHABCACHESINFO:
        update_cached_info(phab, cache, phab_query, order, record_results,
                           max_per_fetch, max_days)
        nr_cached = len(cache.get_ids_in_cache())
        print("{0} objects in {1}".format(nr_cached, cache.get_name()))
        cache.write_cache_to_disk()
def get_most_recent_reviews(days):
    """Return cached reviews modified within *days* of the newest review.

    The result is sorted newest-first.  Note the window is measured from the
    most recently touched review in the cache, not from "now".
    """
    by_recency = sorted(
        reviews_cache.get_objects(), key=lambda r: r.dateModified,
        reverse=True)
    if not by_recency:
        return by_recency
    newest_time = datetime.fromtimestamp(by_recency[0].dateModified)
    cut_off_date = newest_time - timedelta(days=days)
    result = []
    for review in by_recency:
        # The list is sorted, so the first too-old review ends the scan.
        if datetime.fromtimestamp(review.dateModified) < cut_off_date:
            break
        result.append(review)
    return result
# All of the above code is about fetching data from Phabricator and caching it
# on local disk. The below code contains the actual "business logic" for this
# script.
# Lazily-built PHID -> real-name lookup table (None until first use).
_userphid2realname = None


def get_real_name_from_author(user_phid):
    """Return the real name for a Phabricator user PHID, or "unknown".

    The lookup table is built once, on first call, from the users cache.
    """
    global _userphid2realname
    if _userphid2realname is None:
        _userphid2realname = {
            user.phid: user.realName for user in users_cache.get_objects()
        }
    return _userphid2realname.get(user_phid, "unknown")
def print_most_recent_reviews(phab, days, filter_reviewers):
    """Build (and print) the report of reviews touched in the last *days* days.

    Each review is matched to potential reviewers via
    find_reviewers_for_review and filtered through *filter_reviewers*.
    Returns the full report text.  *phab* is currently unused in the body;
    presumably kept for interface symmetry — confirm before removing.
    """
    msgs = []

    # Collect every line into msgs (for the returned/emailed report) while
    # also echoing it to stdout.
    def add_msg(msg):
        msgs.append(msg)
        print(msg)

    newest_reviews = get_most_recent_reviews(days)
    add_msg(u"These are the reviews that look interesting to be reviewed. " +
            u"The report below has 2 sections. The first " +
            u"section is organized per review; the second section is organized "
            + u"per potential reviewer.\n")
    # get_most_recent_reviews returns newest-first, so the last entry is the
    # oldest review included in this report.
    oldest_review = newest_reviews[-1] if len(newest_reviews) > 0 else None
    oldest_datetime = \
        datetime.fromtimestamp(oldest_review.dateModified) \
        if oldest_review else None
    add_msg((u"The report below is based on analyzing the reviews that got " +
             u"touched in the past {0} days (since {1}). " +
             u"The script found {2} such reviews.\n").format(
                 days, oldest_datetime, len(newest_reviews)))
    # Section 1: per review, list matched potential reviewers, while also
    # accumulating a reviewer -> [(review, scores)] mapping for section 2.
    reviewer2reviews_and_scores = {}
    for i, review in enumerate(newest_reviews):
        matched_reviewers = find_reviewers_for_review(review)
        matched_reviewers = filter_reviewers(matched_reviewers)
        if len(matched_reviewers) == 0:
            continue
        add_msg((u"{0:>3}. https://reviews.llvm.org/D{1} by {2}\n     {3}\n" +
                 u"     Last updated on {4}").format(
                     i, review.id,
                     get_real_name_from_author(review.author), review.title,
                     datetime.fromtimestamp(review.dateModified)))
        for reviewer, scores in matched_reviewers:
            add_msg(u"    potential reviewer {0}, score {1}".format(
                reviewer,
                "(" + "/".join(["{0:.1f}%".format(s) for s in scores]) + ")"))
            if reviewer not in reviewer2reviews_and_scores:
                reviewer2reviews_and_scores[reviewer] = []
            reviewer2reviews_and_scores[reviewer].append((review, scores))

    # Print out a summary per reviewer.
    for reviewer in sorted(reviewer2reviews_and_scores.keys()):
        reviews_and_scores = reviewer2reviews_and_scores[reviewer]
        # Sort this reviewer's matches by score tuple, best match first.
        reviews_and_scores.sort(key=lambda rs: rs[1], reverse=True)
        add_msg(u"\n\nSUMMARY FOR {0} (found {1} reviews):".format(
            reviewer, len(reviews_and_scores)))
        for review, scores in reviews_and_scores:
            add_msg(u"[{0}] https://reviews.llvm.org/D{1} '{2}' by {3}".format(
                "/".join(["{0:.1f}%".format(s) for s in scores]), review.id,
                review.title, get_real_name_from_author(review.author)))
    return "\n".join(msgs)
def get_git_cmd_output(cmd):
    """Run shell command *cmd* and return its decoded output.

    stderr is folded into the output.  Returns None when the command exits
    non-zero (logged at debug level) — callers treat that as "no data".
    """
    logging.debug(cmd)
    try:
        raw = subprocess.check_output(
            cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        logging.debug(str(e))
        return None
    return raw.decode("utf-8", errors='ignore')
# Matches "author-mail <address>" lines emitted by `git blame --line-porcelain`.
reAuthorMail = re.compile("^author-mail <([^>]*)>.*$")


def parse_blame_output_line_porcelain(blame_output):
    """Count author e-mail occurrences in `git blame --line-porcelain` output.

    Returns a dict mapping e-mail address -> number of blamed lines.
    A None *blame_output* (failed git command) yields an empty dict.
    """
    counts = {}
    if blame_output is None:
        return counts
    for line in blame_output.split('\n'):
        match = reAuthorMail.match(line)
        if not match:
            continue
        address = match.group(1)
        counts[address] = counts.get(address, 0) + 1
    return counts
def find_reviewers_for_diff_heuristic(diff):
    """Suggest reviewers for *diff* using two git-blame-based heuristics.

    Heuristic 1: people who last touched the exact line ranges the patch
    changes.  Heuristic 2: people who last touched the files the patch
    changes.  Returns a list of (reviewer_email, (line_score, file_score))
    tuples, scores in percent, sorted best match first.
    """
    reviewers2nr_lines_touched = {}
    reviewers2nr_files_touched = {}
    # Assume last revision before diff was modified is the revision the diff
    # applies to.
    git_repo = "git_repos/llvm"
    # Bug fix: use %S (seconds, 00-59).  The previous "%s" is a non-portable
    # strftime extension that expands to the full epoch timestamp, producing
    # a malformed seconds field in the date handed to `git --before`.
    cmd = 'git -C {0} rev-list -n 1 --before="{1}" master'.format(
        git_repo,
        datetime.fromtimestamp(
            diff.dateModified).strftime("%Y-%m-%d %H:%M:%S"))
    base_revision = get_git_cmd_output(cmd).strip()
    logging.debug("Base revision={0}".format(base_revision))
    for change in diff.changes:
        path = change.oldPath
        # Compute heuristic 1: look at context of patch lines.
        for hunk in change.hunks:
            for start_line, end_line in hunk.actual_lines_changed_offset:
                # Collect git blame results for authors in those ranges.
                cmd = ("git -C {0} blame --encoding=utf-8 --date iso -f -e " +
                       "-w --line-porcelain -L {1},{2} {3} -- {4}").format(
                    git_repo, start_line, end_line, base_revision, path)
                blame_output = get_git_cmd_output(cmd)
                for reviewer, nr_occurences in \
                        parse_blame_output_line_porcelain(blame_output).items():
                    if reviewer not in reviewers2nr_lines_touched:
                        reviewers2nr_lines_touched[reviewer] = 0
                    reviewers2nr_lines_touched[reviewer] += nr_occurences
        # Compute heuristic 2: don't look at context, just at files touched.
        # Blame the whole file; each author found counts this file once.
        cmd = ("git -C {0} blame --encoding=utf-8 --date iso -f -e -w " +
               "--line-porcelain {1} -- {2}").format(git_repo, base_revision,
                                                     path)
        blame_output = get_git_cmd_output(cmd)
        for reviewer, nr_occurences in parse_blame_output_line_porcelain(
                blame_output).items():
            if reviewer not in reviewers2nr_files_touched:
                reviewers2nr_files_touched[reviewer] = 0
            reviewers2nr_files_touched[reviewer] += 1

    # Compute "match scores": percentage of blamed lines / touched files per
    # reviewer.  The file-touch dict is used as the key set since heuristic 2
    # blames entire files, so it covers every author heuristic 1 found.
    total_nr_lines = sum(reviewers2nr_lines_touched.values())
    total_nr_files = len(diff.changes)
    reviewers_matchscores = \
        [(reviewer,
          (reviewers2nr_lines_touched.get(reviewer, 0)*100.0/total_nr_lines
           if total_nr_lines != 0 else 0,
           reviewers2nr_files_touched[reviewer]*100.0/total_nr_files
           if total_nr_files != 0 else 0))
         for reviewer, nr_lines
         in reviewers2nr_files_touched.items()]
    reviewers_matchscores.sort(key=lambda i: i[1], reverse=True)
    return reviewers_matchscores
def find_reviewers_for_review(review):
    """Suggest potential reviewers for *review* based on its newest diff.

    Returns a list of (reviewer, scores) tuples as produced by
    find_reviewers_for_diff_heuristic.  Returns an empty list when the
    review has no diffs.
    """
    # Process the newest diff first.
    diffs = sorted(
        review.phabDiffs, key=lambda d: d.dateModified, reverse=True)
    if len(diffs) == 0:
        # Bug fix: return an empty list, not None — callers filter and
        # len() the result, which crashed on the bare `return`.
        return []
    diff = diffs[0]
    matched_reviewers = find_reviewers_for_diff_heuristic(diff)
    # Show progress, as this is a slow operation:
    sys.stdout.write('.')
    sys.stdout.flush()
    logging.debug(u"matched_reviewers: {0}".format(matched_reviewers))
    return matched_reviewers
def update_git_repos():
    """Clone any missing repos listed in GIT_REPO_METADATA, then pull each.

    Repositories live under the local "git_repos" directory.
    """
    base_dir = "git_repos"
    for repo_name, repo_url in GIT_REPO_METADATA:
        repo_dir = os.path.join(base_dir, repo_name)
        if not os.path.exists(repo_dir):
            get_git_cmd_output("git clone {0} {1}".format(repo_url, repo_dir))
        get_git_cmd_output("git -C {0} pull --rebase".format(repo_dir))
def send_emails(email_addresses, sender, msg):
    """Email the plain-text report *msg* from *sender* to each address.

    Uses the default (local) SMTP server; one message per recipient.
    """
    smtp = smtplib.SMTP()
    smtp.connect()
    for recipient in email_addresses:
        mime_msg = email.mime.multipart.MIMEMultipart()
        mime_msg['From'] = sender
        mime_msg['To'] = recipient
        mime_msg['Subject'] = 'LLVM patches you may be able to review.'
        mime_msg.attach(
            email.mime.text.MIMEText(msg.encode('utf-8'), 'plain'))
        # python 3.x: smtp.send_message(mime_msg)
        smtp.sendmail(mime_msg['From'], mime_msg['To'], mime_msg.as_string())
    smtp.quit()
def filter_reviewers_to_report_for(people_to_look_for):
    """Return a filter keeping only potential reviewers in *people_to_look_for*.

    This example filter restricts the report to the people who will receive
    the report email.  The returned callable maps a list of
    (reviewer, scores) tuples to the subset whose reviewer is of interest.
    """
    def _filter(potential_reviewers):
        return [entry for entry in potential_reviewers
                if entry[0] in people_to_look_for]
    return _filter
def main():
    """Entry point: parse options, refresh caches, and report reviews that
    the given contributors could review, optionally emailing the report."""
    parser = argparse.ArgumentParser(
        description='Match open reviews to potential reviewers.')
    parser.add_argument(
        '--no-update-cache',
        dest='update_cache',
        action='store_false',
        default=True,
        help='Do not update cached Phabricator objects')
    parser.add_argument(
        '--email-report',
        dest='email_report',
        nargs='*',
        # Bug fix: default to [] (not "").  The "" default made the
        # `!= []` check below truthy, so a run without --email-report
        # still attempted an SMTP connection.
        default=[],
        help="A email addresses to send the report to.")
    parser.add_argument(
        '--sender',
        dest='sender',
        default="",
        help="The email address to use in 'From' on messages emailed out.")
    parser.add_argument(
        '--email-addresses',
        dest='email_addresses',
        nargs='*',
        # Default to an empty list so the decode loop below does not crash
        # on None when the option is omitted.
        default=[],
        help="The email addresses (as known by LLVM git) of " +
        "the people to look for reviews for.")
    # default=0 keeps `args.verbose >= 1` well-defined when -v is absent
    # (action='count' otherwise leaves the attribute as None).
    parser.add_argument('--verbose', '-v', action='count', default=0)
    args = parser.parse_args()
    if args.verbose >= 1:
        logging.basicConfig(level=logging.DEBUG)

    people_to_look_for = [e.decode('utf-8') for e in args.email_addresses]
    logging.debug("Will look for reviews that following contributors could " +
                  "review: {}".format(people_to_look_for))
    logging.debug("Will email a report to: {}".format(args.email_report))

    phab = init_phab_connection()

    if args.update_cache:
        update_cache(phab)

    load_cache()
    update_git_repos()
    msg = print_most_recent_reviews(
        phab,
        days=1,
        filter_reviewers=filter_reviewers_to_report_for(people_to_look_for))

    if args.email_report != []:
        send_emails(args.email_report, args.sender, msg)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "bbb0a5c7ec689a52926b4082f2ce521b",
"timestamp": "",
"source": "github",
"line_count": 633,
"max_line_length": 80,
"avg_line_length": 37.720379146919434,
"alnum_prop": 0.6041378732671608,
"repo_name": "apple/swift-llvm",
"id": "7bfbec8cfde9e5459274fc25dc3140438ef921e5",
"size": "23900",
"binary": false,
"copies": "27",
"ref": "refs/heads/stable",
"path": "utils/Reviewing/find_interesting_reviews.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "47836974"
},
{
"name": "Batchfile",
"bytes": "9003"
},
{
"name": "C",
"bytes": "848808"
},
{
"name": "C++",
"bytes": "82565298"
},
{
"name": "CMake",
"bytes": "523558"
},
{
"name": "CSS",
"bytes": "12605"
},
{
"name": "Dockerfile",
"bytes": "5884"
},
{
"name": "Emacs Lisp",
"bytes": "10692"
},
{
"name": "Go",
"bytes": "144690"
},
{
"name": "HTML",
"bytes": "37873"
},
{
"name": "LLVM",
"bytes": "131753512"
},
{
"name": "Logos",
"bytes": "28"
},
{
"name": "OCaml",
"bytes": "305839"
},
{
"name": "Objective-C",
"bytes": "10229"
},
{
"name": "Pawn",
"bytes": "1572"
},
{
"name": "Perl",
"bytes": "25354"
},
{
"name": "Python",
"bytes": "971620"
},
{
"name": "Roff",
"bytes": "39"
},
{
"name": "Shell",
"bytes": "97233"
},
{
"name": "Swift",
"bytes": "271"
},
{
"name": "Vim script",
"bytes": "17467"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import sys
from pathlib import Path
import yaml
from rich.console import Console
# Refuse to be imported as a module: this file is a pre-commit executable.
if __name__ not in ("__main__", "__mp_main__"):
    raise SystemExit(
        # Bug fix: the two concatenated fragments were missing a separating
        # space, producing "...as a module.To run this script...".
        "This file is intended to be executed as an executable program. You cannot use it as a module."
        f" To run this script, run the ./{__file__} command [FILE] ..."
    )

console = Console(color_system="standard", width=200)
def check_file(the_file: Path) -> int:
    """Return the number of checkout steps in *the_file* (a GitHub workflow
    YAML file) that do not explicitly disable credential persistence."""
    error_count = 0
    workflow = yaml.safe_load(the_file.read_text())
    console.print(f"Checking file [yellow]{the_file}[/]")
    for job in workflow['jobs'].values():
        for step in job['steps']:
            uses = step.get('uses')
            pretty_step = yaml.safe_dump(step, indent=2)
            # Only actions/checkout steps are subject to the check.
            if uses is None or not uses.startswith('actions/checkout'):
                continue
            with_clause = step.get('with')
            if with_clause is None:
                console.print(
                    f"\n[red]The `with` clause is missing in step:[/]\n\n{pretty_step}")
                error_count += 1
                continue
            persist_credentials = with_clause.get("persist-credentials")
            if persist_credentials is None:
                console.print(
                    "\n[red]The `with` clause does not have persist-credentials in step:[/]"
                    f"\n\n{pretty_step}"
                )
                error_count += 1
            elif persist_credentials:
                console.print(
                    "\n[red]The `with` clause have persist-credentials=True in step:[/]"
                    f"\n\n{pretty_step}"
                )
                error_count += 1
    return error_count
if __name__ == '__main__':
    # Accumulate errors across every workflow file given on the command line.
    total_err_num = 0
    for a_file in sys.argv[1:]:
        total_err_num += check_file(Path(a_file))
    if total_err_num:
        # Bug fix in the user-facing message: "There are are" -> "There are".
        console.print(
            """
[red]There are some checkout instructions in github workflows that have no "persist_credentials"
set to False.[/]
For security reasons - make sure all of the checkout actions have persist_credentials set, similar to:
      - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )"
        uses: actions/checkout@v2
        with:
          persist-credentials: false
            """
        )
        sys.exit(1)
| {
"content_hash": "881bc1c920e0d5c5d2359baf02841a4c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 103,
"avg_line_length": 34.75,
"alnum_prop": 0.5367705835331734,
"repo_name": "cfei18/incubator-airflow",
"id": "803c3c3b9af9f2dc726007ffac6652420bcd66f8",
"size": "3309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/ci/pre_commit/pre_commit_checkout_no_credentials.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import mock
import pytest
import workflows.contrib.start_service
def test_get_command_line_help(capsys):
  '''--help must print a usage message (with the program name) and exit.'''
  starter = workflows.contrib.start_service.ServiceStarter()
  with pytest.raises(SystemExit):
    starter.run(['--help'], program_name='sentinelvalue')

  captured_out, _ = capsys.readouterr()
  assert 'Usage: sentinelvalue' in captured_out
@mock.patch('workflows.contrib.start_service.OptionParser')
@mock.patch('workflows.contrib.start_service.workflows.transport.lookup')
@mock.patch('workflows.contrib.start_service.workflows.frontend')
@mock.patch('workflows.contrib.start_service.workflows.services')
def test_script_initialises_transport_and_starts_frontend(mock_services, mock_frontend, mock_tlookup, mock_parser):
  '''The start_service script should look up the requested transport,
  resolve the (partially named) service, and hand both to the frontend.'''
  options = mock.Mock()
  options.service = 'someservice'
  options.transport = mock.sentinel.transport
  mock_parser.return_value.parse_args.return_value = (options, mock.Mock())
  mock_services.get_known_services.return_value = { 'SomeService': None }

  workflows.contrib.start_service.ServiceStarter().run(
      cmdline_args=['-s', 'some'], version=mock.sentinel.version)

  # Transport chosen from the parsed options, parser built with our version.
  mock_tlookup.assert_called_once_with(mock.sentinel.transport)
  mock_parser.assert_called_once_with(usage=mock.ANY, version=mock.sentinel.version)
  # '-s some' must resolve to the known service 'SomeService' and be run.
  mock_frontend.Frontend.assert_called_once_with(service='SomeService', transport=mock_tlookup.return_value)
  mock_frontend.Frontend.return_value.run.assert_called_once_with()
| {
"content_hash": "863f90272ec3ba10580e3511d9ca7854",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 115,
"avg_line_length": 54.4375,
"alnum_prop": 0.777841561423651,
"repo_name": "xia2/workflows",
"id": "2165925d37559a45fab5b711be60d07777f9e4d2",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workflows/contrib/test_start_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "186945"
}
],
"symlink_target": ""
} |
# VTK regression-test script: render an EnSight binary dataset colored by
# its "displacement" point array.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.StereoCapableWindowOn()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Read the binary EnSight case file at time value 1.0.
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/blow4_bin.case")
reader.SetTimeValue(1.0)

# Extract polygonal geometry and color it by component 0 of the
# "displacement" point-data array.
geom = vtk.vtkGeometryFilter()
geom.SetInputConnection(reader.GetOutputPort())
mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(geom.GetOutputPort())
mapper.SetColorModeToMapScalars()
mapper.SetScalarModeToUsePointFieldData()
mapper.ColorByArrayComponent("displacement",0)
mapper.SetScalarRange(0,2.08)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign our actor to the renderer
ren1.AddActor(actor)
# enable user interface interactor
iren.Initialize()
# Hard-coded camera placement — presumably matches the test's baseline image.
ren1.GetActiveCamera().SetPosition(99.3932,17.6571,-22.6071)
ren1.GetActiveCamera().SetFocalPoint(3.5,12,1.5)
ren1.GetActiveCamera().SetViewAngle(30)
ren1.GetActiveCamera().SetViewUp(0.239617,-0.01054,0.97081)
ren1.ResetCameraClippingRange()
renWin.Render()
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
| {
"content_hash": "25723385f188fc8fe2bd383cfa404448",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 37.292682926829265,
"alnum_prop": 0.7874427730542839,
"repo_name": "timkrentz/SunTracker",
"id": "17513abc10282222053f2dc214f5925a48cefe48",
"size": "1552",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/IO/EnSight/Testing/Python/EnSightBlow4Bin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
"""add organization_users table
Revision ID: be985ed5f992
Revises:
Create Date: 2017-02-28 19:51:48.711414
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'be985ed5f992'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the organization_users association table and user.gh_type."""
    op.create_table(
        'organization_users',
        sa.Column('organization_id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        # Both sides reference user.id — presumably organizations are rows
        # in the user table, distinguished by the gh_type column below.
        sa.ForeignKeyConstraint(['organization_id'], ['user.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('organization_id', 'user_id'),
    )
    gh_type_column = sa.Column(
        'gh_type', sa.Enum('Organization', 'User'), nullable=True)
    op.add_column('user', gh_type_column)
def downgrade():
    """Undo upgrade(): drop the gh_type column and the association table."""
    op.drop_column('user', 'gh_type')
    op.drop_table('organization_users')
| {
"content_hash": "b6a0be71cd24f8cf31a5c3fba38baa5e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 29.575757575757574,
"alnum_prop": 0.5973360655737705,
"repo_name": "olin-computing/assignment-dashboard",
"id": "d4caff6c34539aa2f0b28d11691f596aa86df0d3",
"size": "976",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "migrations/versions/be985ed5f992_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "224"
},
{
"name": "HTML",
"bytes": "14535"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Nginx",
"bytes": "751"
},
{
"name": "Python",
"bytes": "57546"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
} |
import sys
from pyraf import iraf
def run_imstat(input):
    """Run IRAF's imstat task on each image name in *input*."""
    # Load the IRAF "images" package so the imstat task is available.
    iraf.images()
    for image_name in input:
        iraf.imstat(image_name)
if __name__ == "__main__":
    # All command-line arguments are treated as image names.
    run_imstat(sys.argv[1:])
| {
"content_hash": "114409444d35c3010e9059980e567c59",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 28,
"avg_line_length": 18.4,
"alnum_prop": 0.6086956521739131,
"repo_name": "sniemi/SamPy",
"id": "146d7b0ba315e3d1c592b6e03652859fc26ee4e5",
"size": "207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sandbox/src2/src/imstat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
} |
__author__ = 'sim'
class Edge:
    """A directed parent -> child link between two Node objects.

    Constructing an Edge registers the relationship on both endpoints via
    their add_child/set_parent methods.
    """

    def __init__(self, parent, child):
        """
        :param parent: Node object parent
        :param child: Node object child
        """
        self.parent = parent
        self.child = child
        # NOTE: a self-loop (parent is child) is currently not rejected;
        # an assert for it was previously considered but left disabled.
        parent.add_child(child)
        child.set_parent(parent)

    def get_parent(self):
        return self.parent

    def get_child(self):
        return self.child

    def get_parent_and_child(self):
        """Return both endpoints as a set (one element for a self-loop)."""
        return {self.parent, self.child}
"content_hash": "5d3ba622563ec2e6a46e48f0f7c474a8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 85,
"avg_line_length": 23.53846153846154,
"alnum_prop": 0.5800653594771242,
"repo_name": "rug-compling/hmm-reps",
"id": "5f501f681c6c5305fff12aaf06a4ca79d7804d22",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trees/edge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "12728"
},
{
"name": "Python",
"bytes": "341860"
}
],
"symlink_target": ""
} |
"""
Test runner for the JSON Schema official test suite
Tests comprehensive correctness of each draft's validator.
See https://github.com/json-schema/JSON-Schema-Test-Suite for details.
"""
from decimal import Decimal
import glob
import json
import io
import itertools
import os
import re
import subprocess
try:
from sys import pypy_version_info
except ImportError:
pypy_version_info = None
from jsonschema import (
FormatError, SchemaError, ValidationError, Draft3Validator,
Draft4Validator, FormatChecker, draft3_format_checker,
draft4_format_checker, validate,
)
from jsonschema.compat import PY3
from jsonschema.tests.compat import mock, unittest
import jsonschema
# Locate the JSON-Schema-Test-Suite checkout: either via the
# JSON_SCHEMA_TEST_SUITE environment variable or the "json" directory
# next to the jsonschema package.
REPO_ROOT = os.path.join(os.path.dirname(jsonschema.__file__), os.path.pardir)
SUITE = os.getenv("JSON_SCHEMA_TEST_SUITE", os.path.join(REPO_ROOT, "json"))

if not os.path.isdir(SUITE):
    raise ValueError(
        "Can't find the JSON-Schema-Test-Suite directory. Set the "
        "'JSON_SCHEMA_TEST_SUITE' environment variable or run the tests from "
        "alongside a checkout of the suite."
    )

TESTS_DIR = os.path.join(SUITE, "tests")
JSONSCHEMA_SUITE = os.path.join(SUITE, "bin", "jsonschema_suite")

# Load the suite's remote-ref fixtures by running its helper script and
# parsing its JSON stdout (wrapped for text mode on Python 3).
REMOTES = subprocess.Popen(
    ["python", JSONSCHEMA_SUITE, "remotes"], stdout=subprocess.PIPE,
).stdout
if PY3:
    REMOTES = io.TextIOWrapper(REMOTES)
REMOTES = json.load(REMOTES)
def make_case(schema, data, valid, name):
    """Build a test method asserting *data* (in)validates against *schema*.

    When *valid* is true the generated method expects validation to pass;
    otherwise it expects a ValidationError.  The method is named *name*.
    """
    if valid:
        def test_case(self):
            extra = getattr(self, "validator_kwargs", {})
            validate(data, schema, cls=self.validator_class, **extra)
    else:
        def test_case(self):
            extra = getattr(self, "validator_kwargs", {})
            with self.assertRaises(ValidationError):
                validate(data, schema, cls=self.validator_class, **extra)

    if not PY3:
        # Python 2 requires method names to be bytestrings.
        name = name.encode("utf-8")
    test_case.__name__ = name
    return test_case
def maybe_skip(skip, test, case):
    """Wrap *test* in unittest.skip when *skip* gives a reason for *case*.

    *skip* may be None (never skip) or a callable mapping a case to a skip
    reason string, or None to run the test unchanged.
    """
    if skip is None:
        return test
    reason = skip(case)
    if reason is None:
        return test
    return unittest.skip(reason)(test)
def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
    """Class decorator factory: attach one test method per suite case.

    Globs JSON files under *basedir* matching *tests_glob* (skipping any
    matched by *ignore_glob*) and adds a ``test_<file>_<n>_<description>``
    method to the decorated class for every test of every case.  *skip*, if
    given, maps a case to a skip reason or None (see maybe_skip).
    """
    if ignore_glob:
        ignore_glob = os.path.join(basedir, ignore_glob)

    def add_test_methods(test_class):
        ignored = set(glob.iglob(ignore_glob))

        for filename in glob.iglob(os.path.join(basedir, tests_glob)):
            if filename in ignored:
                continue

            validating, _ = os.path.splitext(os.path.basename(filename))
            # Per-file counter keeps generated method names unique.
            id = itertools.count(1)

            with open(filename) as test_file:
                for case in json.load(test_file):
                    for test in case["tests"]:
                        name = "test_%s_%s_%s" % (
                            validating,
                            next(id),
                            re.sub(r"[\W ]+", "_", test["description"]),
                        )
                        # Guard against two cases mapping to the same name.
                        assert not hasattr(test_class, name), name

                        test_case = make_case(
                            data=test["data"],
                            schema=case["schema"],
                            valid=test["valid"],
                            name=name,
                        )
                        test_case = maybe_skip(skip, test_case, case)
                        setattr(test_class, name, test_case)
        return test_class
    return add_test_methods
class TypesMixin(object):
    """Shared type-semantics checks mixed into the per-draft test classes."""

    @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
    def test_string_a_bytestring_is_a_string(self):
        validator = self.validator_class({"type" : "string"})
        validator.validate(b"foo")
class DecimalMixin(object):
    """Checks that decimal.Decimal can be registered as a "number" type."""

    def test_it_can_validate_with_decimals(self):
        validator = self.validator_class(
            {"type" : "number"}, types={"number" : (int, float, Decimal)}
        )

        for good in (1, 1.1, Decimal(1) / Decimal(8)):
            validator.validate(good)

        for bad in ("foo", {}, [], True, None):
            with self.assertRaises(ValidationError):
                validator.validate(bad)
def missing_format(checker):
    """Build a skip predicate for format cases *checker* cannot validate.

    The returned callable yields a reason string when the case's format is
    unknown to *checker* (or is date-time on old, buggy PyPy), else None.
    """
    def _skip_reason(case):
        format = case["schema"].get("format")
        if format in checker.checkers:
            if format != "date-time":
                return None
            # datetime.datetime is overzealous about typechecking in <=1.9
            if pypy_version_info is None or pypy_version_info[:2] > (1, 9):
                return None
        return "Format checker {0!r} not found.".format(format)
    return _skip_reason
class FormatMixin(object):
    """Shared "format" keyword behavior checks for the per-draft classes."""

    def test_it_returns_true_for_formats_it_does_not_know_about(self):
        validator = self.validator_class(
            {"format" : "carrot"}, format_checker=FormatChecker(),
        )
        # Unknown formats must be ignored rather than rejected.
        validator.validate("bugs")

    def test_it_does_not_validate_formats_by_default(self):
        self.assertIsNone(self.validator_class({}).format_checker)

    def test_it_validates_formats_if_a_checker_is_provided(self):
        checker = mock.Mock(spec=FormatChecker)
        validator = self.validator_class(
            {"format" : "foo"}, format_checker=checker,
        )

        validator.validate("bar")
        checker.check.assert_called_once_with("bar", "foo")

        cause = ValueError()
        checker.check.side_effect = FormatError('aoeu', cause=cause)
        with self.assertRaises(ValidationError) as cm:
            validator.validate("bar")
        # Make sure original cause is attached
        self.assertIs(cm.exception.cause, cause)
@load_json_cases("draft3/*.json", ignore_glob="draft3/refRemote.json")
@load_json_cases(
    "draft3/optional/format.json", skip=missing_format(draft3_format_checker)
)
@load_json_cases("draft3/optional/bignum.json")
@load_json_cases("draft3/optional/zeroTerminatedFloats.json")
class TestDraft3(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
    """Run the official draft 3 suite plus a few hand-written checks."""

    validator_class = Draft3Validator
    validator_kwargs = {"format_checker" : draft3_format_checker}

    def test_any_type_is_valid_for_type_any(self):
        self.validator_class({"type" : "any"}).validate(mock.Mock())

    # TODO: we're in need of more meta schema tests
    def test_invalid_properties(self):
        with self.assertRaises(SchemaError):
            validate(
                {}, {"properties": {"test": True}}, cls=self.validator_class)

    def test_minItems_invalid_string(self):
        # minItems needs to be an integer, so the schema itself is invalid.
        with self.assertRaises(SchemaError):
            validate([1], {"minItems" : "1"}, cls=self.validator_class)
@load_json_cases("draft4/*.json", ignore_glob="draft4/refRemote.json")
@load_json_cases(
    "draft4/optional/format.json", skip=missing_format(draft4_format_checker)
)
@load_json_cases("draft4/optional/bignum.json")
@load_json_cases("draft4/optional/zeroTerminatedFloats.json")
class TestDraft4(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
    """Run the official draft 4 suite plus a few hand-written checks."""

    validator_class = Draft4Validator
    validator_kwargs = {"format_checker" : draft4_format_checker}

    # TODO: we're in need of more meta schema tests
    def test_invalid_properties(self):
        with self.assertRaises(SchemaError):
            validate(
                {}, {"properties": {"test": True}}, cls=self.validator_class)

    def test_minItems_invalid_string(self):
        # minItems needs to be an integer, so the schema itself is invalid.
        with self.assertRaises(SchemaError):
            validate([1], {"minItems" : "1"}, cls=self.validator_class)
class RemoteRefResolutionMixin(object):
    """Patch HTTP fetching so remote $refs resolve from the suite fixtures."""

    def setUp(self):
        patcher = mock.patch("jsonschema.validators.requests")
        mocked_requests = patcher.start()
        mocked_requests.get.side_effect = self.resolve
        self.addCleanup(patcher.stop)

    def resolve(self, reference):
        # Strip the suite's fake host and serve the fixture from REMOTES.
        _, _, relative = reference.partition("http://localhost:1234/")
        return mock.Mock(**{"json.return_value" : REMOTES.get(relative)})
# Remote $ref resolution cases from the official suite, draft 3.
@load_json_cases("draft3/refRemote.json")
class Draft3RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
    validator_class = Draft3Validator
# Remote $ref resolution cases from the official suite, draft 4.
@load_json_cases("draft4/refRemote.json")
class Draft4RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
    validator_class = Draft4Validator
| {
"content_hash": "62a55a50c137cacb578bd9023748b48d",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 78,
"avg_line_length": 33.602409638554214,
"alnum_prop": 0.6287797298912394,
"repo_name": "neumerance/cloudloon2",
"id": "e95dbbed371a926100f37362c0336da60f91144f",
"size": "8367",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/jsonschema/tests/test_jsonschema_test_suite.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "178040"
},
{
"name": "JavaScript",
"bytes": "460971"
},
{
"name": "Perl",
"bytes": "1954"
},
{
"name": "Python",
"bytes": "3227734"
},
{
"name": "Ruby",
"bytes": "76"
},
{
"name": "Shell",
"bytes": "14108"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.