repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
carquois/blobon | blobon/books/migrations/0004_auto__add_item__add_time.py | Python | mit | 9,603 | 0.008435 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Item'
db.create_table('books_item', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('cost', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
('quantity', self.gf('django.db.models.fields.PositiveIntegerField')(blank=True)),
))
db.send_create_signal('books', ['Item'])
# Adding model 'Time'
db.create_table('books_time', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['books.Task'], null=True, blank=True)),
('notes', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('rate_per_hour', self.gf('django.db.models.fields.PositiveIntegerField')(blank=True)),
('time', self.gf('django.db.models.fields.PositiveIntegerField')(blank=True)),
))
db.send_create_signal('books', ['Time'])
def backwards(self, orm):
# Deleting model 'Item'
db.delete_table('books_item')
# Deleting model 'Time'
db.delete_table('books_time')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'books.client': {
'Meta': {'object_name': 'Client'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
| 'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'street_adress': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'books.expense': {
'Meta': {'object_name': 'Expense'},
| 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'books.invoice': {
'Meta': {'object_name': 'Invoice'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Client']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_issue': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_number': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'terms': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'books.item': {
'Meta': {'object_name': 'Item'},
'cost': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'})
},
'books.project': {
'Meta': {'object_name': 'Project'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Client']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'rate_per_hour': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'})
},
'books.task': {
'Meta': {'object_name': 'Task'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['books.Project']"}),
'rate_per_hour': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'})
},
'books.tax': {
'Meta': {'object_name': 'Tax'},
'compound_tax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': (' |
MSeifert04/numpy | numpy/core/tests/test_scalarmath.py | Python | bsd-3-clause | 28,365 | 0.002503 | from __future__ import division, absolute_import, print_function
import sys
import warnings
import itertools
import operator
import platform
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
floating_types = np.floating.__subclasses__()
complex_floating_types = np.complexfloating.__subclasses__()
# This compares scalarmath against ufuncs.
class TestTypes(object):
def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
a_array = np.array([3], dtype=atype)
for l, btype in enumerate(types):
b_scalar = btype(1)
b_array = np.array([1], dtype=btype)
c_scalar = a_scalar + b_scalar
c_array = a_array + b_array
# It was comparing the type numbers, but the new ufunc
# function-finding mechanism finds the lowest function
# to which both inputs can be cast - which produces 'l'
# when you do 'q' + 'b'. The old function finding mechanism
# skipped ahead based on the first argument, but that
# does not produce properly symmetric results...
assert_equal(c_scalar.dtype, c_array.dtype,
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
assert_equal(a, b)
def test_leak(self):
# test leak of scalar objects
# a leak would show up in valgrind as still-reachable of ~2.6MB
for i in range(200000):
np.add(1, 1)
class TestBaseMath(object):
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
class TestPower(object):
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a ** 4
assert_(b == 81, "error with %r: got %r" % (t, b))
def test_large_types(self):
for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
a = t(51)
b = a ** 4
msg = "error with %r: got %r" % (t, b)
if np.issubdtype(t, np.integer):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
# has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
exp = [np.array(-1, dt)[()] for dt in 'bhilq']
# 1 ** -1 possible special case
base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, 1.)
# -1 ** -1 possible special case
base = [np.array(-1, dt)[()] for dt in 'bhilq']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, -1.)
# 2 ** -1 perhaps generic
base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, .5)
def test_mixed_types(self):
typelist = [np.int8, np.int16, np.float16,
np.float32, np.float64, np.int8,
np.int16, np.int32, np.int64]
for t1 in typelist:
for t2 in typelist:
a = t1(3)
b = t2(2)
result = a**b
msg = ("error with %r and %r:"
"got %r, expected %r") % (t1, t2, result, 9)
if np.issubdtype(np.dtype(result), np.integer):
assert_(result == 9, msg)
else:
assert_almost_equal(result, 9, err_msg=msg)
def test_modular_power(self):
# modular power is not implemented, so ensure it errors
a = 5
b = 4
c = 10
expected = pow(a, b, c) # noqa: F841
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
assert_raises(TypeError, op | erator.pow, np.array(t(a)), b, c)
def floordiv_and_mod(x, y):
return (x // y, x % y)
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
class TestModulus(object):
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floor | div_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg |
Ayi-/flask_HRmanager | flaskcode.py | Python | mit | 2,988 | 0.005716 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# ****************************************************************#
# ScriptName:
# Author: Eli
# Create Date:
# Modify Author:
# Modify Date:
# Function:
# ****************************************************************#
import random
import ImageDraw
from PIL import Image
import ImageFont,ImageFilter
__author__ = 'Eli'
#map:将str函数作用于后面序列的每一个元素
numbers = ''.join(map(str, range(10)))
chars = ''.join((numbers))
def create_validate_code(size=(120, 30),
chars=chars,
mode="RGB",
bg_color=(255, 255, 255),
fg_color=(255, 0, 0),
font_size=18,
| font_type="FreeMono.ttf",
length=4,
draw_points=True,
point_chance = 2):
'''''
size: 图片的大小,格式(宽,高),默认为(120, 30)
chars: 允许的字符集合,格式字符串
mode: 图片模式,默认为RGB
bg_color: 背景颜色,默认为白色
fg_color: 前景色,验证码字符颜色
font_size: 验证码字体大小
font_type: 验证码字体,默认为 Monaco.ttf
length: 验证码字符个数
draw_points: 是否画干扰点
point_chance: 干扰点出现的概率,大小范围[0, 50]
'''
width, height = size
| img = Image.new(mode, size, bg_color) # 创建图形
draw = ImageDraw.Draw(img) # 创建画笔
def get_chars():
'''''生成给定长度的字符串,返回列表格式'''
return random.sample(chars, length)
def create_points():
'''''绘制干扰点'''
chance = min(50, max(0, int(point_chance))) # 大小限制在[0, 50]
for w in xrange(width):
for h in xrange(height):
tmp = random.randint(0, 50)
if tmp > 50 - chance:
draw.point((w, h), fill=(0, 0, 0))
def create_strs():
'''''绘制验证码字符'''
c_chars = get_chars()
strs = '%s' % ''.join(c_chars)
font = ImageFont.truetype(font_type, font_size)
font_width, font_height = font.getsize(strs)
draw.text(((width - font_width) / 3, (height - font_height) / 4),
strs, font=font, fill=fg_color)
return strs
if draw_points:
create_points()
strs = create_strs()
# 图形扭曲参数
params = [1 - float(random.randint(1, 2)) / 100,
0,
0,
0,
1 - float(random.randint(1, 10)) / 100,
float(random.randint(1, 2)) / 500,
0.001,
float(random.randint(1, 2)) / 500
]
img = img.transform(size, Image.PERSPECTIVE, params) # 创建扭曲
img = img.filter(ImageFilter.EDGE_ENHANCE_MORE) # 滤镜,边界加强(阈值更大)
return img,strs |
purpleidea/gedit-plugins | plugins/commander/modules/goto.py | Python | gpl-2.0 | 1,938 | 0.001032 | # -*- coding: utf-8 -*-
#
# goto.py - goto commander module
#
# Copyright (C) 2010 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, | USA.
import os
import commander.commands as commands
import commander.commands.completion
import commander.commands.result
import commander.commands.exceptions
__commander_module__ = True
def __default__(view, line, column=1):
"""Goto line number"""
buf = view.get_buffer()
ins = buf.get_insert()
citer = buf.get_it | er_at_mark(ins)
try:
if line.startswith('+'):
linnum = citer.get_line() + int(line[1:])
elif line.startswith('-'):
linnum = citer.get_line() - int(line[1:])
else:
linnum = int(line) - 1
column = int(column) - 1
except ValueError:
raise commander.commands.exceptions.Execute('Please specify a valid line number')
linnum = min(max(0, linnum), buf.get_line_count() - 1)
citer = buf.get_iter_at_line(linnum)
column = min(max(0, column), citer.get_chars_in_line() - 1)
citer = buf.get_iter_at_line(linnum)
citer.forward_chars(column)
buf.place_cursor(citer)
view.scroll_to_iter(citer, 0.0, True, 0, 0.5)
return commander.commands.result.HIDE
# vi:ex:ts=4:et
|
HarryR/ffff-dnsp2p | libbenc/make.py | Python | gpl-2.0 | 1,298 | 0.008475 | #!/usr/bin/env python
import subprocess
import os
class MakeException(Exception):
pass
def swapExt(path, current, replacement):
path, ext = os.path.splitext(path)
if ext == current:
path += replacement
return path
else:
raise MakeException(
"swapExt: expected file name ending in %s, got file name ending in %s" % \
(current, replacement))
headerFiles = [
'benc.h',
'bencode.h',
]
codeFiles = [
'benc_int.c',
'benc_bstr.c',
'benc_list.c',
'benc_dict.c',
'bencode.c',
'bcopy.c',
]
cflags = ['-g']
programFile = 'bcopy'
def gcc(*packedArgs):
args = []
for arg in packedArgs:
| if isinstance(arg, list):
args += arg
elif isinstance(arg, tuple):
args += list(arg)
else:
args.append(arg)
subprocess.check_call(['gcc'] + args)
def compile(codeFile, cflags=[]):
objectFile = swapExt(codeFile, '.c', '.o')
gcc(cflags, '-c', ('-o', objectFile), codeFile)
return obj | ectFile
def link(programFile, objectFiles, cflags=[]):
gcc(cflags, ('-o', programFile), objectFiles)
if __name__ == '__main__':
objectFiles = [compile(codeFile, cflags) for codeFile in codeFiles]
link(programFile, objectFiles, cflags)
|
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/aio/operations/_virtual_machine_extension_images_operations.py | Python | mit | 9,333 | 0.004822 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machine_extension_images_operations import build_get_request, build_list_types_request, build_list_versions_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineExtensionImagesOperations:
"""VirtualMachineExtensionImagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2015_06_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
location: str,
publisher_name: str,
type: str,
version: str,
**kwargs: Any
) -> "_models.VirtualMachineExtensionImage":
"""Gets a virtual machine extension image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param version:
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtensionImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2015_06_15.models.VirtualMachineExtensionImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineExtensionImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
location=location,
publisher_name=publisher_name,
type=type,
version=version,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineExtensionImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}'} # type: ignore
@distributed_trace_async
async def list_types(
self,
location: str,
publisher_name: str,
**kwargs: Any
) -> List["_models.VirtualMachineExtensionImage"]:
"""Gets a list of virtual machine extension image types.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineExtensionImage, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2015_06_15.models.VirtualMachineExtensionImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineExtensionImage"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_ | map.update(kwargs.pop('error_map', {}))
| request = build_list_types_request(
location=location,
publisher_name=publisher_name,
subscription_id=self._config.subscription_id,
template_url=self.list_types.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineExtensionImage]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_types.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types'} # type: ignore
@distributed_trace_async
async def list_versions(
self,
location: str,
publisher_name: str,
type: str,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> List["_models.VirtualMachineExtensionImage"]:
"""Gets a list of virtual machine extension image versions.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineExtensionImage, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2015_06_15.models.VirtualMachineExtensionImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineExtensionImage"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_versions_request(
location=location,
|
rokj/django_basketball | urls.py | Python | mit | 1,670 | 0.011976 | from django.conf.urls.defaults import *
from django.views.generic.list_detail import object_detail
from django.conf import settings
from django.contrib.auth.views import login, logout
from django.views.generic.simple import redirect_to
from django.views.decorators.cache import cache_page
from common import views as common_views
from basketball import views as basketball_views
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^admin/', include(admin.site.urls)),
(r'^$', post_views.main_view),
# basketball
# this are examples, change it to your needs
(r'^team/2010-2011/$', basketball_views.team_view, { 'slug': 'team-2010-2011' }),
(r'^team/2011-2012/$', basketball_views.team_view, { 'slug': 'team-2011-2012' }),
url(r'^game/2010-2011/$', cache_page(basketball_views.games_view, settings.CACHE_SECONDS), { 'slug': '1-league-2010-2011-west' }, | name='games_view_2010-2011'),
url(r'^game/2011-2012/$', cache_page(basketball_views.games_view, settings.CACHE_SECONDS), { 'slug': '1-league-2011-2012-west' }, name='games_view_2011-2012'),
(r'^games/$', redirect_to, {'url': '/games/2011-2012/'}),
url(r'^(?P<url>games/\d{4}-\d{4}/(?P<slug>[\w-]+)/(?P<date_played>\d{4}-\d{2}-\d{2}))/$', cache_page(basketball_views.game_view, settings.CACHE_SECONDS), name='game_view'),
| (r'^player/(?P<slug>[\w-]+)/$', basketball_views.player_view),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': settings.MEDIA_ROOT , 'show_indexes': True }),
)
|
anythingrandom/eclcli | eclcli/network/networkclient/common/utils.py | Python | apache-2.0 | 10,377 | 0.000096 | import argparse
import logging
import netaddr
import os
from oslo_utils import encodeutils
from oslo_utils import importutils
import six
from . import exceptions
from ..i18n import _
ON_STATE = "ON"
OFF_STATE = "OFF"
def env(*vars, **kwargs):
for v in vars:
value = os.environ.get(v)
if value:
return value
return kwargs.get('default', '')
def get_client_class(api_name, version, version_map):
try:
client_path = version_map[str(version)]
except (KeyError, ValueError):
msg = _("Invalid %(api_name)s client version '%(version)s'. must be "
"one of: %(map_keys)s")
msg = msg % {'api_name': api_name, 'version': version,
'map_keys': ', '.join(version_map.keys())}
raise exceptions.UnsupportedVersion(msg)
return importutils.import_class(client_path)
def get_item_properties(item, fields, mixed_case_fields=(), formatters=None):
if formatters is None:
formatters = {}
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](item))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
if not hasattr(item, field_name) and isinstance(item, dict):
data = item[field_name]
else:
data = getattr(item, field_name, '')
if data is None:
data = ''
row.append(data)
return tuple(row)
def str2bool(strbool):
if strbool is None:
return None
return strbool.lower() == 'true'
def str2dict(strdict):
if not strdict:
return {}
return dict([kv.split('=', 1) for kv in strdict.split(',')])
def http_log_req(_logger, args, kwargs):
if not _logger.isEnabledFor(logging.DEBUG):
return
string_parts = ['curl -i']
for element in args:
if element in ('GET', 'POST', 'DELETE', 'PUT'):
string_parts.append(' -X %s' % element)
else:
string_parts.append(' %s' % element)
for element in kwargs['headers']:
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
string_parts.append(header)
if 'body' in kwargs and kwargs['body']:
| string_parts.append(" -d '%s'" % (kwargs['body']))
req = encodeutils.safe_encode("".join(string_parts))
_logger.debug("\nREQ: %s\n", req)
def http_log_resp(_logger, resp, body):
if not _logger.isEnabledFor(logging.DEBUG):
return
_logger.debug("RESP:%(code)s %(headers)s %(bo | dy)s\n",
{'code': resp.status_code,
'headers': resp.headers,
'body': body})
def _safe_encode_without_obj(data):
if isinstance(data, six.string_types):
return encodeutils.safe_encode(data)
return data
def safe_encode_list(data):
return list(map(_safe_encode_without_obj, data))
def safe_encode_dict(data):
def _encode_item(item):
k, v = item
if isinstance(v, list):
return (k, safe_encode_list(v))
elif isinstance(v, dict):
return (k, safe_encode_dict(v))
return (k, _safe_encode_without_obj(v))
return dict(list(map(_encode_item, data.items())))
def add_boolean_argument(parser, name, **kwargs):
for keyword in ('metavar', 'choices'):
kwargs.pop(keyword, None)
default = kwargs.pop('default', argparse.SUPPRESS)
parser.add_argument(
name,
metavar='{True,False}',
choices=['True', 'true', 'False', 'false'],
default=default,
**kwargs)
def is_valid_cidr(cidr):
try:
netaddr.IPNetwork(cidr)
return True
except Exception:
return False
class APIDictWrapper(object):
_apidict = {}
def __init__(self, apidict):
self._apidict = apidict
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._apidict:
raise
return self._apidict[attr]
def __getitem__(self, item):
try:
return getattr(self, item)
except (AttributeError, TypeError) as e:
raise KeyError(e)
def __contains__(self, item):
try:
return hasattr(self, item)
except TypeError:
return False
def get(self, item, default=None):
try:
return getattr(self, item)
except (AttributeError, TypeError):
return default
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._apidict)
def to_dict(self):
return self._apidict
class ESIAPIDictWrapper(APIDictWrapper):
def set_id_as_name_if_empty(self, length=8):
try:
if not self._apidict['name']:
id = self._apidict['id']
if length:
id = id[:length]
self._apidict['name'] = '(%s)' % id
except KeyError:
pass
def items(self):
return self._apidict.items()
@property
def name_or_id(self):
return (self._apidict.get('name') or
'(%s)' % self._apidict['id'][:13])
class Network(ESIAPIDictWrapper):
    """Wrapper for a Neutron network resource.

    Normalises the admin state to 'UP'/'DOWN' and mirrors any namespaced
    key ('provider:foo') under a template-friendly alias ('provider__foo').
    """

    def __init__(self, apiresource):
        apiresource['admin_state'] = \
            'UP' if apiresource['admin_state_up'] else 'DOWN'
        # Snapshot the keys first: inserting aliases while iterating the
        # live dict raises RuntimeError on Python 3.
        for key in list(apiresource):
            # The original test used key.find(':'), which returns -1
            # (truthy) on a miss, so it also "aliased" colon-free keys
            # and skipped keys starting with ':'.  Test membership.
            if ':' in key:
                apiresource[key.replace(':', '__')] = apiresource[key]
        super(Network, self).__init__(apiresource)
class Subnet(ESIAPIDictWrapper):
    """Wrapper for a Neutron subnet resource (no extra processing)."""

    def __init__(self, apiresource):
        super(Subnet, self).__init__(apiresource)
class Port(ESIAPIDictWrapper):
    """Wrapper for a Neutron port resource.

    Adds a display 'admin_state' and, when the MAC-learning extension
    reports a value, a derived 'mac_state'.
    """

    def __init__(self, apiresource):
        if apiresource['admin_state_up']:
            apiresource['admin_state'] = 'UP'
        else:
            apiresource['admin_state'] = 'DOWN'
        if 'mac_learning_enabled' in apiresource:
            if apiresource['mac_learning_enabled']:
                apiresource['mac_state'] = ON_STATE
            else:
                apiresource['mac_state'] = OFF_STATE
        super(Port, self).__init__(apiresource)
class PhysicalPort(ESIAPIDictWrapper):
    """Wrapper for a physical port resource (no extra processing)."""
    def __init__(self, apiresource):
        super(PhysicalPort, self).__init__(apiresource)

class ReservedAddress(ESIAPIDictWrapper):
    """Wrapper for a reserved address resource (no extra processing)."""
    def __init__(self, apiresource):
        super(ReservedAddress, self).__init__(apiresource)

class Quota(ESIAPIDictWrapper):
    """Wrapper for a quota resource (no extra processing)."""
    def __init__(self, apiresource):
        super(Quota, self).__init__(apiresource)

class InternetGateway(ESIAPIDictWrapper):
    """Wrapper for an internet gateway resource (no extra processing)."""
    def __init__(self, apiresource):
        super(InternetGateway, self).__init__(apiresource)

class InternetService(ESIAPIDictWrapper):
    """Wrapper for an internet service resource (no extra processing)."""
    def __init__(self, apiresource):
        super(InternetService, self).__init__(apiresource)

class VPNGateway(ESIAPIDictWrapper):
    """Wrapper for a VPN gateway resource (no extra processing)."""
    def __init__(self, apiresource):
        super(VPNGateway, self).__init__(apiresource)

class InterDCGateway(ESIAPIDictWrapper):
    """Wrapper for an inter-DC gateway resource (no extra processing)."""
    def __init__(self, apiresource):
        super(InterDCGateway, self).__init__(apiresource)

class InterDCService(ESIAPIDictWrapper):
    """Wrapper for an inter-DC service resource (no extra processing)."""
    def __init__(self, apiresource):
        super(InterDCService, self).__init__(apiresource)

class InterDCInterface(ESIAPIDictWrapper):
    """Wrapper for an inter-DC interface resource (no extra processing)."""
    def __init__(self, apiresource):
        super(InterDCInterface, self).__init__(apiresource)

class VPNService(ESIAPIDictWrapper):
    """Wrapper for a VPN service resource (no extra processing)."""
    def __init__(self, apiresource):
        super(VPNService, self).__init__(apiresource)

class StaticRoute(ESIAPIDictWrapper):
    """Wrapper for a static route resource (no extra processing)."""
    def __init__(self, apiresource):
        super(StaticRoute, self).__init__(apiresource)
class PubicIP(ESIAPIDictWrapper):
    """Wrapper for a public IP resource.

    Adds 'prefixed_cidr' ("<cidr>/<submask_length>") for display.
    NOTE(review): the class name looks like a typo for "PublicIP", but it
    is part of the public interface and is preserved.
    """

    def __init__(self, apiresource):
        prefix = apiresource.get('cidr', '')
        mask = str(apiresource.get('submask_length', ''))
        apiresource['prefixed_cidr'] = '%s/%s' % (prefix, mask)
        super(PubicIP, self).__init__(apiresource)
class GwInterface(ESIAPIDictWrapper):
def __init__(self, apiresource):
apiresource.setdefault('network_name', ' |
hagabbar/pycbc_copy | pycbc/events/events.py | Python | gpl-3.0 | 32,066 | 0.00368 | # Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# self.option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This modules defines functions for clustering and thresholding timeseries to
produces event triggers
"""
from __future__ import absolute_import
import lal, numpy, copy, os.path
from pycbc import WEAVE_FLAGS
from pycbc.types import Array
from pycbc.types import convert_to_process_params_dict
from pycbc.scheme import schemed
from pycbc.detector import Detector
from . import coinc
@schemed("pycbc.events.threshold_")
def threshold(series, value):
    """Return list of values and indices values over threshold in series.
    """
    # Stub body: the @schemed decorator dispatches to the processing-scheme
    # specific implementation in a pycbc.events.threshold_* module.
    return None, None
@schemed("pycbc.events.threshold_")
def threshold_only(series, value):
    """Return list of values and indices whose values in series are
    larger (in absolute value) than value
    """
    # Stub body: @schemed swaps in the scheme-specific implementation.
    return None, None
#FIXME: This should be under schemed, but I don't understand that yet!
def threshold_real_numpy(series, value):
    """Return (indices, values) of entries in ``series.data`` above *value*.

    Pure-numpy variant for real-valued series; ``series`` must expose a
    numpy array as ``.data``.
    """
    data = series.data
    matches = numpy.where(data > value)[0]
    return matches, data[matches]
@schemed("pycbc.events.threshold_")
def threshold_and_cluster(series, threshold, window):
    """Return list of values and indices values over threshold in series.
    """
    # Stub body: @schemed swaps in the scheme-specific implementation.
    return
@schemed("pycbc.events.threshold_")
def _threshold_cluster_factory(series):
    # Stub: resolved by @schemed to the scheme-specific factory returning
    # the concrete ThresholdCluster implementation class.
    return
class ThresholdCluster(object):
    """Create a threshold and cluster engine

    Parameters
    ----------
    series : complex64
        Input pycbc.types.Array (or subclass); it will be searched for
        points above threshold that are then clustered
    """
    def __new__(cls, *args, **kwargs):
        # Factory pattern: instantiate the scheme-dispatched implementation
        # class instead of this placeholder.
        real_cls = _threshold_cluster_factory(*args, **kwargs)
        return real_cls(*args, **kwargs) # pylint:disable=not-callable
# The class below should serve as the parent for all schemed classes.
# The intention is that this class serves simply as the location for
# all documentation of the class and its methods, though that is not
# yet implemented. Perhaps something along the lines of:
#
# http://stackoverflow.com/questions/2025562/inherit-docstrings-in-python-class-inheritance
#
# will work? Is there a better way?
class _BaseThresholdCluster(object):
    """Documentation-holding parent for scheme-specific engines.

    Concrete subclasses (selected via _threshold_cluster_factory) provide
    the actual implementations.
    """
    def threshold_and_cluster(self, threshold, window):
        """
        Threshold and cluster the memory specified at instantiation with the
        threshold specified at creation and the window size specified at creation.

        Parameters:
        -----------
        threshold : float32
            The minimum absolute value of the series given at object initialization
            to return when thresholding and clustering.
        window : uint32
            The size (in number of samples) of the window over which to cluster

        Returns:
        --------
        event_vals : complex64
            Numpy array, complex values of the clustered events
        event_locs : uint32
            Numpy array, indices into series of location of events
        """
        pass
def findchirp_cluster_over_window(times, values, window_length):
    """ Reduce the events by clustering over a window using
    the FindChirp clustering algorithm

    Parameters
    -----------
    times: Array
        Sample indices (times) of the SNR values
    values: Array
        The SNR values themselves
    window_length: int
        The size of the window in integer samples. Must be positive.

    Returns
    -------
    indices: Array
        Indices into ``times``/``values`` of the clustered maxima
    """
    assert window_length > 0, 'Clustering window length is not positive'
    # weave compiles the C snippet below at runtime (Python 2 era).
    from weave import inline
    indices = numpy.zeros(len(times), dtype=int)
    tlen = len(times) # pylint:disable=unused-variable
    # k holds the index of the last cluster written by the C code.
    k = numpy.zeros(1, dtype=int)
    absvalues = abs(values) # pylint:disable=unused-variable
    times = times.astype(int)
    # C kernel: keep the loudest sample within each window; start a new
    # cluster whenever the gap to the current cluster exceeds the window.
    code = """
        int j = 0;
        int curr_ind = 0;
        for (int i=0; i < tlen; i++){
            if ((times[i] - times[curr_ind]) > window_length){
                j += 1;
                indices[j] = i;
                curr_ind = i;
            }
            else if (absvalues[i] > absvalues[curr_ind]){
                indices[j] = i;
                curr_ind = i;
            }
        }
        k[0] = j;
    """
    inline(code, ['times', 'absvalues', 'window_length', 'indices', 'tlen', 'k'],
           extra_compile_args=[WEAVE_FLAGS])
    return indices[0:k[0]+1]
def cluster_reduce(idx, snr, window_size):
    """ Reduce the events by clustering over a window

    Parameters
    -----------
    idx: Array
        The list of indices of the SNR values
    snr: Array
        The list of SNR values
    window_size: int
        The size of the window in integer samples.

    Returns
    -------
    indices: Array
        The clustered indices of the SNR values
    snr: Array
        The clustered SNR values
    """
    ind = findchirp_cluster_over_window(idx, snr, window_size)
    return idx.take(ind), snr.take(ind)
def newsnr(snr, reduced_x2, q=6., n=2.):
    """Calculate the re-weighted SNR statistic ('newSNR') from given SNR and
    reduced chi-squared values. See http://arxiv.org/abs/1208.3491 for
    definition. Previous implementation in glue/ligolw/lsctables.py
    """
    nsnr = numpy.array(snr, ndmin=1, dtype=numpy.float64)
    rchisq = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64)
    # The re-weighting only applies where reduced chisq exceeds 1.
    large = rchisq > 1.
    nsnr[large] *= (0.5 * (1. + rchisq[large] ** (q / n))) ** (-1. / q)
    # Scalars in give a scalar back; arrays give an array.
    if len(nsnr) > 1:
        return nsnr
    return nsnr[0]
def newsnr_sgveto(snr, bchisq, sgchisq):
    """ Combined SNR derived from NewSNR and Sine-Gaussian Chisq"""
    # Test function
    nsnr = numpy.array(newsnr(snr, bchisq), ndmin=1)
    sg = numpy.array(sgchisq, ndmin=1)
    # Downweight events whose sine-Gaussian chisq exceeds 4.
    mask = numpy.array(sg > 4, ndmin=1)
    if len(mask) > 0:
        nsnr[mask] = nsnr[mask] / (sg[mask] / 4.0) ** 0.5
    if len(nsnr) > 1:
        return nsnr
    return nsnr[0]
def effsnr(snr, reduced_x2, fac=250.):
    """Calculate the effective SNR statistic. See (S5y1 paper) for definition.
    Previous implementation in glue/ligolw/lsctables.py
    """
    snr_arr = numpy.array(snr, ndmin=1, dtype=numpy.float64)
    rchisq = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64)
    esnr = snr_arr / (1 + snr_arr ** 2 / fac) ** 0.25 / rchisq ** 0.25
    # Scalars in give a scalar back; arrays give an array.
    if len(esnr) > 1:
        return esnr
    return esnr[0]
class EventManager(object):
def __init__(self, opt, column, column_types, **kwds):
self.opt = opt
self.global_params = kwds
self.event_dtype = [ ('template_id', int) ]
for column, coltype in zip (column, column_types):
self.event_dtype.append( (column, coltype) )
self.events = numpy.array([], dtype=self.event_dtype)
self.template_params = []
self.template_index = -1
self.template_events = numpy.array([], dtype=self.event_dtype)
self.write_pe | rformance = False
@classmethod
def from_multi_ifo_interface(cls, opt, ifo, column, column_types, **kwds):
"""
| To use this for a single ifo from the multi ifo interface requires
some small fixing of the |
janpipek/hlava | hlava/items/formats/json_item.py | Python | mit | 860 | 0.005814 | from json import loads as json_loads
import collections
from .text_item import AbstractTextItem
from .tree_item import AbstractTreeItem
from .. import register
@register
class JsonItem(AbstractTextItem, AbstractTreeItem):
    """Item type for JSON documents, rendered as an HTML tree."""

    extensions = ("json",)
    mime_type = "text/x-json"
    new_item_order_preference = 1.62
    type_description = lambda: "JSON (JavaScript Object Notation)"
    new_item_content = lambda name: "{\n \"_title\" : \"" + name + "\"\n}\n"

    @property
    def json(self):
        """Parsed JSON content (key order preserved), cached on first access."""
        if "_json" not in dir(self):
            self._json = json_loads(self.text, object_pairs_hook=collections.OrderedDict)
        return self._json

    def _repr_html_(self):
        try:
            json = self.json
            return self._tree_to_html(json)
        except Exception:
            # Bare except: would also swallow KeyboardInterrupt/SystemExit.
            return "<b>Invalid JSON:</b><pre>{0}</pre>".format(self.text)
norayr/unisubs | utils/breadcrumbs.py | Python | agpl-3.0 | 1,063 | 0.000941 | # Amara, universalsubtitles.org
#
# Copyright (C) 2015 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.core.urlresolvers import reverse
class BreadCrumb(object):
    """A single breadcrumb link: a label plus an optional resolved URL.

    When *view_name* is given, the URL is resolved with Django's
    ``reverse()`` using the remaining positional/keyword arguments;
    otherwise ``url`` is None (a plain, non-clickable crumb).
    """

    def __init__(self, label, view_name=None, *args, **kwargs):
        # unicode() is intentional: this module targets Python 2.
        self.label = unicode(label)
        if view_name:
            self.url = reverse(view_name, args=args, kwargs=kwargs)
        else:
            self.url = None
google-research/google-research | correct_batch_effects_wdn/forgetting_nuisance.py | Python | apache-2.0 | 55,433 | 0.006061 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Training to forget nuisance variables."""
import collections
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import pandas as pd
from scipy import sparse
from six.moves import range
from six.moves import zip
import six.moves.cPickle as pickle
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from correct_batch_effects_wdn import io_utils
from correct_batch_effects_wdn import transform
from tensorflow.python.ops import gen_linalg_ops # pylint: disable=g-direct-tensorflow-import
INPUT_NAME = "inputs"
OUTPUT_NAME = "outputs"
DISCRIMINATOR_NAME = "discriminator"
CLASSIFIER_NAME = "classifier"
UNIT_GAUSS_NAME = "unit_gauss"
CRITIC_LAYER_NAME = "critic_layer"
POSSIBLE_LOSSES = (DISCRIMINATOR_NAME, CLASSIFIER_NAME, UNIT_GAUSS_NAME)
INPUT_KEY_INDEX = 0
REGISTRY_NAME = "params_registry_python_3"
WASSERSTEIN_NETWORK = "WassersteinNetwork"
WASSERSTEIN_2_NETWORK = "Wasserstein2Network"
MEAN_ONLY_NETWORK = "MeanOnlyNetwork"
WASSERSTEIN_SQRD_NETWORK = "WassersteinSqrdNetwork"
WASSERSTEIN_CUBED_NETWORK = "WassersteinCubedNetwork"
POSSIBLE_NETWORKS = [
WASSERSTEIN_NETWORK, WASSERSTEIN_2_NETWORK, WASSERSTEIN_SQRD_NETWORK,
WASSERSTEIN_CUBED_NETWORK, MEAN_ONLY_NETWORK
]
FLAGS = flags.FLAGS
flags.DEFINE_string("input_df", None, "Path to the embedding dataframe.")
flags.DEFINE_string("save_dir", None, "location of file to save.")
flags.DEFINE_integer("num_steps_pretrain", None, "Number of steps to pretrain.")
flags.DEFINE_integer("num_steps", None, "Number of steps (after pretrain).")
flags.DEFINE_integer("disc_steps_per_training_step", None, "Number critic steps"
"to use per main training step.")
flags.DEFINE_enum("network_type", "WassersteinNetwork", POSSIBLE_NETWORKS,
"Network to use. Can be WassersteinNetwork.")
flags.DEFINE_integer("batch_n", 10, "Number of points to use per minibatch"
"for each loss.")
flags.DEFINE_float("learning_rate", 1e-4, "Initial learning rate to use.")
flags.DEFINE_float("epsilon", 0.01, "Regularization for covariance.")
flags.DEFINE_integer("feature_dim", 192, "Number of feature dimensions.")
flags.DEFINE_integer("checkpoint_interval", 4000, "Frequency to save to file.")
flags.DEFINE_spaceseplist("target_levels", "compound",
"dataframe target levels.")
flags.DEFINE_spaceseplist("nuisance_levels", "batch",
"dataframe nuisance levels.")
flags.DEFINE_integer(
"layer_width", 2, "Width of network to use for"
"approximating the Wasserstein distance.")
flags.DEFINE_integer(
"num_layers", 2, "Number of layers to use for"
"approximating the Wasserstein distance.")
flags.DEFINE_string(
"reg_dir", None, "Directory to registry file, or None to"
"save in save_dir.")
flags.DEFINE_float("lambda_mean", 0., "Penalty for the mean term of the affine"
"transformation.")
flags.DEFINE_float("lambda_cov", 0., "Penalty for the cov term of the affine"
"transformation.")
flags.DEFINE_integer("seed", 42, "Seed to use for numpy.")
flags.DEFINE_integer("tf_seed", 42, "Seed to use for tensorflow.")
flags.DEFINE_float(
"cov_fix", 0.001, "Multiple of identity to add if using"
"Wasserstein-2 distance.")
################################################################################
##### Functions and classes for storing and retrieving data
################################################################################
def get_dense_arr(matrix):
    """Convert a sparse matrix to numpy array.

    Args:
      matrix (matrix, sparse matrix, or ndarray): input

    Returns:
      dense numpy array.
    """
    if not sparse.issparse(matrix):
        return np.array(matrix)
    return matrix.toarray()
class DataShuffler(object):
    """Object to hold and shuffle data.

    Adapted from
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/datasets/mnist.py

    Attributes:
      inputs (ndarray): The inputs specified in __init__.
      outputs (ndarray): The outputs specified in __init__.
    """

    def __init__(self, inputs, outputs, random_state):
        """Inits DataShuffler given inputs, outputs, and a random state.

        Args:
          inputs (ndarray): 2-dimensional array containing the inputs, where each
            row is an individual input. The columns represent the different
            dimensions of an individual entry.
          outputs (ndarray): 2-dimensional array containing the outputs, where each
            row is an individual output.
          random_state (None or int): seed to feed to numpy.random.
        """
        assert inputs.shape[0] == outputs.shape[0]
        self.inputs = inputs
        self.outputs = outputs
        # Starts at -1 so the first refill in next_batch counts as epoch 0.
        self._epochs_completed = -1
        self._num_examples = inputs.shape[0]
        self._random_state = random_state
        # Indices not yet served this epoch; consumed from the end via pop().
        self._next_indices = []

    def next_batch(self, batch_size, shuffle=True):
        """Helper method for next_batch.

        Args:
          batch_size (int): Number of items to pick.
          shuffle (bool): whether to shuffle the data or not.

        Returns:
          A tuple of 2-dimensional ndarrays whose shape along the first axis is
          equal to batch_size. The rows in the first element correspond to inputs,
          and in the second element to the outputs.
        """
        indices = []
        while len(indices) < batch_size:
            # Refill (and optionally reshuffle) when the epoch is exhausted;
            # a single batch may therefore straddle two epochs.
            if not self._next_indices:
                self._epochs_completed += 1
                self._next_indices = list(reversed(list(range(self._num_examples))))
                if shuffle:
                    self._random_state.shuffle(self._next_indices)
            indices.append(self._next_indices.pop())
        return (get_dense_arr(self.inputs[indices]),
                get_dense_arr(self.outputs[indices]))
def _make_canonical_key(x):
"""Try to convert to a hashable type.
First, if the input is a list of length 1, take its first component instead.
Next, try to con | vert to a tuple if not hashable.
Args:
x (list, tuple, string, or None): input to convert to a key
Returns:
Hashable object
"""
if isinstance(x, list) an | d len(x) == 1:
x = x[0]
if not isinstance(x, collections.Hashable):
return tuple(x)
else:
return x
def split_df(df, columns_split):
    """Split a dataframe into two by column.

    Args:
      df (pandas dataframe): input dataframe to split.
      columns_split (int): Column at which to split the dataframes.

    Returns:
      df1, df2 (pandas dataframes): the first holds columns before
      columns_split, the second holds columns_split onward.
    """
    left = df.iloc[:, :columns_split]
    right = df.iloc[:, columns_split:]
    return left, right
def tuple_in_group_with_wildcards(needle, haystack):
"""Checks if needle is in haystack, allowing for wildcard components.
Returns True if either needle or haystack is None, or needle is in haystack.
Components in haystack are tuples. These tuples can have entries equal to None
which serve as wildcard components.
For example, if haystack = [(None, ...), ...], The first tuple in haystack
has a wildcard for its first component.
Args:
needle (tuple): tuple to check if in group
haystack (list): list of tuples of the same dimensions as tup.
Returns:
True if the tuple is in the group, False otherwise
Raises:
ValueError: Length of tup must match length of each tuple in group."
"""
if needle is None or haystack is None:
return True
if any(len(needle) != len(it_needle) for it_needle in haystack):
raise ValueError("Leng |
GNOME/gnome-python | examples/gconf/simple-controller.py | Python | lgpl-2.1 | 683 | 0.021962 | #!/usr/bin/env python
#
# A very simple program that sets a single key value when you type
# it in an entry and press return
#
import gtk
import gconf
def entry_activated_callback(entry, client):
    """Write the entry's current text to the GConf test key.

    The key path matches the '/testing/directory' dir registered below.
    """
    text = entry.get_chars(0, -1)
    client.set_string("/testing/directory/key", text)
# Build a single-entry window wired to GConf and run the GTK main loop.
window = gtk.Window()
entry = gtk.Entry()
window.add(entry)

client = gconf.client_get_default()
client.add_dir("/testing/directory",
               gconf.CLIENT_PRELOAD_NONE)

entry.connect('activate', entry_activated_callback, client)

# If key isn't writable, then set insensitive
entry.set_sensitive(client.key_is_writable("/testing/directory/key"))

window.show_all()
gtk.main()
|
SUSE/ceph-deploy-to-be-deleted | ceph_deploy/cli.py | Python | mit | 5,580 | 0.000896 | import pkg_resources
import argparse
import logging
import textwrap
import os
import sys
from string import join
import ceph_deploy
from ceph_deploy import exc, validate
from ceph_deploy.util import log
from ceph_deploy.util.decorators import catches
LOG = logging.getLogger(__name__)
__header__ = textwrap.dedent("""
-^-
/ \\
|O o| ceph-deploy v%s
).-.(
'/|||\`
| '|` |
'|`
Full documentation can be found at: http://ceph.com/ceph-deploy/docs
""" % ceph_deploy.__version__)
def log_flags(args, logger=None):
    """Log every public attribute of *args* at INFO level.

    Falls back to this module's LOG when no logger is supplied;
    attributes starting with '_' are skipped.
    """
    logger = logger or LOG
    logger.info('ceph-deploy options:')
    for key, value in args.__dict__.items():
        if key.startswith('_'):
            continue
        logger.info(' %-30s: %s' % (key, value))
def get_parser():
    """Build the top-level argparse parser.

    One subparser is added per entry point registered under the
    'ceph_deploy.cli' group, sorted by each command's 'priority'.
    """
    parser = argparse.ArgumentParser(
        prog='ceph-deploy',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Easy Ceph deployment\n\n%s' % __header__,
    )
    verbosity = parser.add_mutually_exclusive_group(required=False)
    verbosity.add_argument(
        '-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help='be more verbose',
    )
    verbosity.add_argument(
        '-q', '--quiet',
        action='store_true', dest='quiet',
        help='be less verbose',
    )
    parser.add_argument(
        '--version',
        action='version',
        version='%s' % ceph_deploy.__version__,
        help='the current installed version of ceph-deploy',
    )
    parser.add_argument(
        '--username',
        help='the username to connect to the remote host',
    )
    parser.add_argument(
        '--overwrite-conf',
        action='store_true',
        help='overwrite an existing conf file on remote host (if present)',
    )
    parser.add_argument(
        '--cluster',
        metavar='NAME',
        help='name of the cluster',
        type=validate.alphanumeric,
    )
    parser.add_argument(
        '--ceph-conf',
        dest='ceph_conf',
        help='use (or reuse) a given ceph.conf file',
    )
    sub = parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )
    entry_points = [
        (ep.name, ep.load())
        for ep in pkg_resources.iter_entry_points('ceph_deploy.cli')
    ]
    # The original 'lambda (name, fn): ...' tuple-parameter syntax is
    # Python 2 only (removed by PEP 3113); index the pair instead so this
    # also parses on Python 3.
    entry_points.sort(
        key=lambda name_fn: getattr(name_fn[1], 'priority', 100),
    )
    for (name, fn) in entry_points:
        p = sub.add_parser(
            name,
            description=fn.__doc__,
            help=fn.__doc__,
        )
        # ugly kludge but i really want to have a nice way to access
        # the program name, with subcommand, later
        p.set_defaults(prog=p.prog)
        if not os.environ.get('CEPH_DEPLOY_TEST'):
            p.set_defaults(cd_conf=ceph_deploy.conf.cephdeploy.load())
        # flag if the default release is being used
        p.set_defaults(default_release=False)
        fn(p)
    parser.set_defaults(
        # we want to hold on to this, for later
        prog=parser.prog,
        cluster='ceph',
    )
    return parser
@catches((KeyboardInterrupt, RuntimeError, exc.DeployError,), handle_all=True)
def _main(args=None, namespace=None):
    """Parse arguments, configure logging, and dispatch to the chosen
    sub-command handler (``args.func``)."""
    # Set console logging first with some defaults, to prevent having
    # exceptions before hitting logging configuration. The defaults
    # can/will get overridden later.

    # Console Logger
    sh = logging.StreamHandler()
    sh.setFormatter(log.color_format())
    sh.setLevel(logging.WARNING)

    # because we're in a module already, __name__ is not the ancestor of
    # the rest of the package; use the root as the logger for everyone
    root_logger = logging.getLogger()

    # allow all levels at root_logger, handlers control individual levels
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(sh)

    parser = get_parser()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit()
    else:
        args = parser.parse_args(args=args, namespace=namespace)

    console_loglevel = logging.DEBUG  # start at DEBUG for now
    if args.quiet:
        console_loglevel = logging.WARNING
    if args.verbose:
        console_loglevel = logging.DEBUG

    # Console Logger
    sh.setLevel(console_loglevel)

    # File Logger
    fh = logging.FileHandler('{cluster}.log'.format(cluster=args.cluster))
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(log.BASE_FORMAT))
    root_logger.addHandler(fh)

    # Reads from the config file and sets values for the global
    # flags and the given sub-command
    # the one flag that will never work regardless of the config settings is
    # logging because we cannot set it before hand since the logging config is
    # not ready yet. This is the earliest we can do.
    args = ceph_deploy.conf.cephdeploy.set_overrides(args)

    # ' '.join(...) works on Python 2 and 3; the original string.join()
    # helper only exists on Python 2.
    LOG.info("Invoked (%s): %s" % (
        ceph_deploy.__version__,
        " ".join(sys.argv))
    )
    log_flags(args)
    return args.func(args)
def main(args=None, namespace=None):
    """Entry point wrapper around _main with stdio cleanup on exit."""
    try:
        _main(args=args, namespace=namespace)
    finally:
        # This block is crucial to avoid having issues with
        # Python spitting non-sense thread exceptions. We have already
        # handled what we could, so close stderr and stdout.
        # Skipped under test so the harness keeps its captured streams.
        if not os.environ.get('CEPH_DEPLOY_TEST'):
            for stream in (sys.stdout, sys.stderr):
                try:
                    stream.close()
                except:
                    pass
brainstorm/bcbio-nextgen | bcbio/structural/shared.py | Python | mit | 13,904 | 0.004387 | """Shared functionality useful across multiple structural variant callers.
Handles exclusion regions and preparing discordant regions.
"""
import collections
import os
import numpy
import pybedtools
import pysam
import toolz as tz
import yaml
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.bam import callable
from bcbio.ngsalign import postalign
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import shared, config_utils
from bcbio.provenance import do
from bcbio.structural import regions
from bcbio.variation import bedutils, population
# ## Case/control
def find_case_control(items):
    """Find case/control items in a population of multiple samples.
    """
    cases, controls = [], []
    for data in items:
        # Affected status 1 marks an unaffected (control) sample.
        status = population.get_affected_status(data)
        (controls if status == 1 else cases).append(data)
    return cases, controls
# ## Prepare exclusion regions (repeats, telomeres, centromeres)
def _get_sv_exclude_file(items):
    """Retrieve SV file of regions to exclude, or None when unavailable.
    """
    sv_bed = utils.get_in(items[0], ("genome_resources", "variation", "sv_repeat"))
    if sv_bed and os.path.exists(sv_bed):
        return sv_bed
    return None
def _get_variant_regions(items):
    """Retrieve variant regions defined in any of the input items.

    Returns a list (not a lazy ``filter`` object): callers take ``len()``
    of the result, which fails on the iterator ``filter`` returns under
    Python 3.
    """
    candidates = [tz.get_in(("config", "algorithm", "variant_regions"), data)
                  for data in items
                  if tz.get_in(["config", "algorithm", "coverage_interval"], data) != "genome"]
    return [vr for vr in candidates if vr is not None]
def has_variant_regions(items, base_file, chrom=None):
    """Determine if we should process this chromosome: needs variant regions defined.
    """
    if not chrom:
        return True
    all_vrs = _get_variant_regions(items)
    if len(all_vrs) == 0:
        return True
    # Process only when subsetting leaves more than the bare chromosome name.
    subset = shared.subset_variant_regions(tz.first(all_vrs), chrom, base_file, items)
    return subset != chrom
def remove_exclude_regions(orig_bed, base_file, items, remove_entire_feature=False):
    """Remove centromere and short end regions from an existing BED file of regions to target.

    With remove_entire_feature=True, a feature is dropped on any overlap
    with the exclusion set (bedtools subtract -A); otherwise only the
    overlapping portion is trimmed.
    """
    out_bed = os.path.join("%s-noexclude.bed" % (utils.splitext_plus(base_file)[0]))
    exclude_bed = prepare_exclude_file(items, base_file)
    with file_transaction(items[0], out_bed) as tx_out_bed:
        pybedtools.BedTool(orig_bed).subtract(pybedtools.BedTool(exclude_bed),
                                              A=remove_entire_feature, nonamecheck=True).saveas(tx_out_bed)
    # Fall back to the original BED if the subtraction produced no file.
    if utils.file_exists(out_bed):
        return out_bed
    else:
        return orig_bed
def get_base_cnv_regions(data, work_dir):
    """Retrieve set of target regions for CNV analysis.

    Subsets to extended transcript regions for WGS experiments to avoid
    long runtimes.
    """
    cov_interval = dd.get_coverage_interval(data)
    base_regions = regions.get_sv_bed(data)
    # if we don't have a configured BED or regions to use for SV calling
    if not base_regions:
        # For genome calls, subset to regions within 10kb of genes
        if cov_interval == "genome":
            base_regions = regions.get_sv_bed(data, "transcripts1e4", work_dir)
            if base_regions:
                base_regions = remove_exclude_regions(base_regions, base_regions, [data])
    # Finally, default to the defined variant regions
    if not base_regions:
        base_regions = dd.get_variant_regions(data)
    return bedutils.clean_file(base_regions, data)
def prepare_exclude_file(items, base_file, chrom=None):
    """Prepare a BED file for exclusion.

    Excludes high depth and centromere regions which contribute to long run times and
    false positive structural variant calls.

    The output is the full reference minus the "wanted" regions (variant
    regions less SV repeats and high-depth areas).  Cached: reuses an
    existing out_file (or its .gz) when present.
    """
    out_file = "%s-exclude%s.bed" % (utils.splitext_plus(base_file)[0], "-%s" % chrom if chrom else "")
    if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
        with shared.bedtools_tmpdir(items[0]):
            # Get a bedtool for the full region if no variant regions
            want_bedtool = callable.get_ref_bedtool(tz.get_in(["reference", "fasta", "base"], items[0]),
                                                    items[0]["config"], chrom)
            if chrom:
                want_bedtool = pybedtools.BedTool(shared.subset_bed_by_chrom(want_bedtool.saveas().fn,
                                                                             chrom, items[0]))
            sv_exclude_bed = _get_sv_exclude_file(items)
            if sv_exclude_bed and len(want_bedtool) > 0:
                want_bedtool = want_bedtool.subtract(sv_exclude_bed, nonamecheck=True).saveas()
            # For whole-genome runs, also drop abnormally deep regions.
            if any(dd.get_coverage_interval(d) == "genome" for d in items):
                want_bedtool = pybedtools.BedTool(shared.remove_highdepth_regions(want_bedtool.saveas().fn, items))
            with file_transaction(items[0], out_file) as tx_out_file:
                full_bedtool = callable.get_ref_bedtool(tz.get_in(["reference", "fasta", "base"], items[0]),
                                                        items[0]["config"])
                # Exclusion set = full reference minus the wanted regions.
                if len(want_bedtool) > 0:
                    full_bedtool.subtract(want_bedtool, nonamecheck=True).saveas(tx_out_file)
                else:
                    full_bedtool.saveas(tx_out_file)
    return out_file
def exclude_by_ends(in_file, exclude_file, data, in_params=None):
    """Exclude calls based on overlap of the ends with exclusion regions.

    Removes structural variants with either end being in a repeat: a large
    source of false positives.

    Parameters tuned based on removal of LCR overlapping false positives in DREAM
    synthetic 3 data.

    Returns the filtered BED path and the number of removed calls.
    NOTE(review): when out_file is already up to date, the filtering loop
    is skipped and `removed` is reported as 0.
    """
    # Tunable thresholds; callers may override any subset via in_params.
    params = {"end_buffer": 50,
              "rpt_pct": 0.9,
              "total_rpt_pct": 0.2,
              "sv_pct": 0.5}
    if in_params:
        params.update(in_params)
    assert in_file.endswith(".bed")
    out_file = "%s-norepeats%s" % utils.splitext_plus(in_file)
    to_filter = collections.defaultdict(list)
    removed = 0
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            with shared.bedtools_tmpdir(data):
                # Check each breakpoint end separately against the exclusions.
                for coord, end_name in [(1, "end1"), (2, "end2")]:
                    base, ext = utils.splitext_plus(tx_out_file)
                    end_file = _create_end_file(in_file, coord, params, "%s-%s%s" % (base, end_name, ext))
                    to_filter = _find_to_filter(end_file, exclude_file, params, to_filter)
            with open(tx_out_file, "w") as out_handle:
                with open(in_file) as in_handle:
                    for line in in_handle:
                        key = "%s:%s-%s" % tuple(line.strip().split("\t")[:3])
                        total_rpt_size = sum(to_filter.get(key, [0]))
                        # Keep the call only when the repeat overlap stays
                        # under the configured fraction of the end buffer.
                        if total_rpt_size <= (params["total_rpt_pct"] * params["end_buffer"]):
                            out_handle.write(line)
                        else:
                            removed += 1
    return out_file, removed
def _find_to_filter(in_file, exclude_file, params, to_exclude):
    """Identify regions in the end file that overlap the exclusion file.

    We look for ends with a large percentage in a repeat or where the end contains
    an entire repeat.  Overlap sizes are accumulated into `to_exclude`,
    keyed by the end's name field.
    """
    for feat in pybedtools.BedTool(in_file).intersect(pybedtools.BedTool(exclude_file), wao=True, nonamecheck=True):
        # -wao output: end interval fields, overlapping exclude interval
        # fields, then the overlap size in bases.
        us_chrom, us_start, us_end, name, other_chrom, other_start, other_end, overlap = feat.fields
        if float(overlap) > 0:
            other_size = float(other_end) - float(other_start)
            other_pct = float(overlap) / other_size
            us_pct = float(overlap) / (float(us_end) - float(us_start))
            if us_pct > params["sv_pct"] or (other_pct > params["rpt_pct"]):
                to_exclude[name].append(float(overlap))
    return to_exclude
def _create_end_file(in_file, coord, params, out_file):
with open(in_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handl |
akranga/mafia-serverless | solutions/day.py | Python | apache-2.0 | 1,580 | 0.01519 | import os, sys
# to read dependencies from ./lib direcroty
script_dir = os.path.dirname( os.path.realpath(__file__) )
sys.path.insert(0, script_dir + os.sep + "lib")
import logging, boto3, json, random
# for dynamodb filter queries
from boto3.dynamodb.conditions import Key, Attr
# setup log level to DEBUG
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# initialize DynamoDB client
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['DYNAMO_TABLE'])
def find_all_uncovered():
    """Return every player record whose Identity is still 'Uncovered'."""
    uncovered = Attr('Identity').eq('Uncovered')
    scan_result = table.scan(FilterExpression=uncovered)
    return scan_result['Items']
def find_by_identity(identity):
    """Return uncovered players whose TrueIdentity equals *identity*."""
    identity_filter = (Attr('TrueIdentity').eq(identity)
                       & Attr('Identity').eq('Uncovered'))
    return table.scan(FilterExpression=identity_filter)['Items']
def handler(event, context):
    """Lambda entry point for the day phase: each uncovered player blames someone.

    Mafia players always blame a random innocent; everyone else blames a
    random uncovered player (possibly themselves).
    """
    lines = [
        "Day, time to wake up!",
        "Players see the dead body and makes their accusations"
    ]
    innocents = find_by_identity('Innocent')
    players = find_all_uncovered()
    for player in players:
        if player['TrueIdentity'] == 'Mafia':
            target = random.choice(innocents)
        else:
            target = random.choice(players)
        lines.append("{} blames on {}".format(player['Name'], target['Name']))
    lines.append("Who is the Mafia?")
    return response({"Message": lines}, event)
def response(body, event, code=200):
if 'resource' in event and 'httpMethod' in event:
return {
'statusCode': code,
'headers': {},
| 'bod | y': json.dumps(body, indent=4, separators=(',', ':'))
}
return body |
douville/qcri | qcri/application/gui.py | Python | bsd-2-clause | 27,582 | 0.000145 | """
The GUI to QCRI.
"""
# pylint: disable=I0011, no-member, missing-docstring
import threading
import logging
from sys import version_info
import pythoncom
from qcri.application import importer
from qcri.application import qualitycenter
# pylint: disable=I0011, import-error
if version_info.major == 2:
import Tkinter as tk
import tkMessageBox as messagebox
import tkFileDialog as filedialog
import ttk
import Queue as queue
elif version_info.major == 3:
import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
from tkinter import ttk
import queue
LOG = logging.getLogger(__name__)
def work_in_background(tk_, func, callback=None):
    """
    Run *func* on a worker thread while showing a BusyWindow.

    The Tk event loop stays responsive: a queue is polled every 100 ms on
    the Tk side (via ``after``) and, once the worker signals completion,
    the busy window is destroyed and *callback* (if any) is invoked on the
    Tk thread.

    :param tk_: widget providing ``after`` for event-loop scheduling.
    :param func: no-argument callable executed on the worker thread.
    :param callback: optional no-argument callable run after completion.
    """
    window = BusyWindow()
    done_queue = queue.Queue()

    def _process():
        # Worker-thread side: do the work, then signal completion.
        func()
        done_queue.put(True)

    def _process_queue():
        # Tk-thread side: poll for the completion token without blocking.
        try:
            done_queue.get_nowait()
            window.destroy()
            if callback:
                callback()
        except queue.Empty:
            # Not done yet; re-schedule ourselves.
            tk_.after(100, _process_queue)

    thread = threading.Thread(target=_process)
    thread.start()
    tk_.after(100, _process_queue)
def center(widget, width, height):
    """Resize *widget* to width x height and center it on the screen."""
    screen_w = widget.winfo_screenwidth()
    screen_h = widget.winfo_screenheight()
    left = int(screen_w / 2 - width / 2)
    top = int(screen_h / 2 - height / 2)
    geometry_spec = '{}x{}+{}+{}'.format(width, height, left, top)
    widget.geometry(geometry_spec)
# todo: add <rightclick> <selectall>
class QcriGui(tk.Tk):
"""
The main window.
"""
    def __init__(self, cfg):
        """Build the main window and all widget state.

        :param cfg: ConfigParser with application settings (history flag, etc.)
        """
        tk.Tk.__init__(self)
        self.cfg = cfg  # ConfigParser
        self.qcc = None  # the Quality Center connection
        self.valid_parsers = {}  # parsers able to read the chosen results file
        self._cached_tests = {}  # for the treeview
        self._results = {}  # test results
        self.dir_dict = {}  # presumably tree item ids -> QC paths; confirm
        self.bug_dict = {}  # presumably defect lookups; confirm
        self.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.title('QC Results Importer')
        center(self, 1200, 700)
        # tkinter widgets: declared up front so every attribute exists
        # before _make() constructs and assigns the real widgets.
        self.menubar = None
        self.remote_path = None
        self.choose_parser = None
        self.choose_results_button = None
        self.qcdir_tree = None
        self.upload_button = None
        self.choose_results_entry = None
        self.runresults_tree = None
        self.runresultsview = None
        self.header_frame = None
        self.qc_connected_frm = None
        self.qc_disconnected_frm = None
        self.link_bug = None
        self.qc_domain = tk.StringVar()
        self.attach_report = tk.IntVar()
        self.qc_project = tk.StringVar()
        self.runresultsvar = tk.StringVar()
        self.qc_conn_status = tk.BooleanVar()
        # build the gui
        self._make()
        # style = ttk.Style()
        # style.theme_settings("default", {
        #     "TCombobox": {
        #         "configure": {"padding": 25}
        #     }
        # })
    def on_closing(self):
        """
        Window-manager close handler: release the QC connection,
        then destroy the window.
        """
        self.disconnect_qc()
        self.destroy()
    def disconnect_qc(self):
        """
        Release the QC connection and flag the UI as offline.
        """
        # NOTE(review): assumes qualitycenter.disconnect tolerates a None
        # connection (self.qcc starts as None) -- confirm.
        qualitycenter.disconnect(self.qcc)
        self.qc_conn_status.set(False)
    def _make(self):
        """Assemble the main layout: a horizontal paned window holding the
        local test-results pane and the remote Quality Center pane."""
        # the Main Frame
        main_frm = tk.Frame(self)
        full_pane = tk.PanedWindow(
            main_frm, orient=tk.HORIZONTAL, sashpad=4, sashrelief=tk.RAISED)
        local_pane = self._create_local_pane(full_pane)
        remote_pane = self._create_remote_pane(full_pane)
        full_pane.add(local_pane)
        full_pane.add(remote_pane)
        # Keep both panes resizable but never narrower than 400 px.
        full_pane.paneconfigure(local_pane, sticky='nsew', minsize=400)
        full_pane.paneconfigure(remote_pane, sticky='nsew', minsize=400)
        full_pane.grid(row=1, column=0, sticky='nsew', padx=10, pady=10)
        main_frm.grid(row=0, column=0, sticky='nsew', padx=5, pady=5)
        # Let the pane row/column absorb all extra space on resize.
        main_frm.rowconfigure(1, weight=1)
        main_frm.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
def _create_local_pane(self, full_pane):
local_pane = tk.LabelFrame(full_pane, text='Test Results')
self.choose_results_button = tk.Button(
local_pane,
text='Results',
width=15,
command=self._load_run_results)
self.choose_results_button.grid(
row=0, column=0, sticky='ew', padx=10, pady=5)
self.choose_results_entry = tk.Entry(
local_pane, state='disabled', textvariable=self.runresultsvar)
self.choose_results_entry.grid(
row=0, column=1, sticky='nsew', padx=10, pady=5)
self.choose_parser = ttk.Combobox(
local_pane, show='', state='disabled')
self.choose_parser.bind(
'<<ComboboxSelected>>', self._on_parser_changed)
self.choose_parser.grid(
row=1, column=0, columnspan=2, sticky='nsew', padx=10, pady=7)
self.runresultsview = TestResultsView(
local_pane, on_selected=self._on_test_result_selected)
self.runresultsview.grid(
row=2, column=0, columnspan=2, sticky='nsew', padx=10, pady=5) |
self.runresultsview.rowconfigure(0, weight=1)
self.runresultsview.columnconfigure(0, weight=1)
local_pane.rowconfigure(2, weight=1)
local_pane.columnconfigure(1, weight=1)
local_pane.confi | g(padx=10)
return local_pane
    def _on_qc_conn_status_changed(self, *_):
        """Trace callback for qc_conn_status: swap the connected/disconnected
        header frames and enable/disable upload accordingly."""
        if self.qc_conn_status.get():
            self.qc_connected_frm.tkraise()
            self.upload_button.config(state=tk.NORMAL)
        else:
            self.qc_disconnected_frm.tkraise()
            self.upload_button.config(state=tk.DISABLED)
            # Clear the remote directory tree; it is only valid while connected.
            for row in self.qcdir_tree.get_children():
                self.qcdir_tree.delete(row)
            # we didn't change selection, but fire off the events
            self._on_test_result_selected(None)
def _create_remote_pane(self, parent):
remote_pane = tk.LabelFrame(parent, text='Quality Center')
self.header_frame = tk.Frame(remote_pane)
# QC Disconnected Frame
self.qc_disconnected_frm = tk.Frame(self.header_frame)
if self.cfg.getboolean('main', 'history'):
hist = importer.load_history()
else:
hist = None
qc_connect_button = tk.Button(
self.qc_disconnected_frm,
text='Connect',
command=lambda: LoginWindow(self.login_callback, hist),
width=15)
qc_connect_button.grid(row=0, column=0, sticky='ew', pady=5)
self.qc_disconnected_frm.grid(row=0, column=0, sticky='nsew')
# QC Connected Frame
self.qc_connected_frm = tk.Frame(self.header_frame)
qc_disconnect_button = tk.Button(
self.qc_connected_frm, text='Disconnect',
command=self.disconnect_qc, width=15)
qc_disconnect_button.grid(
row=0, column=0, sticky='ew', padx=(0, 10), pady=5)
domain_label = tk.Label(
self.qc_connected_frm, text='Domain:', font=('sans-serif 10 bold'))
domain_label.grid(row=0, column=1)
domain_val_lbl = tk.Label(
self.qc_connected_frm, textvariable=self.qc_domain)
domain_val_lbl.grid(row=0, column=2, sticky='w', padx=10)
project_label = tk.Label(
self.qc_connected_frm, text='Project:', font=('sans-seif 10 bold'))
project_label.grid(row=0, column=3)
project_val_lbl = tk.Label(
self.qc_connected_frm, textvariable=self.qc_project)
project_val_lbl.grid(row=0, column=4, sticky='w', padx=10)
self.qc_connected_frm.columnconfigure(4, weight=1)
self.qc_connected_frm.grid(row=0, column=0, sticky='nsew')
# raise the disconnected frame first
self.qc_disconnected_frm.tkraise()
self.qc_conn_status.trace('w', self._on_qc_conn_status_changed)
# self.header_frame.columnconfigure(1, weight=1)
|
jms/potential-bassoon | srl/tests.py | Python | bsd-3-clause | 1,507 | 0.001327 | from django.test import TestCase
from srl.services.parse import numtosxg, sxgtonum
from srl.management.commands.create_fake_users import create_fake_users
from srl.views import get_random_user
from django.contrib.auth.models import User
class TestBaseConversion(TestCase):
    """numtosxg() encodes integers as sexagesimal digit strings."""

    def test_check0(self):
        self.assertEqual(numtosxg(0), '0')

    def test_check1(self):
        self.assertEqual(numtosxg(1), '1')

    def test_check60(self):
        self.assertEqual(numtosxg(60), '10')
class TestBaseConversionReverse(TestCase):
def test_check0(self):
assert sxgtonum('0') == 0
| def test_check1(self):
assert sxgtonum('1') == 1
def test_check60(self):
assert sxgtonum('10') == 60
def test_check1337(self):
assert sxgtonum('NH') == 1337
def test_checkl(self):
assert sxgtonum('l') == 1
def test_checkI(self):
assert sxgtonum('I') == 1
def test_checkO(self):
assert sxgtonum('O') | == 0
def test_checkpipe(self):
assert sxgtonum('|') == 0
def test_checkcomma(self):
assert sxgtonum(',') == 0
class TestRoundtripCheck(TestCase):
    """sxgtonum(numtosxg(n)) == n must hold for every n."""

    def test_roundtrip(self):
        for value in range(6000):
            self.assertEqual(sxgtonum(numtosxg(value)), value)
class TestRandomUser(TestCase):
    """get_random_user() should draw a User from the seeded user table."""
    def test_get_random_user(self):
        # Seed ten fake users, then expect a User instance back.
        create_fake_users(10)
        u = get_random_user()
        assert isinstance(u, User)
LordDarkula/polypy | polypy/product.py | Python | mit | 2,416 | 0.002897 | from .commutative import Commutative
class Product(Commutative):
def __init__(self, *args):
super(Product, self).__init__(*self.simplified(*args))
    def simplified(self, *args):
        """
        Returns a sequence containing expressions that make a simplified Product.

        Used when ``Product`` is initialized to simplify.
        Uses ``self.exprs`` when no arguments are provided.

        Integer factors are folded into a single trailing coefficient;
        factors of 1 are dropped.

        NOTE(review): when a factor is 0 this yields ``None`` but keeps
        looping, and the coefficient then becomes 0 so no coefficient is
        yielded at the end.  Presumably the caller treats a ``None`` item
        as "whole product is zero" -- confirm against Commutative.

        :type: args: int or Expression
        :rtype: seq
        """
        coefficient = 1
        args = args or self._exprs
        for arg in args:
            if isinstance(arg, int):
                # If any part is 0 the whole thing is 0
                if arg == 0:
                    yield None
                # 1 can be eliminated because 1 * x = x
                if arg == 1:
                    continue
                coefficient *= arg
            else:
                yield arg
        if coefficient != 0:
            yield coefficient
def __call__(self, val):
prod = 1
for expr in self._exprs:
prod *= self._val_of_exp(expr, val)
return prod
def degree(self):
"""
Returns total degree (ex degree x is 1, degree 3x^3 is 3) of product.
:rtype: int
"""
deg | = 0
for expr in self._exprs:
de | g += self._calc_degree(expr)
return deg
def order(self, ascending=True):
"""
Converts ''frozenset'' exprs into ''list'' ordered by degree.
:rtype: list
"""
return super(Product, self).order(ascending=True)
def same_base(self, other):
return isinstance(other, self.__class__) and \
self.rem_int() == other.rem_int()
def rem_int(self):
return frozenset([expr for expr in self._exprs if not isinstance(expr, int)])
def __str__(self):
return ''.join("{} * ".format(expr) for expr in self.order())[:-2] # Removes leftover *
    def __mul__(self, other):
        """Multiply this product by *other*.

        NOTE(review): the two branches return different types -- a Product
        for non-Product operands, but a plain ``set`` when multiplying two
        Products (squaring the shared factors).  Also relies on
        ``other.exprs`` existing on the peer; presumably a Commutative
        property -- confirm.  Looks like a latent inconsistency.
        """
        if not isinstance(other, self.__class__):
            return Product(*self._exprs, other)
        # Factors unique to one side stay as-is; shared factors are squared.
        no_overlap = self._exprs.union(other.exprs) - self._exprs.intersection(other.exprs)
        overlap = set([expr**2 for expr in self._exprs.intersection(other.exprs)])
        return no_overlap.union(overlap)
def __pow__(self, power, modulo=None):
return Product(*[expr**power for expr in self._exprs])
|
sdague/home-assistant | homeassistant/components/gios/__init__.py | Python | apache-2.0 | 2,334 | 0.001714 | """The GIOS component."""
import logging
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
from gios import ApiError, Gios, InvalidSensorsData, NoStationError
from homeassistant.core import Config, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import CONF_STATION_ID, DOMAIN, SCAN_IN | TERVAL
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
    """Set up configured GIOS.

    Nothing to do here: real setup happens per config entry.
    """
    return True
async def async_setup_entry(hass, config_entry):
"""Set up GIO | S as config entry."""
station_id = config_entry.data[CONF_STATION_ID]
_LOGGER.debug("Using station_id: %s", station_id)
websession = async_get_clientsession(hass)
coordinator = GiosDataUpdateCoordinator(hass, websession, station_id)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][config_entry.entry_id] = coordinator
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "air_quality")
)
return True
async def async_unload_entry(hass, config_entry):
    """Unload a GIOS config entry and drop its stored coordinator."""
    domain_store = hass.data[DOMAIN]
    domain_store.pop(config_entry.entry_id)
    unload = hass.config_entries.async_forward_entry_unload
    await unload(config_entry, "air_quality")
    return True
class GiosDataUpdateCoordinator(DataUpdateCoordinator):
    """Define an object to hold GIOS data."""

    def __init__(self, hass, session, station_id):
        """Class to manage fetching GIOS data API.

        :param hass: Home Assistant instance.
        :param session: aiohttp client session shared with the Gios client.
        :param station_id: GIOS measuring station identifier.
        """
        self.gios = Gios(station_id, session)
        super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)

    async def _async_update_data(self):
        """Update data via library."""
        try:
            # NOTE(review): bare ``with timeout(...)`` is the old async_timeout
            # API; newer releases require ``async with`` -- confirm pinned
            # async_timeout version.
            with timeout(30):
                await self.gios.update()
        except (
            ApiError,
            NoStationError,
            ClientConnectorError,
            InvalidSensorsData,
        ) as error:
            # Re-raise as UpdateFailed so the coordinator marks the update
            # unsuccessful and retries on schedule.
            raise UpdateFailed(error) from error
        return self.gios.data
|
eoss-cloud/madxxx_catalog_api | catalog/manage/__init__.py | Python | mit | 705 | 0.001418 | #-*- coding: utf-8 -*-
""" EOSS catalog system
| external catalog management package
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
from | abc import ABCMeta, abstractmethod
from utilities import with_metaclass
@with_metaclass(ABCMeta)
class ICatalog(object):
    """
    Simple catalog interface class.

    Abstract base: concrete catalogs must implement ``find`` and
    ``register``.
    """
    def __init__(self):
        pass

    @abstractmethod
    def find(self):
        # Search the catalog; concrete classes define the query contract.
        pass

    @abstractmethod
    def register(self, ds):
        # Register dataset *ds* with the catalog.
        pass
|
leakim/GameOfLifeKata | python/resources.py | Python | mit | 1,181 | 0.004237 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Resources to make it easier and faster to implement and test game | of life
#
# @author Mikael Wikström
# https://github.com/leakim/GameOfLifeKata
#
import pygame
class GameOfLife:
# still
BLOCK_0 = set([(0, 0), (0, 1), (1, 0), (1, 1)])
BLOCK_1 = BLOCK_0
| # oscillators
THREE_0 = set([(0, 1), (0, 0), (0, 2)])
THREE_1 = set([(0, 1), (-1, 1), (1, 1)])
# spaceships (moves)
GLIDER_0 = set([(0, 1), (1, 2), (0, 0), (0, 2), (2, 1)])
GLIDER_1 = set([(0, 1), (1, 2), (-1, 1), (1, 0), (0, 2)])
def move(state, (x, y)):
newstate = set()
for (u, v) in state:
newstate.add((x + u, y + v))
return newstate
#WINDOW_SIZE = [2*255, 2*255]
#screen = pygame.display.set_mode(WINDOW_SIZE)
def draw(screen, state, rows=25, cols=25, MARGIN=1):
    """Render the live cells in *state* as white rectangles on a black grid.

    :param screen: pygame display surface.
    :param state: set of (x, y) live-cell coordinates.
    :param rows: grid rows; :param cols: grid columns.
    :param MARGIN: pixel gap between cells.
    """
    BLACK = (0, 0, 0)
    WHITE = (255, 255, 255)
    (w, h) = screen.get_size()
    # NOTE(review): written for Python 2 -- under Python 3 ``/`` yields
    # floats here; pygame accepts them but cell sizes differ slightly.
    width, height = w/cols, h/rows
    screen.fill(BLACK)
    for (x, y) in state:
        pygame.draw.rect(screen, WHITE, [
            (MARGIN + width) * x + MARGIN,
            (MARGIN + height) * y + MARGIN,
            width, height])
    # Push the back buffer to the display.
    pygame.display.flip()
|
PanDAWMS/panda-bigmon-atlas | setup.py | Python | apache-2.0 | 36 | 0 | #!/ | usr/bin/env python
#
# Setup
#
# | |
iulian787/spack | var/spack/repos/builtin/packages/iwyu/package.py | Python | lgpl-2.1 | 1,882 | 0.003188 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Iwyu(CMakePackage):
"""include-what-you-use: A tool for use with clang to analyze #includes in
C and C++ source files
"""
homepage = "https://include-what-you-use.org"
url = "https://include-what-you-use.org/downloads/include-what-you-use-0.13.src.tar.gz"
maintainers = ['sethrj']
version('0.14', sha256='43184397db57660c32e3298a6b1fd5ab82e808a1f5ab0591d6745f8d256200ef')
version('0.13', sha256='49294270aa64e8c04182369212cd919f3b3e0e47601b1f935f038c761c265bc9')
version('0.12', sha256='a5892fb0abccb820c394e4e245c00ef30fc94e4ae58a048b23f94047c0816025')
version('0.11', sh | a256='2d2877726c4aed9518cbb37673ffbc2b7da9c239bf8fe29432da35c1c0ec367a')
patch('iwyu-013-cmake.patch', when='@0.13:0.14')
depends_on('llvm+clang@10.0:10.999', when='@0.14')
depends_on('llvm+clang@9.0:9.999', when='@0.13')
| depends_on('llvm+clang@8.0:8.999', when='@0.12')
depends_on('llvm+clang@7.0:7.999', when='@0.11')
# Non-X86 CPU use all_targets variants because iwyu use X86AsmParser
depends_on('llvm+all_targets', when='target=aarch64:')
depends_on('llvm+all_targets', when='target=arm:')
depends_on('llvm+all_targets', when='target=ppc:')
depends_on('llvm+all_targets', when='target=ppcle:')
depends_on('llvm+all_targets', when='target=ppc64:')
depends_on('llvm+all_targets', when='target=ppc64le:')
depends_on('llvm+all_targets', when='target=sparc:')
depends_on('llvm+all_targets', when='target=sparc64:')
@when('@0.14:')
def cmake_args(self):
return [self.define('CMAKE_CXX_STANDARD', 14),
self.define('CMAKE_CXX_EXTENSIONS', False)]
|
acdh-oeaw/vhioe | entities/views.py | Python | mit | 5,608 | 0.000178 | from django.views.generic.detail import DetailView
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views import generic
from django.views.generic.edit import DeleteView, CreateView, UpdateView
from .forms import BandForm, ArchivForm, InstitutionForm, PersonForm, BearbeiterForm
from .models import Band, Archiv, Institution, Person, Bearbe | iter
class BearbeiterListView(generic.ListView):
    """List all Bearbeiter (editor) records."""
    model = Bearbeiter
    template_name = 'entities/bearbeiter_list.html'
    context_object_name = 'object_list'
class BearbeiterDetailView(DetailView):
model = Bearbeiter
class BearbeiterCreate(CreateView):
model = Bearbeiter
template_name_suffix = '_create'
form_class = BearbeiterForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(BearbeiterCreate, self).dispatch(*args, | **kwargs)
class BearbeiterUpdate(UpdateView):
model = Bearbeiter
template_name_suffix = '_create'
form_class = BearbeiterForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(BearbeiterUpdate, self).dispatch(*args, **kwargs)
class BearbeiterDelete(DeleteView):
model = Bearbeiter
template_name = 'vocabs/confirm_delete.html'
success_url = reverse_lazy('browsing:browse_bearbeiter')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(BearbeiterDelete, self).dispatch(*args, **kwargs)
class PersonListView(generic.ListView):
    """List all Person records."""
    model = Person
    # NOTE(review): reuses the Band list template; looks like template
    # sharing but confirm it is not a copy/paste slip.
    template_name = 'entities/band_list.html'
    context_object_name = 'object_list'
class PersonDetailView(DetailView):
model = Person
class PersonCreate(CreateView):
model = Person
template_name_suffix = '_create'
form_class = PersonForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(PersonCreate, self).dispatch(*args, **kwargs)
class PersonUpdate(UpdateView):
model = Person
template_name_suffix = '_create'
form_class = PersonForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(PersonUpdate, self).dispatch(*args, **kwargs)
class PersonDelete(DeleteView):
model = Person
template_name = 'vocabs/confirm_delete.html'
success_url = reverse_lazy('browsing:browse_persons')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(PersonDelete, self).dispatch(*args, **kwargs)
class BandListView(generic.ListView):
    """List all Band records, ordered by signature."""
    model = Band
    template_name = 'entities/band_list.html'
    context_object_name = 'object_list'
    def get_queryset(self):
        # Order by the archival signature field rather than default pk order.
        return Band.objects.order_by('signatur')
class BandDetailView(DetailView):
model = Band
class BandCreate(CreateView):
model = Band
template_name_suffix = '_create'
form_class = BandForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(BandCreate, self).dispatch(*args, **kwargs)
class BandUpdate(UpdateView):
model = Band
template_name_suffix = '_create'
form_class = BandForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(BandUpdate, self).dispatch(*args, **kwargs)
class BandDelete(DeleteView):
model = Band
template_name = 'vocabs/confirm_delete.html'
success_url = reverse_lazy('browsing:browse_baende')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(BandDelete, self).dispatch(*args, **kwargs)
# Archiv
class ArchivDetailView(DetailView):
model = Archiv
class ArchivCreate(CreateView):
model = Archiv
template_name_suffix = '_create'
form_class = ArchivForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ArchivCreate, self).dispatch(*args, **kwargs)
class ArchivUpdate(UpdateView):
model = Archiv
template_name_suffix = '_create'
form_class = ArchivForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ArchivUpdate, self).dispatch(*args, **kwargs)
class ArchivDelete(DeleteView):
model = Archiv
template_name = 'vocabs/confirm_delete.html'
success_url = reverse_lazy('browsing:browse_archivs')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ArchivDelete, self).dispatch(*args, **kwargs)
# Institution
class InstitutionDetailView(DetailView):
model = Institution
class InstitutionCreate(CreateView):
    """Create view for Institution objects (login required)."""
    model = Institution
    template_name_suffix = '_create'
    form_class = InstitutionForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Bug fix: dispatch previously delegated via
        # super(InstitutionDelete, self), i.e. the wrong class, which raises
        # TypeError ("obj must be an instance...") on every request.
        return super(InstitutionCreate, self).dispatch(*args, **kwargs)
class InstitutionUpdate(UpdateView):
model = Institution
template_name_suffix = '_create'
form_class = InstitutionForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(InstitutionUpdate, self).dispatch(*args, **kwargs)
class InstitutionDelete(DeleteView):
model = Institution
template_name = 'vocabs/confirm_delete.html'
success_url = reverse_lazy('browsing:browse_institutions')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(InstitutionDelete, self).dispatch(*args, **kwargs)
|
hiei171/lendingbotpoloniex | modules/WebServer.py | Python | mit | 3,367 | 0.002376 | # coding=utf-8
import threading
server = None
web_server_ip = "0.0.0.0"
web_server_port = "8000"
web_server_template = "www"
def initialize_web_server(config):
    '''
    Setup the web server, retrieving the configuration parameters
    and starting the web server thread.

    :param config: configuration object exposing ``get(section, key, default)``
    '''
    global web_server_ip, web_server_port, web_server_template
    # Check for custom web server address
    compositeWebServerAddress = config.get('BOT', 'customWebServerAddress', '0.0.0.0').split(":")
    # associate web server ip address
    web_server_ip = compositeWebServerAddress[0]
    # check for IP:PORT legacy format
    if (len(compositeWebServerAddress) > 1):
        # associate web server port
        web_server_port = compositeWebServerAddress[1]
    else:
        # Check for custom web server port
        web_server_port = config.get('BOT', 'customWebServerPort', '8000')
    # Check for custom web server template
    web_server_template = config.get('BOT', 'customWebServerTemplate', 'www')
    print('Starting WebServer at {0} on port {1} with template {2}'
          .format(web_server_ip, web_server_port, web_server_template))
    thread = threading.Thread(target=start_web_server)
    # Bug fix: the attribute was misspelled "deamon", which silently created
    # an unrelated attribute and left the thread non-daemonic; it must be
    # set (correctly spelled) before start() for the process to exit cleanly.
    thread.daemon = True
    thread.start()
def start_web_server():
'''
Start the web server
'''
import SimpleHTTPServer
import SocketServer
import socket
try:
port = int(web_server_port)
host = web_server_ip
# Do not attempt to fix code warnings in the below class, it is perfect.
class QuietHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
# quiet server logs
def log_message(self, format, *args):
return
# serve from www folder under current working dir
def translate_path(self, path):
return SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, '/' + web_server_template + path)
global server
SocketServer.TCPServer.allow_reuse_address = True
se | rver = SocketServer.TCPServer((host, port), QuietHandler)
if host == "0.0. | 0.0":
# Get all addresses that we could listen on the port specified
addresses = [i[4][0] for i in socket.getaddrinfo(socket.gethostname().split('.')[0], port)]
addresses = [i for i in addresses if ':' not in i] # Filter out all IPv6 addresses
addresses.append('127.0.0.1') # getaddrinfo doesn't always get localhost
hosts = list(set(addresses)) # Make list unique
else:
hosts = [host]
serving_msg = "http://{0}:{1}/lendingbot.html".format(hosts[0], port)
for host in hosts[1:]:
serving_msg += ", http://{0}:{1}/lendingbot.html".format(host, port)
print('Started WebServer, lendingbot status available at {0}'.format(serving_msg))
server.serve_forever()
except Exception as ex:
ex.message = ex.message if ex.message else str(ex)
print('Failed to start WebServer: {0}'.format(ex.message))
def stop_web_server():
    '''
    Stop the web server by shutting it down from a helper thread.
    '''
    try:
        print("Stopping WebServer")
        shutdown_thread = threading.Thread(target=server.shutdown)
        shutdown_thread.start()
    except Exception as ex:
        ex.message = ex.message if ex.message else str(ex)
        print("Failed to stop WebServer: {0}".format(ex.message))
|
egroeper/exscript | tests/Exscript/util/startTest.py | Python | gpl-2.0 | 2,091 | 0.013391 | import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
import Exscript
import Exscript.util.start
from multiprocessing import Value
def count_calls(job, host, conn, data, **kwargs):
    """Callback used by the start/run tests: verifies the bound arguments
    arrive intact and bumps the shared call counter."""
    # Warning: Assertions raised in this function happen in a subprocess!
    assert kwargs.get('testarg') == 1
    assert isinstance(conn, Exscript.protocols.Protocol)
    # multiprocessing.Value shared across workers.
    data.value += 1
class startTest(unittest.TestCase):
CORRELATE = Exscript.util.start
def setUp(self):
from Exscript import Account
from Exscript.util.decorator import bind
self.data = Value('i', 0)
self.callback = bind(count_calls, self.data, testarg = 1)
self.account = Account('test', 'test')
def doTest(self, function):
# Run on zero hosts.
function(self.account, [], self.callback, verbose = 0)
self.assertEqual(self.data.value, 0)
# Run on one host.
function(self.account, 'dummy://localhost', self.callback, verbose = 0)
self.assertEqual(self.data.value, 1)
# Run on multiple hosts.
hosts = ['dummy://host1', 'dummy://host2']
function(self.account, hosts, self.callback, verbose = 0)
self.assertEqual(self.data.value, 3)
# Run on multiple hosts with multiple threads.
function(self.account,
hosts,
| self.callback,
max_threads = 2,
| verbose = 0)
self.assertEqual(self.data.value, 5)
def testRun(self):
from Exscript.util.start import run
self.doTest(run)
def testQuickrun(self):
pass # can't really be tested, as it is user interactive
def testStart(self):
from Exscript.util.start import start
self.doTest(start)
def testQuickstart(self):
pass # can't really be tested, as it is user interactive
def suite():
    """Collect every test from the startTest case into a suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(startTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
|
vinthony/racpider | src/redistool/clientinfo.py | Python | mit | 280 | 0.078571 | from redis import Redis
from basic import conn
def reflashState(client,count=None,network=None):
prefix = "client:"
if not client:
return False
x = dict()
if count:
conn().hset(prefix+client,"count",count)
if network:
conn | ().hset( | prefix+client,"network",network)
|
payal97/portal | systers_portal/meetup/permissions.py | Python | gpl-2.0 | 880 | 0 | from meetup.constants import *
groups_templates = {"community_member": COMMUNITY_MEMBER,
"community_moderator": COMMUNITY_MODERATOR,
"community_leader": COMMUNITY | _LEADER}
community_member_permissions = [
"add_meetup_rsvp",
"add_support_request"
]
community_moderator_permissions = community_member_permissions + [
"add_meetups",
"change_meetups",
"delete_meetups",
"approve_meetup_request",
"reject_meetup_request",
"view_meetup_request",
"appro | ve_support_request",
"reject_support_request",
"add_resource"
]
community_leader_permissions = community_moderator_permissions
group_permissions = {"community_member": community_member_permissions,
"community_moderator": community_moderator_permissions,
"community_leader": community_leader_permissions}
|
Mirantis/vmware-firewall-driver | setup.py | Python | apache-2.0 | 738 | 0 | # Copyright | 2015 Mirantis, Inc.
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an | "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
|
algorithmiaio/algorithmia-python | Algorithmia/insights.py | Python | mit | 534 | 0.007491 | import requests
import json
import os
class Insights:
# Example of correct insights:
# {"aKey":"aValue","aKey2":"a | Value2"}
def __init__(self, insights):
headers = {}
headers['Content-Type'] = 'application/json'
AQR_URL = os.getenv('ALGORITHMIA_API') or "http://localhost:9000"
insight_payload=[{"insight_key": key, "insight_value": insights[key]} for key in insights.keys()]
requests.post(AQR_URL+"/v1/insights", | data=json.dumps(insight_payload).encode('utf-8'), headers=headers)
|
thisisshi/cloud-custodian | tools/c7n_azure/c7n_azure/resources/container_registry.py | Python | apache-2.0 | 1,144 | 0 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
@resources.register('container-registry', aliases=['containerregistry'])
class ContainerRegistry(ArmResourceManager):
"""Container Registry Resource
:example:
Returns all container registry named my-test-container-registry
.. code-block:: yaml
policies:
- name: get-container-registry
resource: azure.container-registry
filter | s:
- type: value
key: name
op: eq
value: my-test-container-registry
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Containers']
service = 'azure.mgmt.containerregistry'
client = 'ContainerRegistryManagementClient'
enum_spec = ('registri | es', 'list', None)
default_report_fields = (
'name',
'location',
'resourceGroup',
'sku.name'
)
resource_type = 'Microsoft.ContainerRegistry/registries'
|
alejo8591/angular-labs | lab15/order/migrations/0004_product_product_likes.py | Python | mit | 467 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencie | s = [
('order', '0003_customer_customer_slug'),
]
operations = [
migrations.AddField(
model_name='product',
name='product_likes',
field=mo | dels.IntegerField(blank=True, null=True, default=0),
preserve_default=True,
),
]
|
nigelb/gRefer | gRefer/filer/startup.py | Python | gpl-3.0 | 1,643 | 0.003043 | #!/usr/bin/env python
# gRefer is a Bibliographic Management System that uses Google Docs
# as shared storage.
#
# Copyright (C) 2011 NigelB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNES | S FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <ht | tp://www.gnu.org/licenses/>.
from gRefer import log
def start_notifier():
    """Configure logging (notify handler + nightly-rotated file) and run
    the system-tray loop."""
    import os
    from gRefer.config_constants import dir_name, bibfiler_log_file
    from gRefer.log import NotifyHandler
    import logging
    import logging.handlers
    logger = logging.getLogger(name="Notifier")
    # Make sure the log directory exists before the file handler opens it.
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    # Bug fix: backupCount was the string "7"; TimedRotatingFileHandler
    # compares it against integers when pruning old files, so rotation
    # cleanup never worked (and raises TypeError on Python 3).
    fh = logging.handlers.TimedRotatingFileHandler(
        os.path.join(dir_name, bibfiler_log_file),
        when="midnight",
        backupCount=7
    )
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    # Attach both handlers at the root so every logger feeds them.
    logger.root.addHandler(NotifyHandler())
    logger.root.addHandler(fh)
    logger.setLevel(log.TRACE)
    logger.root.setLevel(log.TRACE)
    from gRefer.filer.systray import run_systray
    run_systray()
JShadowMan/package | python/LeetCode OJ/1.twoSum.py | Python | mit | 341 | 0.014663 | class So | lution(object):
def twoSum(self, nums, target):
return list([ (x, y) for x in range(len(nums)) for y in range(x, len(nums)) if nums[x] + nums[y] == target | and (x != y) ][0])
if __name__ == '__main__':
solution = Solution()
print(solution.twoSum([3, 2, 4], 6))
print(solution.twoSum([2, 7, 11, 15], 9)) |
tessercat/ddj | models/menu.py | Python | mit | 3,603 | 0.001388 | def app_logo_attr():
""" Return app logo LI attributes. """
attr = {
'_class': 'navbar-brand',
'_role': 'button'}
if request.controller == 'studies':
attr['_onclick'] = 'tocModal(event);'
attr['_title'] = 'Chapter Studies'
elif request.controller == 'poems' and request.function == 'chapter':
page = (int(request.args(0)) + 8) / 9
if page == 1:
attr['_href'] = URL('poems', 'index')
attr['_title'] = 'Poems'
else:
attr['_href'] = URL('poems', 'page', args=[str(page)])
attr['_title'] = 'Poems page %d' % page
else:
attr['_href'] = URL('poems', 'index')
attr['_title'] = 'Poems'
return attr
def auth_navbar():
""" Set up right navbar for logged-in user. """
navbar_right = auth.navbar(mode='dropdown')
menu = navbar_right.element('.dropdown-menu')
menu.insert(0, LI(A('Blacklist', _href=URL('blacklist', 'index'))))
menu.insert(0, LI(A('Whitelist', _href=URL('whitelist', 'index'))))
menu.insert(0, LI('', _class='divider'))
menu.insert(0, LI(A('Unihan Dump', _href=URL('unihan', 'dump'))))
if request.controller != 'studies':
menu.insert(0, LI(A('Studies', _href=URL('studies', 'index'))))
menu.insert(0, LI(A('Manage Poems', _href=URL('poems', 'manage'))))
if request.controller != 'poems':
menu.insert(0, LI(A('Poems', _href=URL('poems', 'index'))))
if request.controller != 'about':
menu.insert(0, LI(A('About', _href=URL('about', 'index'))))
return UL(navbar_right, _class='nav navbar-nav navbar-right')
def default_study():
""" Return a URL for the default study app chapter. """
public, private = cache.ram('toc', lambda: studies_toc())
if auth.user:
toc_map = private
else:
toc_map = public
if toc_map:
return URL('studies', 'chapter', args=[toc_map.items()[0][0]])
return URL('poems', 'index')
def plain_navbar():
""" Return right navbar for non-logged-in user. """
return UL(
LI(
A('Poems', _class='nav-link', _href=URL('poems', 'index')),
_class='nav-item',
),
LI(
A('Studies', _class='nav-link', _href=URL('studies', 'index')),
_class='nav-item',
),
LI(
A('About', _class='nav-link', _href=URL('about', 'index')),
_class='nav-item',
),
_class='nav navbar-nav navbar-right'
)
def studies_toc():
""" Return a tuple of ordered dicts that map chapter id to toc links.
The first dict contains chapters that | don't have an associated English
poem, the se | cond dict contains chapters that do. """
from collections import OrderedDict
def study_link(chapter):
verse = db.verse[chapter]
url = URL('studies', 'chapter', args=[verse.chapter.number])
cls = 'studies-toc-link'
lnk = '%i %s' % (verse.chapter.number, verse.chapter.title or '')
return DIV(A(lnk, _class=cls, _href=url))
public = OrderedDict()
private = OrderedDict()
for poem in db(db.poem).select(orderby=db.poem.chapter):
link = study_link(poem.chapter)
public[int(poem.chapter)] = link
for chapter in range(1, 82):
link = study_link(chapter)
private[int(chapter)] = link
return public, private
# Set navbar elements.
response.navbar_logo = LI(A('Daoistic', **app_logo_attr()))
if auth.user:
response.navbar_right = auth_navbar()
else:
response.navbar_right = cache.ram('navbar_right', lambda: plain_navbar())
|
ulule/python-mangopay | tests/test_users.py | Python | mit | 21,419 | 0.000794 | # -*- coding: utf-8 -*-
from datetime import date
from .resources import (User, NaturalUser, Wallet,
LegalUser, Transfer, Transaction)
from .test_base import BaseTest
from mangopay.utils import Money
import responses
import requests
import time
import re
requests_session = requests.Session()
class UsersTest(BaseTest):
@responses.activate
def test_create_natural_user(self):
self.mock_natural_user()
self.register_mock({
"method": responses.PUT,
"url": re.compile(r'https://api.sandbox.mangopay.com/v2/chouette/users/natural/\d+'),
"body": {
"FirstName": "Victor",
"LastName": "Claver",
"Address": "1 rue des Misérables, Paris",
"Birthday": int(time.mktime(date.today().timetuple())),
"Nationality": "FR",
"CountryOfResidence": "FR",
"Occupation": "Writer",
"IncomeRange": 6,
"PersonType": "NATURAL",
"Email": "victor@hugo.com",
"Id": "1169419",
"Tag": "custom tag",
"CreationDate": 1383321421,
"KYCLevel": "LIGHT"
},
"status": 200
})
params = {
"first_name": "Victor",
"last_name": "Hugo",
"address": "1 rue des Misérables, Paris",
"birthday": date.today(),
"nationality": "FR",
"country_of_residence": "FR",
"occupation": "Writer",
"income_range": 6,
"proof_of_identity": None,
"proof_of_address": None,
"person_type": "NATURAL",
"email": "victor@hugo.com",
"tag": "custom tag",
}
user = NaturalUser(**params)
self.assertIsNone(user.get_pk())
user.save()
self.assertIsInstance(user, NaturalUser)
for key, value in params.items():
self.assertEqual(getattr(user, key), value)
self.assertIsNotNone(user.get_pk())
previous_pk = user.get_pk()
user.last_name = 'Claver'
user.save()
self.assertEqual(previous_pk, user.get_pk())
self.assertEqual(user.last_name, 'Claver')
@responses.activate
def test_create_legal_user(self):
self.mock_legal_user()
self.register_mock({
'method': responses.PUT,
'url': re.compile(r'https://api.sandbox.mangopay.com/v2/chouette/users/legal/\d+'),
'body': {
"Name": "MangoPay edited",
"LegalPersonType": "BUSINESS",
"HeadquartersAddress": "1 rue MangoPay, Paris",
"LegalRepresentativeFirstName": "Mango",
"LegalRepresentativeLastName": "Pay",
"LegalRepresentativeEmail": "mango@mangopay.com",
"LegalRepresentativeBirthday": 1300186358,
"LegalRepresentativeNationality": "FR",
"LegalRepresentativeCountryOfResidence": "FR",
"PersonType": "LEGAL",
"Email": "info@mangopay.com",
"Id": "1169420",
"Tag": "custom tag",
"CreationDate": 1383322502,
"KYCLevel": "LIGHT"
},
'status': 200
})
params = {
"name": "MangoPay",
"legal_person_type": "BUSINESS",
"headquarters_address": "1 rue MangoPay, Paris",
"legal_representative_first_name": "Mango",
"legal_representative_last_name": "Pay",
"legal_representative_email": "mango@mangopay.com",
"legal_representative_birth | day": date.today(),
"legal_representative_nationality": "FR",
"legal_representative_country_of_residence": "FR",
"proof_of_registration": None,
"shareholder_declaration": None,
"legal_representative_address": None,
"statute": None,
"person_type": "LEGAL",
"email": "info@mangopay.com",
"tag": "custom tag",
| # "creation_date": datetime.now()
}
user = LegalUser(**params)
self.assertIsNone(user.get_pk())
user.save()
self.assertIsInstance(user, LegalUser)
for key, value in params.items():
self.assertEqual(getattr(user, key), value)
self.assertIsNotNone(user.get_pk())
previous_pk = user.get_pk()
user.last_name = 'Claver'
user.save()
self.assertEqual(previous_pk, user.get_pk())
self.assertEqual(user.last_name, 'Claver')
@responses.activate
def test_retrieve_natural_user(self):
self.mock_natural_user()
self.register_mock([
{
'method': responses.GET,
'url': 'https://api.sandbox.mangopay.com/v2/chouette/users/natural/1169419',
'body': {
"FirstName": "Victor",
"LastName": "Hugo",
"Address": "1 rue des Misérables, Paris",
"Birthday": int(time.mktime(date.today().timetuple())),
"Nationality": "FR",
"CountryOfResidence": "FR",
"Occupation": "Writer",
"IncomeRange": 6,
"ProofOfIdentity": None,
"ProofOfAddress": None,
"PersonType": "NATURAL",
"Email": "victor@hugo.com",
"Id": "1169419",
"Tag": "custom tag",
"CreationDate": 1383321421,
"KYCLevel": "LIGHT"
},
'status': 200
},
{
'method': responses.GET,
'url': 'https://api.sandbox.mangopay.com/v2/chouette/users/natural/1169420',
'body': {"errors": []},
'status': 404
}])
params = {
"first_name": "Victor",
"last_name": "Hugo",
"address": "1 rue des Misérables, Paris",
"birthday": date.today(),
"nationality": "FR",
"country_of_residence": "FR",
"occupation": "Writer",
"income_range": 6,
"proof_of_identity": None,
"proof_of_address": None,
"person_type": "NATURAL",
"email": "victor@hugo.com",
"tag": "custom tag",
}
user = NaturalUser(**params)
user.save()
self.assertRaises(NaturalUser.DoesNotExist, NaturalUser.get, user.get_pk() + 1)
self.assertIsNotNone(user.get_pk())
user = NaturalUser.get(user.get_pk())
self.assertIsNotNone(user.get_pk())
for key, value in params.items():
self.assertEqual(getattr(user, key), value)
@responses.activate
def test_retrieve_legal_user(self):
self.mock_legal_user()
self.register_mock([
{
'method': responses.GET,
'url': 'https://api.sandbox.mangopay.com/v2/chouette/users/legal/1169420',
'body': {
"Name": "MangoPay",
"LegalPersonType": "BUSINESS",
"HeadquartersAddress": "1 rue MangoPay, Paris",
"LegalRepresentativeFirstName": "Mango",
"LegalRepresentativeLastName": "Pay",
"LegalRepresentativeEmail": "mango@mangopay.com",
"LegalRepresentativeBirthday": int(time.mktime(date.today().timetuple())),
"LegalRepresentativeNationality": "FR",
"LegalRepresentativeCountryOfResidence": "FR",
"ProofOfRegistration": None,
"ShareholderDeclaration": None,
"LegalRepresentativeAddress": None,
"Statute": None,
"PersonType": "LEGAL",
"Email": "info@mangopay.com",
"Id": "1169420",
"Tag": "custom tag",
|
klahnakoski/MySQL-to-S3 | vendor/mo_logs/log_usingThreadedStream.py | Python | mpl-2.0 | 3,886 | 0.002316 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from time import time
from mo_future import text_type
from mo_logs import Log
from mo_logs.log_usingNothing import StructuredLogger
from mo_logs.strings import expand_template
from mo_threads import Thread, THREAD_STOP, Till
DEBUG_LOGGING = False
class StructuredLogger_usingThreadedStream(StructuredLogger):
# stream CAN BE AN OBJCET WITH write() METHOD, OR A STRING
# WHICH WILL eval() TO ONE
def __init__(self, stream):
assert stream
use_UTF8 = False
if isinstance(stream, text_type):
if stream.startswith("sys."):
use_UTF8 = True # sys.* ARE OLD AND CAN NOT HANDLE unicode
self.stream = eval(stream)
name = stream
else:
self.stream = stream
name = "stream"
# WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
from mo_threads import Queue
if use_UTF8:
def utf8_appender(value):
if isinstance(value, text_type):
value = value.encode('utf8')
self.stream.write(value)
appender = utf8_appender
else:
appender = self.stream.write
self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
self.thread.parent.remove_child(self.thread) # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
self.thread.start()
def write(self, template, params):
try:
self.queue.add({"template": template, "params": params})
return self
except Exception as e:
raise e # OH NO!
def stop(self):
try:
self.queue.add(THREAD_STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
except Exception as e:
if DEBUG_LOGGING:
raise e
try:
self.queue.close()
except Exception as f:
if DEBUG_LOGGING:
raise f
def time_delta_pusher(please_stop, appender, queue, interval):
"""
appender - THE FUNCTION THAT ACCEPTS A STRING
| queue - FILLED WITH LOG ENTRIES {"template":template, "params":params} TO WRITE
interval - timedelta
USE IN A THREAD TO BATCH LOGS BY TIME INTERVAL
"""
next_run = time() + interval
while not please_stop:
(Till(till=next_run) | please_stop).wait()
next_run = time() + interval
logs = queue.pop_all()
if not logs:
continue
lines = []
for log in logs:
| try:
if log is THREAD_STOP:
please_stop.go()
next_run = time()
else:
expanded = expand_template(log.get("template"), log.get("params"))
lines.append(expanded)
except Exception as e:
location = log.get('params', {}).get('location', {})
Log.warning("Trouble formatting log from {{location}}", location=location, cause=e)
# SWALLOW ERROR, GOT TO KEEP RUNNING
try:
appender(u"\n".join(lines) + u"\n")
except Exception as e:
sys.stderr.write(b"Trouble with appender: " + str(e.__class__.__name__) + b"\n")
# SWALLOW ERROR, MUST KEEP RUNNING
|
secnot/uva-onlinejudge-solutions | 10032 - Tug of War/main.py | Python | mit | 1,768 | 0.00509 | import sys
def load_num():
return int(sys.stdin.readline().rstrip())
def load_case():
_ = sys.stdin.readline() # Empty line
npeople = load_num()
return [load_num() for _ in range(npeople)]
def find_split(weights, total_weight):
reachable = [0 for _ in range(total_weight+1)]
reachable[0] = 1
# If the bit k of the number stored in reachable[n] is active
# that means that it is possible to select k out of all the
# candidates, so the sum of their weight is n..
# WARNING: Bitwise operations are too slow in python but it's still the
# fastest aproach.
for i, w in enumerate(weights):
for j in range(total_weight, -1, -1):
if reachable[j]:
reachable[j+w] |= (reachable[j]<<1)
# Search nearest value to the perfect split reachable with the sum
# of half of the weights
half_weight = 0
with_half_people = 1<<(len(weights)//2)
for i in range(total_weight//2):
if reachable[total_weight//2+i] & with_half_people:
half_weight = total_weight//2+i
break
if reachable[total_weight//2-i] & with_half_people:
half_weight = total_weight//2-i
break
return min(total_weight-half_weight, half_weight), max(total_weight-half_weight, half_weight)
def solve(weights):
total_weig | ht = sum(weights)
# Check limit cases
if len(weights) == 0:
return 0, 0
elif len(weights) == 1:
return 0, weights[0]
return find_split(weights, total_weight)
if __name__ == '__main__':
ncases = loa | d_num()
for c in range(ncases):
weights = load_case()
low, high = solve(weights)
print(low, high)
if c+1 < ncases:
print('')
|
Jumpers/MysoftAutoTest | Step1-PythonBasic/Practices/yuxq/16-17/ex16_2.py | Python | apache-2.0 | 107 | 0.037383 | from sys import argv
script,filename=argv
aread= | open(filename)
print aread.read()
aread.clo | se() |
cortesi/qtile | docs/sphinx_qtile.py | Python | mit | 6,730 | 0.00104 | # Copyright (c) 2015 dmpayton
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import importlib
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import Directive
from jinja2 import Template
from libqtile import command, configurable, widget
from six import class_types
from six.moves import builtins, reduce
from sphinx.util.nodes import nested_parse_with_titles
qtile_module_template = Template('''
.. qtile_class:: {{ module }}.{{ class_name }}
{% if no_config %}:no-config:{% endif %}
{% if no_commands %}:no-commands:{% endif %}
''')
qtile_class_template = Template('''
{{ class_name }}
{{ class_underline }}
.. autoclass:: {{ module }}.{{ class_name }}{% for arg in extra_arguments %}
{{ arg }}{% endfor %}
{% if is_widget %}
.. compound::
Supported bar orientations: {{ obj.orientations }}
{% endif %}
{% if configurable %}
.. list-table::
:widths: 20 20 60
:header-rows: 1
* - key
- default
- description
{% for key, default, description in defaults %}
* - ``{{ key }}``
- ``{{ default|pprint }}``
- {{ description }}
{% endfor %}
{% endif %}
{% if commandable %}
{% for cmd in commands %}
.. automethod:: {{ module }}.{{ class_name }}.{{ cmd }}
{% endfor %}
{% endif %}
''')
qtile_hooks_template = Template('''
.. automethod:: libqtile.hook.subscribe.{{ method }}
''')
# Adapted from sphinxcontrib-httpdomain
def import_object(module_name, expr):
mod = __import__(module_name)
mod = reduce(getattr, module_name.split('.')[1:], mod)
glob | als = builtins
if not isinstance(globals, dict):
globals = globals.__dict__
return eval(expr, globals, mod.__dict__)
class SimpleDirectiveMixin(object):
has_content = True
required_arguments = 1
def make_rs | t(self):
raise NotImplementedError
def run(self):
node = nodes.section()
node.document = self.state.document
result = ViewList()
for line in self.make_rst():
result.append(line, '<{0}>'.format(self.__class__.__name__))
nested_parse_with_titles(self.state, result, node)
return node.children
class QtileClass(SimpleDirectiveMixin, Directive):
optional_arguments = 2
def make_rst(self):
module, class_name = self.arguments[0].rsplit('.', 1)
arguments = self.arguments[1:]
obj = import_object(module, class_name)
is_configurable = ':no-config:' not in arguments
is_commandable = ':no-commands:' not in arguments
arguments = [i for i in arguments if i not in (':no-config:', ':no-commands:')]
# build up a dict of defaults using reverse MRO
defaults = {}
for klass in reversed(obj.mro()):
if not issubclass(klass, configurable.Configurable):
continue
if not hasattr(klass, "defaults"):
continue
klass_defaults = getattr(klass, "defaults")
defaults.update({
d[0]: d[1:] for d in klass_defaults
})
# turn the dict into a list of ("value", "default", "description") tuples
defaults = [
(k, v[0], v[1]) for k, v in sorted(defaults.items())
]
context = {
'module': module,
'class_name': class_name,
'class_underline': "=" * len(class_name),
'obj': obj,
'defaults': defaults,
'configurable': is_configurable and issubclass(obj, configurable.Configurable),
'commandable': is_commandable and issubclass(obj, command.CommandObject),
'is_widget': issubclass(obj, widget.base._Widget),
'extra_arguments': arguments,
}
if context['commandable']:
context['commands'] = [attr for attr in dir(obj)
if attr.startswith('cmd_')]
rst = qtile_class_template.render(**context)
for line in rst.splitlines():
yield line
class QtileHooks(SimpleDirectiveMixin, Directive):
def make_rst(self):
module, class_name = self.arguments[0].rsplit('.', 1)
obj = import_object(module, class_name)
for method in sorted(obj.hooks):
rst = qtile_hooks_template.render(method=method)
for line in rst.splitlines():
yield line
class QtileModule(SimpleDirectiveMixin, Directive):
# :baseclass: <base class path>
# :no-commands:
# :no-config:
optional_arguments = 4
def make_rst(self):
module = importlib.import_module(self.arguments[0])
BaseClass = None
if ':baseclass:' in self.arguments:
BaseClass = import_object(*self.arguments[
self.arguments.index(':baseclass:') + 1].rsplit('.', 1))
for item in dir(module):
obj = import_object(self.arguments[0], item)
if not isinstance(obj, class_types) and (BaseClass and
not isinstance(obj, BaseClass)):
continue
context = {
'module': self.arguments[0],
'class_name': item,
'no_config': ':no-config:' in self.arguments,
'no_commands': ':no-commands:' in self.arguments,
}
rst = qtile_module_template.render(**context)
for line in rst.splitlines():
if not line.strip():
continue
yield line
def setup(app):
app.add_directive('qtile_class', QtileClass)
app.add_directive('qtile_hooks', QtileHooks)
app.add_directive('qtile_module', QtileModule)
|
raphaelsoul/supermilai | account/admin.py | Python | bsd-2-clause | 918 | 0.057734 | from django.contrib import admin
from account.models import UserProfile
from django.contrib.auth.models import Permission, Group
class UserProfileAdmin(admin.ModelAdmin):
#,'qq','first_name','last_name','truename','email','groups'
fieldsets = [
(None,{'fields':['username']}),
('Profile',{'fields':['last_name','f | irst_name','truename']}),
('Contact Information',{'fields':['email','qq']}),
('Permission Type',{'fields' | :['is_superuser','is_staff','is_active','groups']}),
#('My Permission Type',{'fields':['mygroups']}),
#('Timestamp',{'fields':['date_joined','last_login']}),
]
list_display = (
'username',
'truename',
'email',
'is_superuser',
'is_staff',
'date_joined',
'last_login',
#'groups',
)
admin.site.register(UserProfile,UserProfileAdmin)
class PermissionAdmin(admin.ModelAdmin):
list_display = ('name','content_type','codename')
admin.site.register(Permission,PermissionAdmin)
|
andersonsilvade/5semscript | tekton/gae/middleware/email_errors.py | Python | mit | 2,781 | 0.002517 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import logging
import traceback
import time
from google.appengine.api import app_identity, mail, capabilities
from google.appengine.runtime import DeadlineExceededError
from tekton.gae.middleware import Middleware
from tekton.router import PathNotFound
def get_apis_statuses(e):
if not isinstance(e, DeadlineExceededError):
return {}
t1 = time.time()
statuses = {
'blobstore': capabilities.CapabilitySet('blobstore').is_enabled(),
'datastore_v3': capabilities.CapabilitySet('datastore_v3').is_enabled(),
'datastore_v3_write': capabilities.CapabilitySet('datastore_v3', ['write']).is_enabled(),
'images': capabilities.CapabilitySet('images').is_enabled(),
'mail': capabilities.CapabilitySet('mail').is_enabled(),
'memcache': capabilities.CapabilitySet('memcache').is_enabled(),
'taskqueue': capabilities.CapabilitySet('taskqueue').is_enabled(),
'urlfetch': capabilities.CapabilitySet('urlfetch').is_enabled(),
}
t2 = time.time()
statuses['time'] = t2 - t1
return statuses
def send_error_to_admins(settings, exception, handler, render, template):
tb = traceback.format_exc()
errmsg = exception.message
logging.error(errmsg)
logging.error(tb)
handler.response.write(render(template))
appid = app_identity.get_application_id()
subject = 'ERROR in %s: [%s] %s' % (appid, handler.request.path, errmsg)
body = """
------------- request ------------
%s
----------------------------------
------------- GET params ---------
%s
----------------------------------
----------- POST params ----------
%s
----------------------------------
----------- traceback ------------
%s
----------------------------------
""" % (handler.request, handler.request.GET, handler.request.POST, tb)
body += 'API statuses = ' + json.dumps(get_apis_statuses(exception), indent=4)
mail.send_mail_to_admins(sender=settings.SENDER_EMAIL,
subject=subject,
body=body)
class EmailMiddleware(Middleware):
def handle_error(self, exception):
import settings # workaround. See https://github.com/renzon/zenwarch/issues/3
if | isinstance(exception, PathNotFound):
self.handler.response.set_status(404)
send_error_to_admins(settings, exception, self.handler, self.dependencies['_render'],
settings.TEMPLATE_404_ERROR)
else:
self.han | dler.response.set_status(400)
send_error_to_admins(settings, exception, self.handler, self.dependencies['_render'],
settings.TEMPLATE_400_ERROR)
|
s20121035/rk3288_android5.1_repo | cts/suite/audio_quality/test_description/processing/recording_thd.py | Python | gpl-3.0 | 2,834 | 0.001764 | #!/usr/bin/python
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from consts import *
import numpy as np
import scipy as sp
from calc_thd import *
import calc_delay
# calculate THD for dut_recording_thd case
# Input: host recording (mono), device recording (mono),
# frequency of sine in Hz (i64)
# THD pass level in percentile (double)
# Output:THD host (double), THD device (double) in percentile
# host recording will be longer than device recording
# the function works in following steps:
# 1. match the start of device recording with host recording
# As the host recording starts eariler and longer than device | recording,
# matching process is required.
# 2. calculate THD of host recording and client recording
# 3. check pass/fail
def recording_thd(inputData, inputTypes):
| output = []
outputData = []
outputTypes = []
# basic sanity check
inputError = False
if (inputTypes[0] != TYPE_MONO):
inputError = True
if (inputTypes[1] != TYPE_MONO):
inputError = True
if (inputTypes[2] != TYPE_I64):
inputError = True
if (inputTypes[3] != TYPE_DOUBLE):
inputError = True
if inputError:
output.append(RESULT_ERROR)
output.append(outputData)
output.append(outputTypes)
return output
hostRecording = inputData[0]
deviceRecording = inputData[1]
signalFrequency = inputData[2]
thdPassPercentile = inputData[3]
samplingRate = 44100
delay = calc_delay.calc_delay(hostRecording, deviceRecording)
N = len(deviceRecording)
print "delay ", delay, "deviceRecording samples ", N
thdHost = calc_thd(hostRecording[delay:delay+N], signalFrequency, samplingRate, 0.02) * 100
thdDevice = calc_thd(deviceRecording, signalFrequency, samplingRate, 0.02) * 100
print "THD Host %", thdHost, "THD device %", thdDevice, "Margain % ", thdPassPercentile
if (thdDevice < (thdHost + thdPassPercentile)) and (thdHost < thdPassPercentile):
output.append(RESULT_PASS)
else:
output.append(RESULT_OK)
outputData.append(thdHost)
outputTypes.append(TYPE_DOUBLE)
outputData.append(thdDevice)
outputTypes.append(TYPE_DOUBLE)
output.append(outputData)
output.append(outputTypes)
return output
|
AnykeyNL/uArmProPython | test/release_test.py | Python | gpl-3.0 | 1,108 | 0.020758 | # uArm Swift Pro - Python Library Example
# Created by: Richard Garsthagen - the.anykey@gmail.com
# V0 | .3 - June 2018 - Still under development
#
# Use Python 2.x!
import uArmRobot
import time
import easycv2
points = []
coins = []
totalP = 4
speed = 80
#C | onfigure Serial Port
serialport = "com10" # for windows
#serialport = "/dev/ttyACM0" # for linux like system
# Connect to uArm
myRobot = uArmRobot.robot(serialport,1) # user 0 for firmware < v4 and use 1 for firmware v4
myRobot.debug = True # Enable / Disable debug output on screen, by default disabled
myRobot.connect()
myRobot.mode(0) # Set mode to Normal
time.sleep(1)
# Move robot, command will complete when motion is completed
myRobot.goto(120, 0, 30,20)
time.sleep(1)
print ("Releasing motors")
myRobot.motors_on(False)
raw_input("press enter")
myRobot.get_coor()
x,y,z = [myRobot.X, myRobot.Y, myRobot.Z]
print ("X: {} Y:{} Z:{}".format(x,y,z))
myRobot.motors_on(True)
myRobot.goto(120, 0, 30,20)
time.sleep(1)
myRobot.goto(x,y,30,20)
myRobot.goto(x,y,z,20)
#Disconnect serial connection
myRobot.disconnect()
|
pfalcon/micropython | tests/basics/int_divzero.py | Python | mit | 146 | 0 | try:
| 1 // 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1 % 0
except ZeroDivisionError:
pri | nt("ZeroDivisionError")
|
gratefulfrog/lib | python/pymol/computing.py | Python | gpl-2.0 | 10,218 | 0.013114 | #A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
import cmd as cmd_module
from cmd import _cmd, lock, unlock, Shortcut, \
_feedback, fb_module, fb_mask, \
DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, is_ok, is_error, \
is_list, safe_list_eval, is_string
import string
import traceback
import threading
import os
def model_to_sdf_list(self_cmd,model):
from chempy import io
sdf_list = io.mol.toList(model)
fixed = []
restrained = []
at_id = 1
for atom in model.atom:
if atom.flags & 4:
if hasattr(atom,'ref_coord'):
restrained.append( [at_id,atom.ref_coord])
if atom.flags & 8:
fixed.append(at_id)
at_id = at_id + 1
fit_flag = 1
if len(fixed):
fit_flag = 0
sdf_list.append("> <FIXED_ATOMS>\n")
sdf_list.append("+ ATOM\n");
for ID in fixed:
sdf_list.append("| %4d\n"%ID)
sdf_list.append("\n")
if len(restrained):
fit_flag = 0
sdf_list.append("> <RESTRAINED_ATOMS>\n")
sdf_list.append("+ ATOM MIN MAX F_CONST X Y Z\n")
for entry in restrained:
xrd = entry[1]
sdf_list.append("| %4d %6.3f %6.3f %6.3f %10.4f %10.4f %10.4f\n"%
(entry[0],0,0,3,xrd[0],xrd[1],xrd[2]))
sdf_list.append("\n")
electro_mode = int(self_cmd.get('clean_electro_mode'))
if electro_mode == 0:
fit_flag = 0
sdf_list.append("> <ELECTROSTATICS>\n")
sdf_list.append("+ TREATMENT\n")
sdf_list.append("| NONE\n")
sdf_list.append("\n")
sdf_list.append("$$$$\n")
# for line in sdf_list:
# print line,
return (fit_flag, sdf_list)
def get_energy_from_rec(rec):
# we really need to replace this with a proper SD parser...
result = 9999.00
try:
rec_list = rec.splitlines()
read_energy = 0
for line in rec_list:
if read_energy == 1:
result = float(line.strip())
break
if line.strip() == '> <MMFF94 energy>':
read_energy = 1
except:
traceback.print_exc()
return result
class CleanJob:
def __init__(self,self_cmd,sele,state=-1,message=None):
self.cmd = self_cmd
if message == '':
message = None
if state<1:
state = self_cmd.get_state()
# this code will moved elsewhere
ok = 1
try:
from freemol import mengine
except:
ok = 0
print "Error: unable to import freemol.mengine module."
print "This PyMOL build appears not to include full modeling capabilities."
if ok:
if not mengine.validate():
ok = 0
print "Error: Unable to validate freemol.mengine"
if ok:
if self_cmd.count_atoms(sele) > 999:
ok = 0
print "Error: Sorry, clean is currently limited to 999 atoms"
if not ok:
pass
# we can't call warn because this is the not the tcl-tk gui thread
# warn("Please be sure that FreeMOL is correctly installed.")
else:
if message != None:
self.cmd.do("_ cmd.wizard('message','''%s''')"%message)
obj_list = self_cmd.get_object_list("bymol ("+sele+")")
ok = 0
result = None
if is_list(obj_list) and (len(obj_list)==1):
obj_name = obj_list[0]
self_cmd.sculpt_deactivate(obj_name)
# eliminate all sculpting information for object
self.cmd.sculpt_purge()
self.cmd.set("sculpting",0)
state = self_cmd.get_state()
if self_cmd.count_atoms(obj_name+" and flag 2"): # any atoms restrained?
self_cmd.reference("validate",obj_name,state) # then we have reference coordinates
input_model = self_cmd.get_model(obj_name,state=state)
(fit_flag, sdf_list) = model_to_sdf_list(self_cmd,input_model)
input_sdf = string.join(sdf_list,'')
# print input_sdf
result = mengine.run(input_sdf)
if result != None:
if len(result):
clean_sdf = result[0]
clean_rec = clean_sdf.split("$$$$")[0]
energy = get_energy_from_rec(clean_rec)
if len(clean_rec) and int(energy) != 9999:
clean_name = "builder_clean_tmp"
self_cmd.set("suspend_updates")
try:
self_cmd.read_molstr(clean_rec, clean_name, zoom=0)
# need to insert some error checking here
if clean_name in self_cmd.get_names("objects"):
self_cmd.set("retain_order","1",clean_name)
if fit_flag:
self_cmd.fit(clean_name, obj_name, matchmaker=4,
mobile_state=1, target_state=state)
self_cmd.push_undo(obj_name)
self_cmd.update(obj_name, clean_name, matchmaker=0,
source_state=1, target_state=state)
self_cmd.sculpt_activate(obj_name)
self_cmd.sculpt_deactivate(obj_name)
ok = 1
finally:
self_cmd.delete(clean_name)
self_cmd.unset("suspend_updates")
if not ok:
# we can't call warn because this is the not the tcl-tk gui thread
if result != None:
if len(result)>1:
print "\n=== mengine errors below === "
print result[1].replace("\n\n","\n"),
print "=== mengine errors above ===\n"
failed_file = "cleanup_failed.sdf"
print "Clean-Error: Structure cleanup failed. Invalid input or software malfuction?"
arom | atic = 0
for bond in input_model.bond:
if bond.order == 4:
aromatic = 1
try:
open(failed_file,'wb').write(input_sdf)
print "Clean-Error: Wrote | SD file '%s' into the directory:"%failed_file
print "Clean-Error: '%s'."%os.getcwd()
print "Clean-Error: If you believe PyMOL should be able to handle this structure"
print "Clean-Error: then please email that SD file to help@schrodinger.com. Thank you!"
except IOError:
print "Unabled to write '%s"%failed_file
if aromatic:
print "Clean-Warning: Please eliminate aromatic bonds and then try again."
if message!=None:
self_cmd.do("_ wizard")
def _clean(selection, present='', state=-1, fix='', restrain='',
method='mmff', async=0, save_undo=1, message=None,
_self=cmd_module):
self_cmd = _self
clean1_sele = "_clean1_tmp"
clean2_sele = "_clean2_tmp"
clean_obj = "_clean_obj"
|
oblique-labs/pyVM | rpython/jit/backend/zarch/test/test_float.py | Python | mit | 467 | 0.002141 | import py
from rpython.jit.backend.zarch.test.support import JitZARCHMixin
from rpython.jit.metainterp.test.test_float import FloatTests
from rpython.jit.backend.detect_cpu import getcpuclass

CPU = getcpuclass()


class TestFloat(JitZARCHMixin, FloatTests):
    # for the individual tests see
    # ====> ../../../metainterp/test/test_float.py

    # Skip the singlefloat test on CPUs that lack singlefloat support.
    if not CPU.supports_singlefloats:
        def test_singlefloat(self):
            py.test.skip('requires singlefloats')
|
ClearcodeHQ/matchbox | src/matchbox/__init__.py | Python | lgpl-3.0 | 925 | 0 | # Copyright | (C) 2015 by Clearcode <http://clearcode.cc>
# and associates (see A | UTHORS).
# This file is part of matchbox.
# matchbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# matchbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with matchbox. If not, see <http://www.gnu.org/licenses/>.
"""Main matchbox module."""
from matchbox.box import MatchBox
from matchbox.index import MatchIndex
__version__ = "1.1.1"
__all__ = ("MatchBox", "MatchIndex")
|
nmoutschen/tsurvey | src/tsurvey/__init__.py | Python | mit | 38 | 0 | """
Anonymous token-based surveys
"""
|
kavyasukumar/django-calaccess-raw-data | calaccess_raw/admin/base.py | Python | mit | 203 | 0 | from django.contrib import admin
class BaseAdmin(admin.ModelAdmin):
    """Base ModelAdmin: save bar pinned to the top and every model field
    exposed as read-only."""
    save_on_top = True

    def get_readonly_fields(self, *args, **kwargs):
        # Mark every field on the model read-only (data is loaded in bulk,
        # not edited through the admin).
        return [f.name for f in self.model._meta.fields]
|
Nilpo/RPi-Samples | metronome_led.py | Python | mit | 1,233 | 0.055961 | from time import sleep
import sys
import os
import RPi.GPIO as GPIO
# Use Broadcom (BCM) pin numbering.
GPIO.setmode(GPIO.BCM)
# LED pins: red() drives p1, green() drives p2 (see functions below).
p1 = 14
p2 = 15
GPIO.setup(p1, GPIO.OUT)
GPIO.setup(p2, GPIO.OUT)
class Quit(Exception): pass
def green(duration):
    """Light the LED on p2 for *duration* seconds with p1 switched off."""
    GPIO.output(p1, False)
    GPIO.output(p2, True)
    # fixed corrupted call ("sl | eep")
    sleep(duration)
    GPIO.output(p2, False)
def red(duration):
    """Light the LED on p1 for *duration* seconds with p2 switched off."""
    GPIO.output(p2, False)
    GPIO.output(p1, True)
    sleep(duration)
    GPIO.output(p1, False)
def main():
    """Prompt for a tempo in bpm and blink the LEDs as a metronome.

    Every fourth beat is accented on the other LED.  Ctrl-C returns to
    the prompt; Q or X raises Quit to leave the program.
    """
    while True:
        # The standard Linux clear-screen command.
        os.system("clear")
        while True:
            beats = raw_input("Enter any whole number from 30 to 400 (bpm), Q or X to quit. (100): ")
            if beats.isdigit():
                # BUG FIX: the original compared the raw *string* against
                # the ints 30/400, which never matched under Python 2, so
                # typed tempos were silently rejected; convert first.
                beats = int(beats)
                if beats <= 400 and beats >= 30:
                    break
            elif beats.upper() == "Q" or beats.upper() == "X":
                raise Quit
            elif beats == "":
                beats = 100
                break
        # Seconds per beat for time.sleep(); the -0.125 presumably
        # compensates for per-beat overhead -- TODO confirm on hardware.
        beat = ((60 / float(beats)) - 0.125)
        print("Press Ctrl-C to enter another speed...")
        n = 0
        while True:
            try:
                n += 1
                if n < 4:
                    green(beat / 2)
                else:
                    n = 0
                    red(beat / 2)
                sleep(beat / 2)
            except KeyboardInterrupt:
                # NOTE(review): cleanup() unconfigures the pins, yet the
                # outer loop re-prompts and blinks again -- verify RPi.GPIO
                # tolerates output() after cleanup().
                GPIO.cleanup()
                break
# Entry point: run until the user quits, releasing the GPIO pins on both
# the normal and the Quit exit paths.
try:
    main()
    GPIO.cleanup()
except Quit:
    GPIO.cleanup()
andrewgleave/whim | web/whim/core/time.py | Python | mit | 354 | 0.002825 | from datetime import datetime, timezone, time
import dateparser
def zero_time_with_timezone(date, tz=timezone.utc):
    """Return a timezone-aware datetime at midnight of *date* in *tz*.

    *tz* defaults to UTC.
    """
    midnight = time(tzinfo=tz)
    return datetime.combine(date, midnight)
def attempt_parse_date(val):
    """Best-effort natural-language date parse of *val* (English only).

    Returns a datetime on success, or None when dateparser cannot make
    sense of the input.  (Fixes the corrupted ``def`` line.)
    """
    parsed_date = dateparser.parse(val, languages=['en'])
    if parsed_date is None:
        # try other strategies?
        pass
    return parsed_date
harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/connection.py | Python | gpl-3.0 | 28,351 | 0.000917 | # Copyright (c) 2010 Spotify AB
# Copyright (c) 2010-2011 Yelp
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a connection to the EMR service
"""
import types
import boto
import boto.utils
from boto.ec2.regioninfo import RegionInfo
from boto.emr.emrobject import AddInstanceGroupsResponse, BootstrapActionList, \
Cluster, ClusterSummaryList, HadoopStep, \
InstanceGroupList, InstanceList, JobFlow, \
JobFlowStepList, \
ModifyInstanceGroupsResponse, \
RunJobFlowResponse, StepSummaryList
from boto.emr.step import JarStep
from boto.connection import AWSQueryConnection
from boto.exception import EmrResponseError
class EmrConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
'elasticmapreduce.us-east-1.amazonaws.com')
ResponseError = EmrResponseError
# Constants for AWS Console debugging
DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 security_token=None, validate_certs=True, profile_name=None):
        """Create an EMR connection.

        Falls back to the class-level default region when *region* is not
        given; every other argument is forwarded unchanged to
        AWSQueryConnection.__init__.
        """
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        self.region = region
        # Region must be resolved before super().__init__ because the
        # endpoint is passed into the base constructor.
        super(EmrConnection, self).__init__(aws_access_key_id,
                                            aws_secret_access_key,
                                            is_secure, port, proxy, proxy_port,
                                            proxy_user, proxy_pass,
                                            self.region.endpoint, debug,
                                            https_connection_factory, path,
                                            security_token,
                                            validate_certs=validate_certs,
                                            profile_name=profile_name)
        # Many of the EMR hostnames are of the form:
        #  <region>.<service_name>.amazonaws.com
        # rather than the more common:
        #  <service_name>.<region>.amazonaws.com
        # so we need to explicitly set the region_name and service_name
        # for the SigV4 signing.
        self.auth_region_name = self.region.name
        self.auth_service_name = 'elasticmapreduce'
    def _required_auth_capability(self):
        # EMR requests are signed with AWS Signature Version 4.
        return ['hmac-v4']
def describe_cluster(self, cluster_id):
"""
Describes an Elastic MapReduce cluster
:type cluster_id: str
:param cluster_id: The cluster id of interest
"""
params = {
'ClusterId': cluster_id
}
return self.get_object('DescribeCluster', params, Cluster)
def describe_jobflow(self, jobflow_id):
"""
Describes a single Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: The job flow id of interest
"""
jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
if jobflows:
return jobflows[0]
def describe_jobflows(self, states=None, jobflow_ids=None,
created_after=None, created_before=None):
"""
Retrieve all the Elastic MapReduce job flows on your account
:type states: list
:param states: A list of strings with job flow states wanted
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
:type created_after: datetime
:param created_after: Bound on job flow creation time
:type created_before: dateti | me
:param created_before: Bound on job flow creation time
"""
params = {}
if states:
self.build_list_params(params, states, 'JobFlowStates.member')
if jobflow_ids:
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
| if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
def describe_step(self, cluster_id, step_id):
"""
Describe an Elastic MapReduce step
:type cluster_id: str
:param cluster_id: The cluster id of interest
:type step_id: str
:param step_id: The step id of interest
"""
params = {
'ClusterId': cluster_id,
'StepId': step_id
}
return self.get_object('DescribeStep', params, HadoopStep)
def list_bootstrap_actions(self, cluster_id, marker=None):
"""
Get a list of bootstrap actions for an Elastic MapReduce cluster
:type cluster_id: str
:param cluster_id: The cluster id of interest
:type marker: str
:param marker: Pagination marker
"""
params = {
'ClusterId': cluster_id
}
if marker:
params['Marker'] = marker
return self.get_object('ListBootstrapActions', params, BootstrapActionList)
def list_clusters(self, created_after=None, created_before=None,
cluster_states=None, marker=None):
"""
List Elastic MapReduce clusters with optional filtering
:type created_after: datetime
:param created_after: Bound on cluster creation time
:type created_before: datetime
:param created_before: Bound on cluster creation time
:type cluster_states: list
:param cluster_states: Bound on cluster states
:type marker: str
:param marker: Pagination marker
"""
params = {}
if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
if marker:
params['Marker'] = marker
if cluster_states:
self.build_list_params(params, cluster_states, 'ClusterStates.member')
return self.get_object('ListClusters', params, ClusterSummaryList)
def list_instance_groups(self, cluster_id, marker=None):
"""
List EC2 instance groups in a cluster
:type cluster_id: str
:param cluster_id: The cluster id of interest
:type marker: str
:param marker: Pagination marker
"""
params = {
'ClusterId': cluster_id
}
|
jjgomera/pychemqt | lib/EoS/Cubic/PRSV2.py | Python | gpl-3.0 | 2,556 | 0 | #!/usr/bin/python3
# -*- coding: utf-8 | -*-
r"""Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <j | jgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
from lib.EoS.Cubic.PRSV import PRSV, dat
class PRSV2(PRSV):
    r"""Peng-Robinson cubic equation of state with a modified dependence of
    temperature by Stryjek-Vera v.2 [1]_

    .. math::
        \begin{array}[t]{l}
        P = \frac{RT}{V-b}-\frac{a}{V\left(V+b\right)+b\left(V-b\right)}\\
        a = 0.45747\frac{R^2T_c^2}{P_c}\alpha\\
        b = 0.0778\frac{RT_c}{P_c}\\
        \alpha^{0.5} = 1 + k\left(1-Tr^{0.5}\right)\\
        k = k_0+\left[k_1+k_2\left(k_3-T_r\right)\left(1-\sqrt{T_r}\right)
        \right]\left(1+\sqrt{T_r}\right)\left(0.7-T_r\right)\\
        k_0 = 0.378893+1.4897153\omega-0.17131848\omega^2+0.0196554\omega^3\\
        \end{array}

    :math:`k_1`, :math:`k_2` and :math:`k_3` are parameters characteristic
    compound specific
    """

    __title__ = "PRSV2 (1986)"
    __status__ = "PRSV2"

    # NOTE(review): the trailing comma makes __doi__ a 1-tuple wrapping the
    # dict rather than a bare dict -- confirm downstream consumers expect
    # a tuple here before removing it.
    __doi__ = {
        "autor": "Stryjek, R., Vera, J.H.",
        "title": "PRSV2: A Cubic Equation of State for Accurate Vapor—Liquid "
                 "Equilibria calculations",
        "ref": "Can. J. Chem. Eng. 64 (1986) 820–826",
        "doi": "10.1002/cjce.5450640516"},

    def _k(self, cmp, Tr):
        """Return the PRSV2 temperature-dependence parameter k (Eq. 11)."""
        # Eq 11
        ko = 0.378893 + 1.4897153*cmp.f_acent - \
            0.17131848*cmp.f_acent**2 + 0.0196554*cmp.f_acent**3

        # Compound-specific k1-k3 only apply below the critical temperature.
        if cmp.id in dat and Tr < 1:
            k1, k2, k3 = dat[cmp.id]
        else:
            k1, k2, k3 = 0, 0, 0

        # BUG FIX: the inner factor must be (1 - sqrt(Tr)), matching the
        # class docstring and the 1986 paper; it previously read
        # (1 + Tr**0.5).
        k = ko + (k1+k2*(k3-Tr)*(1-Tr**0.5))*(1+Tr**0.5)*(0.7-Tr)
        return k
if __name__ == "__main__":
    # Quick manual check: build a pure-component mixture (component id 4)
    # and print saturated molar volumes at two pressures for comparison
    # against literature values.
    from lib.mezcla import Mezcla
    mix = Mezcla(5, ids=[4], caudalMolar=1, fraccionMolar=[1])
    eq = PRSV2(300, 9.9742e5, mix)
    print('%0.0f %0.1f' % (eq.Vg.ccmol, eq.Vl.ccmol))
    eq = PRSV2(300, 42.477e5, mix)
    print('%0.1f' % (eq.Vl.ccmol))
|
johnkeepmoving/oss-ftp | python27/win32/Lib/site-packages/pyftpdlib/servers.py | Python | mit | 20,552 | 0 | # Copyright (C) 2007-2016 Giampaolo Rodola' <g.rodola@gmail.com>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
"""
This module contains the main FTPServer class which listens on a
host:port and dispatches the incoming connections to a handler.
The concurrency is handled asynchronously by the main process thread,
meaning the handler cannot block otherwise the whole server will hang.
Other than that we have 2 subclasses changing the asynchronous concurrency
model using multiple threads or processes.
You might be interested in these in case your code contains blocking
parts which cannot be adapted to the base async model or if the
underlying filesystem is particularly slow, see:
https://github.com/giampaolo/pyftpdlib/issues/197
https://github.com/giampaolo/pyftpdlib/issues/212
Two classes are provided:
- ThreadingFTPServer
- MultiprocessFTPServer
...spawning a new thread or process every time a client connects.
The main thread will be async-based and be used only to accept new
connections.
Every time a new connection comes in that will be dispatched to a
separate thread/process which internally will run its own IO loop.
This way the handler handling that connections will be free to block
without hanging the whole FTP server.
"""
import errno
import os
import select
import signal
import sys
import time
import traceback
from .ioloop import Acceptor
from .ioloop import IOLoop
from .log import config_logging
from .log import debug
from .log import is_logging_configured
from .log import logger
__all__ = ['FTPServer']
_BSD = 'bsd' in sys.platform
# ===================================================================
# --- base class
# ===================================================================
class FTPServer(Acceptor):
"""Creates a socket listening on <address>, dispatching the requests
to a <handler> (typically FTPHandler class).
Depending on the type of address specified IPv4 or IPv6 connections
(or both, depending from the underlying system) will be accepted.
All relevant session information is stored in class attributes
described below.
- (int) max_cons:
number of maximum simultaneous connections accepted (defaults
to 512). Can be set to 0 for unlimited but it is recommended
to always have a limit to avoid running out of file descriptors
(DoS).
- (int) max_cons_per_ip:
number of maximum connections accepted for the same IP address
(defaults to 0 == unlimited).
"""
max_cons = 512
max_cons_per_ip = 0
    def __init__(self, address_or_socket, handler, ioloop=None, backlog=100):
        """Create a server listening on *address_or_socket*, dispatching
        incoming connections to *handler*.

         - (tuple or socket) address_or_socket: the (host, port) pair on
           which the command channel will listen for incoming connections,
           or an existing socket object (detected by its listen() method).

         - (instance) handler: the handler class to use.

         - (instance) ioloop: a pyftpdlib.ioloop.IOLoop instance.

         - (int) backlog: the maximum number of queued connections
           passed to listen(); when the queue is full the client may
           get ECONNRESET.
        """
        Acceptor.__init__(self, ioloop=ioloop)
        self.handler = handler
        self.backlog = backlog
        self.ip_map = []
        # in case of FTPS class not properly configured we want errors
        # to be raised here rather than later, when client connects
        if hasattr(handler, 'get_ssl_context'):
            handler.get_ssl_context()
        # A pre-built socket is recognized by its listen() attribute;
        # otherwise treat the argument as a (host, port) address.
        if callable(getattr(address_or_socket, 'listen', None)):
            sock = address_or_socket
            sock.setblocking(0)
            self.set_socket(sock)
        else:
            self.bind_af_unspecified(address_or_socket)
        self.listen(backlog)
@property
def address(self):
return self.socket.getsockname()[:2]
    def _map_len(self):
        # Number of socket fds currently registered with the IO loop.
        return len(self.ioloop.socket_map)
def _accept_new_cons(self):
"""Return True if the server is willing to accept new connections."""
if not self.max_cons:
return True
else:
return self._map_len() <= self.max_cons
    def _log_start(self):
        # Emit the startup banner plus a configuration summary to the
        # logger, configuring default (stderr) logging first if the user
        # hasn't set any up.
        def get_fqname(obj):
            # Fully-qualified name for instances, then classes/functions,
            # falling back to str() for anything else.
            try:
                return obj.__module__ + "." + obj.__class__.__name__
            except AttributeError:
                try:
                    return obj.__module__ + "." + obj.__name__
                except AttributeError:
                    return str(obj)

        if not is_logging_configured():
            # If we get to this point it means the user hasn't
            # configured any logger. We want logging to be on
            # by default (stderr).
            config_logging()

        if self.handler.passive_ports:
            pasv_ports = "%s->%s" % (self.handler.passive_ports[0],
                                     self.handler.passive_ports[-1])
        else:
            pasv_ports = None
        addr = self.address
        if hasattr(self.handler, 'ssl_protocol'):
            proto = "FTP+SSL"
        else:
            proto = "FTP"
        logger.info(">>> starting %s server on %s:%s, pid=%i <<<"
                    % (proto, addr[0], addr[1], os.getpid()))
        # Report which concurrency model this (sub)class implements.
        if ('ThreadedFTPServer' in __all__ and
                issubclass(self.__class__, ThreadedFTPServer)):
            logger.info("concurrency model: multi-thread")
        elif ('MultiprocessFTPServer' in __all__ and
                issubclass(self.__class__, MultiprocessFTPServer)):
            logger.info("concurrency model: multi-process")
        elif issubclass(self.__class__, FTPServer):
            logger.info("concurrency model: async")
        logger.info("masquerade (NAT) address: %s",
                    self.handler.masquerade_address)
        logger.info("passive ports: %s", pasv_ports)
        logger.debug("poller: %r", get_fqname(self.ioloop))
        logger.debug("authorizer: %r", get_fqname(self.handler.authorizer))
        if os.name == 'posix':
            logger.debug("use sendfile(2): %s", self.handler.use_sendfile)
        logger.debug("handler: %r", get_fqname(self.handler))
        logger.debug("max connections: %s", self.max_cons or "unlimited")
        logger.debug("max connections per ip: %s",
                     self.max_cons_per_ip or "unlimited")
        logger.debug("timeout: %s", self.handler.timeout or "unlimited")
        logger.debug("banner: %r", self.handler.banner)
        logger.debug("max login attempts: %r", self.handler.max_login_attempts)
        if getattr(self.handler, 'certfile', None):
            logger.debug("SSL certfile: %r", self.handler.certfile)
        if getattr(self.handler, 'keyfile', None):
            logger.debug("SSL keyfile: %r", self.handler.keyfile)
def serve_forever(self, timeout=None, blocking=True, handle_exit=True):
"""Start serving.
- (float) timeout: the timeout passed to the underlying IO
loop expressed in seconds (default 1.0).
- (bool) blocking: if False loop once and then return the
timeout of the next scheduled call next to expire soonest
(if any).
- (bool) handle_exit: when True catches KeyboardInterrupt and
SystemExit exceptions (generally caused by SIGTERM / SIGINT
signals) and gracefully exits after cleaning up resources.
Also, logs server start and stop.
"""
if handle_exit:
log = handle_exit and blocking
if log:
self._log_start()
try:
self.ioloop.loop(timeout, blocking)
except (KeyboardInterrupt, SystemExit):
logger.info("received interrupt signal")
if blocking:
if log:
logger.info(
">>> shutting down FTP server (%s active socket "
"fds) <<<",
self._map_len())
self.close_all()
else:
|
tobami/littlechef | tests/test_command.py | Python | apache-2.0 | 10,393 | 0.000192 | import unittest
import subprocess
import os
import platform
import shutil
from os.path import join, normpath, abspath, split
import sys
env_path = "/".join(os.path.dirname(os.path.abspath(__file__)).split('/')[:-1])
sys.path.insert(0, env_path)
import littlechef
# Set some convenience variables
test_path = split(normpath(abspath(__file__)))[0]
littlechef_top = normpath(join(test_path, '..'))
if platform.system() == 'Windows':
fix = join(littlechef_top, 'fix.cmd')
WIN32 = True
else:
fix = join(littlechef_top, 'fix')
WIN32 = False
class BaseTest(unittest.TestCase):
    """Shared fixture: pins the working directory to the test kitchen and
    runs the "fix" command line, capturing its output."""

    def setUp(self):
        """Change to the test directory"""
        self.set_location()

    def set_location(self, location=test_path):
        """Change directories to a known location"""
        os.chdir(location)

    def execute(self, call):
        """Executes a command and returns stdout and stderr"""
        # The WIN32 and POSIX branches were byte-identical; one Popen call
        # covers both platforms.
        proc = subprocess.Popen(call,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        return proc.communicate()
class TestConfig(BaseTest):
    """CLI-level tests for global flags and kitchen detection."""

    def tearDown(self):
        # Undo any chdir done by individual tests.
        self.set_location()

    def test_not_a_kitchen(self):
        """Should exit with error when not a kitchen directory"""
        # Change to parent dir, which has no nodes/cookbooks/roles dir
        self.set_location(littlechef_top)
        # Call fix from the current directory above "tests/"
        resp, error = self.execute([fix, 'node:a'])
        self.assertTrue("Fatal error" in error, resp)
        self.assertTrue(
            'No {0} file found'.format(littlechef.CONFIGFILE) in error, error)
        # assertEquals is a deprecated alias; use assertEqual
        self.assertEqual(resp, "", resp)

    def test_version(self):
        """Should output the correct Little Chef version"""
        resp, error = self.execute([fix, '-v'])
        self.assertEqual(resp, "",
                         "Response should be empty, version should be in stderr")
        self.assertTrue(
            'LittleChef {0}'.format(littlechef.__version__) in error)

    def test_list_commands(self):
        """Should output a list of available commands"""
        resp, error = self.execute([fix, '-l'])
        self.assertEqual(error, "")
        expected = "Starts a Chef Solo configuration run"
        self.assertTrue(expected in resp)
        commands = resp.split('\nAvailable commands:\n')[-1]
        # list comprehension instead of filter(): same result on Python 2
        # and keeps len() working on Python 3
        commands = [line for line in commands.split('\n') if line]
        self.assertEqual(len(commands), 21)

    def test_verbose(self):
        """Should turn on verbose output"""
        resp, error = self.execute([fix, '--verbose', 'list_nodes'])
        self.assertEqual(error, "", error)
        self.assertTrue('Verbose output on' in resp, resp)

    def test_debug(self):
        """Should turn on debug loglevel"""
        resp, error = self.execute([fix, '--debug', 'list_nodes'])
        self.assertEqual(error, "", error)
        self.assertTrue('Debug level on' in resp, resp)
|
class TestEnvironment(BaseTest):
    """CLI-level tests for the --env flag."""

    def test_no_valid_value(self):
        """Should error out when the env value is empty or is a fabric task"""
        resp, error = self.execute([fix, 'list_nodes', '--env'])
        # fixed corrupted line; assertEquals is a deprecated alias
        self.assertEqual(resp, "")
        self.assertTrue(
            "error: argument -e/--env: expected one argument" in error, error)

        resp, error = self.execute([fix, '--env', 'list_nodes'])
        self.assertEqual(resp, "")
        self.assertTrue("error: No value given for --env" in error, error)

        cmd = [fix, '--env', 'nodes_with_role:base', 'role:base']
        resp, error = self.execute(cmd)
        self.assertEqual(resp, "")
        self.assertTrue("error: No value given for --env" in error, error)

    def test_valid_environment(self):
        """Should set the chef_environment value when one is given"""
        resp, error = self.execute([fix, 'list_nodes', '--env', 'staging'])
        self.assertEqual(error, "", error)
        self.assertTrue("Environment: staging" in resp, resp)
class TestRunner(BaseTest):
    # End-to-end tests for "fix node:..." execution and plugin dispatch.

    def test_no_node_given(self):
        """Should abort when no node is given"""
        resp, error = self.execute([fix, 'node:'])
        self.assertTrue("Fatal error: No node was given" in error)

    def test_plugin(self):
        """Should execute the given plugin"""
        # missing plugin file
        resp, error = self.execute([fix, 'node:testnode1', 'plugin:notthere'])
        expected = ", could not find 'notthere.py' in the plugin directory"
        self.assertTrue(expected in error, resp + error)
        # plugin with a syntax error
        resp, error = self.execute([fix, 'node:testnode1', 'plugin:bad'])
        expected = "Found plugin 'bad', but it seems to have a syntax error:"
        expected += " invalid syntax (bad.py, line 6)"
        self.assertTrue(expected in error, resp + error)
        # valid plugin
        resp, error = self.execute([fix, 'node:testnode1', 'plugin:dummy'])
        expected = "Executing plugin '{0}' on {1}".format("dummy", "testnode1")
        self.assertTrue(expected in resp, resp + error)

    def test_list_plugins(self):
        """Should print a list of available plugins"""
        resp, error = self.execute([fix, 'list_plugins'])
        self.assertTrue("List of available plugins:" in resp, resp)
        self.assertTrue("bad: Plugin has a syntax error" in resp, resp)
        self.assertTrue("dummy: Dummy LittleChef plugin" in resp, resp)
class TestCookbooks(BaseTest):
    # Tests for recipe listing and cookbook metadata validation.

    def test_list_recipes(self):
        """Should list available recipes"""
        resp, error = self.execute([fix, 'list_recipes'])
        self.assertEquals(error, "")
        self.assertTrue('subversion::client' in resp)
        self.assertTrue('subversion::server' in resp)

    def test_list_recipes_site_cookbooks(self):
        """Should give priority to site-cookbooks information"""
        resp, error = self.execute([fix, 'list_recipes'])
        self.assertTrue('Modified by site-cookbooks' in resp)

    def test_list_recipes_detailed(self):
        """Should show a detailed list of available recipes"""
        resp, error = self.execute([fix, 'list_recipes_detailed'])
        self.assertTrue('subversion::client' in resp)
        for field in ['description', 'version', 'dependencies', 'attributes']:
            self.assertTrue(field in resp)

    def test_list_recipes_detailed_site_cookbooks(self):
        """Should show a detailed list of available recipes with site-cookbook
        priority
        """
        resp, error = self.execute([fix, 'list_recipes_detailed'])
        # 0.8.4 is the version declared by the site-cookbook override
        self.assertTrue('0.8.4' in resp)

    def test_no_metadata(self):
        """Should abort if cookbook has no metadata.json"""
        bad_cookbook = join(test_path, 'cookbooks', 'bad_cookbook')
        os.mkdir(bad_cookbook)
        try:
            resp, error = self.execute([fix, 'list_recipes'])
        except OSError:
            self.fail("Couldn't execute {0}".format(fix))
        finally:
            # always remove the temporary bad cookbook dir
            os.rmdir(bad_cookbook)
        expected = 'Fatal error: Cookbook "bad_cookbook" has no metadata.json'
        self.assertTrue(expected in error)
class TestListRoles(BaseTest):
    """Listing of Chef roles via the "fix" command line."""

    def test_list_roles(self):
        """Should list all roles"""
        output, _ = self.execute([fix, 'list_roles'])
        self.assertTrue('base' in output)
        self.assertTrue('example aplication' in output)

    def test_list_roles_detailed(self):
        """Should show a detailed list of all roles"""
        output, _ = self.execute([fix, 'list_roles_detailed'])
        self.assertTrue('base' in output)
        self.assertTrue('example aplication' in output)
class TestListNodes(BaseTest):
def test_list_nodes(self):
"""Should list all nodes"""
resp, error = self.execute([fix, 'list_nodes'])
for node in ['testnode1', 'testnode2', 'testnode3.mydomain.com']:
self.assertTrue(node in resp)
self.assertTrue('Recipes: subversion' in resp)
def test_list_nodes_in_env(self):
"""Should list all nodes in an environment"""
resp, error = self.exec |
IsCoolEntertainment/debpkg_libcloud | libcloud/test/storage/test_azure_blobs.py | Python | apache-2.0 | 37,730 | 0.001272 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific l | anguage governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import unittest
import tempfile
from xml.etree import ElementTree as ET
fr | om libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver
from libcloud.storage.drivers.azure_blobs import AZURE_BLOCK_MAX_SIZE
from libcloud.storage.drivers.azure_blobs import AZURE_PAGE_CHUNK_SIZE
from libcloud.storage.drivers.dummy import DummyIterator
from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611
from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_AZURE_BLOBS_PARAMS
class AzureBlobsMockHttp(StorageMockHttp, MockHttpTestCase):
fixtures = StorageFileFixtures('azure_blobs')
base_headers = {}
    def _UNAUTHORIZED(self, method, url, body, headers):
        # Canned 401 response; exercises the driver's InvalidCredsError path.
        return (httplib.UNAUTHORIZED,
                '',
                self.base_headers,
                httplib.responses[httplib.UNAUTHORIZED])
    def _list_containers_EMPTY(self, method, url, body, headers):
        # Container listing with no entries, served from a fixture file.
        body = self.fixtures.load('list_containers_empty.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _list_containers(self, method, url, body, headers):
        # Paginated container listing: presence of a 'marker' query
        # parameter selects the second-page fixture.
        query_string = urlparse.urlsplit(url).query
        query = parse_qs(query_string)
        if 'marker' not in query:
            body = self.fixtures.load('list_containers_1.xml')
        else:
            body = self.fixtures.load('list_containers_2.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _test_container_EMPTY(self, method, url, body, headers):
        # DELETE: container removal is accepted (202); any other verb gets
        # an empty object listing from the fixture.
        if method == 'DELETE':
            body = ''
            return (httplib.ACCEPTED,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.ACCEPTED])
        else:
            body = self.fixtures.load('list_objects_empty.xml')
            return (httplib.OK,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.OK])
    def _new__container_INVALID_NAME(self, method, url, body, headers):
        # 400 for an invalid container name; echoes the request body back.
        return (httplib.BAD_REQUEST,
                body,
                self.base_headers,
                httplib.responses[httplib.BAD_REQUEST])
    def _test_container(self, method, url, body, headers):
        # Paginated object listing for one container; 'marker' selects
        # the second page, mirroring _list_containers above.
        query_string = urlparse.urlsplit(url).query
        query = parse_qs(query_string)
        if 'marker' not in query:
            body = self.fixtures.load('list_objects_1.xml')
        else:
            body = self.fixtures.load('list_objects_2.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _test_container100(self, method, url, body, headers):
        # Only HEAD is expected on this path; the container does not exist,
        # so a well-formed request is answered with 404.
        body = ''
        if method != 'HEAD':
            return (httplib.BAD_REQUEST,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.BAD_REQUEST])
        return (httplib.NOT_FOUND,
                body,
                self.base_headers,
                httplib.responses[httplib.NOT_FOUND])
    def _test_container200(self, method, url, body, headers):
        # HEAD on an existing container: 200 with Azure lease/metadata
        # headers the driver parses into Container extras.
        body = ''
        if method != 'HEAD':
            return (httplib.BAD_REQUEST,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.BAD_REQUEST])
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
        headers['x-ms-lease-status'] = 'unlocked'
        headers['x-ms-lease-state'] = 'available'
        headers['x-ms-meta-meta1'] = 'value1'
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])
    def _test_container200_test(self, method, url, body, headers):
        # HEAD on an existing blob: 200 with object metadata headers
        # (content-length/type, blob type, lease info, custom meta).
        body = ''
        if method != 'HEAD':
            return (httplib.BAD_REQUEST,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.BAD_REQUEST])
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
        headers['content-length'] = 12345
        headers['content-type'] = 'application/zip'
        headers['x-ms-blob-type'] = 'Block'
        headers['x-ms-lease-status'] = 'unlocked'
        headers['x-ms-lease-state'] = 'available'
        headers['x-ms-meta-rabbits'] = 'monkeys'
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])
    def _test2_test_list_containers(self, method, url, body, headers):
        # test_get_object
        # Note the 'x-amz-' (S3-style) meta header here, unlike the
        # 'x-ms-' headers used in the other responders.
        body = self.fixtures.load('list_containers.xml')
        headers = {'content-type': 'application/zip',
                   'etag': '"e31208wqsdoj329jd"',
                   'x-amz-meta-rabbits': 'monkeys',
                   'content-length': 12345,
                   'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT'
                   }
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])
    def _new_container_ALREADY_EXISTS(self, method, url, body, headers):
        # test_create_container
        # 409 -> driver raises ContainerAlreadyExistsError.
        return (httplib.CONFLICT,
                body,
                headers,
                httplib.responses[httplib.CONFLICT])
    def _new_container(self, method, url, body, headers):
        # test_create_container, test_delete_container
        headers = {}
        if method == 'PUT':
            status = httplib.CREATED
            headers['etag'] = '0x8CFB877BB56A6FB'
            headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
            headers['x-ms-lease-status'] = 'unlocked'
            headers['x-ms-lease-state'] = 'available'
            headers['x-ms-meta-meta1'] = 'value1'
        elif method == 'DELETE':
            status = httplib.NO_CONTENT
        # NOTE(review): 'status' is unbound for any verb other than
        # PUT/DELETE -- fine for the two tests named above, but any other
        # verb would raise UnboundLocalError here.
        return (status,
                body,
                headers,
                httplib.responses[status])
    def _new_container_DOESNT_EXIST(self, method, url, body, headers):
        # test_delete_container
        # 404 -> driver raises ContainerDoesNotExistError.
        return (httplib.NOT_FOUND,
                body,
                headers,
                httplib.responses[httplib.NOT_FOUND])
def _foo_bar_container_NOT_FOUND(self, method, url, body, headers):
# test_delete_container_not_found
return (httplib.NOT_FO |
taw/python_koans | python3/koans/about_regex.py | Python | mit | 4,842 | 0.006402 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import re
class AboutRegex(Koan):
    """
    These koans are based on Ben Forta's book "Sams Teach Yourself
    Regular Expressions in 10 Minutes"
    (http://www.forta.com/books/0672325667/).  They were written to
    practice everything learned from that book.
    """
    def test_matching_literal_text(self):
        """
        Lesson 1: matching literal strings.
        """
        string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes."
        m = re.search(r'Felix', string)
        self.assertTrue(m and m.group(0) and m.group(0)== 'Felix', "I want my name")
    def test_matching_literal_text_how_many(self):
        """
        Lesson 1: how many matches?
        The default behaviour of most regular expression engines is to return just the first match.
        In python you have the next options:
            match()     --> Determine if the RE matches at the beginning of the string.
            search()    --> Scan through a string, looking for any location where this RE matches.
            findall()   --> Find all substrings where the RE matches, and returns them as a list.
            finditer()  --> Find all substrings where the RE matches, and returns them as an iterator.
        """
        string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes. Repeat My name is Felix"
        m = len(re.findall('Felix', string)) # TIP: match() only looks at the start; findall counts every occurrence
        # Count how many times the name appears in the string.
        self.assertEqual(m, 2)
    def test_matching_literal_text_not_case_sensitivity(self):
        """
        Lesson 1: case-insensitive literal matching.
        Most regex implementations also support matches that are not case sensitive. In python you can use re.IGNORECASE; in
        Javascript you can specify the optional i flag.
        Ben's book covers more languages.
        """
        string = "Hello, my name is Felix or felix and this koans is based on the Ben's book: Regular Expressions in 10 minutes."
        self.assertEqual(re.findall("felix", string), ['felix'])
        self.assertEqual(re.findall("felix", string, re.IGNORECASE), ['Felix', 'felix'])
    def test_matching_any_character(self):
        """
        Lesson 1: matching any character.
        The . metacharacter matches any single character: letters, digits, even '.' itself.
        """
        string = "pecks.xlx\n" \
                 + "orders1.xls\n" \
                 + "apec1.xls\n" \
                 + "na1.xls\n" \
                 + "na2.xls\n" \
                 + "sa1.xls"
        # TIP: remember the name of this lesson
        change_this_search_string = r'a.\.xl.' # matches na1.xls, na2.xls and sa1.xls
        self.assertEquals(len(re.findall(change_this_search_string, string)),3)
    def test_matching_set_character(self):
        """
        Lesson 2: matching sets of characters.
        A set of characters is defined using the metacharacters [ and ]. Everything between them is part of the set and
        any one of the set members must match (but not all).
        """
        string = "sales.xlx\n" \
                 + "sales1.xls\n" \
                 + "orders3.xls\n" \
                 + "apac1.xls\n" \
                 + "sales2.xls\n" \
                 + "na1.xls\n" \
                 + "na2.xls\n" \
                 + "sa1.xls\n" \
                 + "ca1.xls"
        # I want to find all files for North America(na) or South America(sa), but not (ca)
        # TIP: the pattern .a. from the previous test would match here too, but it matches more than you want
        change_this_search_string = '[ns]a[0-9].xls'
        self.assertEquals(len(re.findall(change_this_search_string, string)),3)
    def test_anything_but_matching(self):
        """
        Lesson 2: using negated character sets.
        Occasionally, you'll want a list of characters that you don't want to match.
        Character sets can be negated using the ^ metacharacter.
        """
        string = "sales.xlx\n" \
                 + "sales1.xls\n" \
                 + "orders3.xls\n" \
                 + "apac1.xls\n" \
                 + "sales2.xls\n" \
                 + "sales3.xls\n" \
                 + "europe2.xls\n" \
                 + "sam.xls\n" \
                 + "na1.xls\n" \
                 + "na2.xls\n" \
                 + "sa1.xls\n" \
                 + "ca1.xls"
        # I want to find the name sam
        change_this_search_string = '[^nc]am.xls'
        self.assertEquals(re.findall(change_this_search_string, string), ['sam.xls'])
|
hackedd/gw2api | gw2api/items.py | Python | mit | 4,068 | 0 | from .util import get_cached
__all__ = ("items", "recipes", "item_details", "recipe_details")
def items():
    """Return the ids of all items discovered by players in the game.

    Details about a single item can be obtained using the
    :func:`item_details` resource.
    """
    response = get_cached("items.json")
    return response.get("items")
def recipes():
    """Return the ids of all recipes discovered by players in the game.

    Details about a single recipe can be obtained using the
    :func:`recipe_details` resource.
    """
    response = get_cached("recipes.json")
    return response.get("recipes")
def item_details(item_id, lang="en"):
    """Return details about a single item.

    :param item_id: The item to query for.
    :param lang: The language to display the texts in.

    The response is an object with at least the following properties
    (availability of some depends on the item type): ``item_id``,
    ``name``, ``description``, ``type``, ``level``, ``rarity`` (one of
    ``Junk``, ``Basic``, ``Fine``, ``Masterwork``, ``Rare``, ``Exotic``,
    ``Ascended`` or ``Legendary``), ``vendor_value``, ``icon_file_id``,
    ``icon_file_signature``, ``game_types`` (``Activity``, ``Dungeon``,
    ``Pve``, ``Pvp``, ``PvpLobby``, ``WvW``), ``flags`` (for example
    ``AccountBound``, ``NoSell``, ``SoulbindOnAcquire``, ``Unique``) and
    ``restrictions`` (race names).  Each item type adds an
    `additional key`_ with type-specific information.

    .. _additional key: item-properties.html
    """
    query = {"item_id": item_id, "lang": lang}
    cache_file = "item_details.%s.%s.json" % (item_id, lang)
    return get_cached("item_details.json", cache_file, params=query)
def recipe_details(recipe_id, lang="en"):
    """Return details about a single recipe.

    :param recipe_id: The recipe to query for.
    :param lang: The language to display the texts in.

    The response is an object with the properties ``recipe_id``,
    ``type`` (of the produced item), ``output_item_id``,
    ``output_item_count``, ``min_rating``, ``time_to_craft_ms``,
    ``disciplines`` (crafting disciplines that can use the recipe),
    ``flags`` (``AutoLearned`` for recipes that don't have to be
    discovered, ``LearnedFromItem`` for recipes that need a recipe
    sheet) and ``ingredients`` -- a list of objects, each with an
    ``item_id`` and a ``count``.
    """
    query = {"recipe_id": recipe_id, "lang": lang}
    cache_file = "recipe_details.%s.%s.json" % (recipe_id, lang)
    return get_cached("recipe_details.json", cache_file, params=query)
|
50thomatoes50/Ardulan | http_serv.py | Python | gpl-2.0 | 202 | 0.014851 | #!/usr/bin | /env python
from threading import Thread
if(__name__ == '__main__'):
    # Start the web server and point the default browser at it.
    import webbrowser
    # NOTE(review): WebServer is neither defined nor imported in this file
    # as shown -- as written this line raises NameError.  Confirm where it
    # is meant to come from (likely a missing import).
    web = WebServer()
    webbrowser.open("http://127.0.0.1:8888")
    web.start()
ssebastianj/ia2013-tpi-rl | src/gui/qtgen/gwgenrndestadosdialog.py | Python | mit | 1,334 | 0.003748 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Sebastian\Mis documentos\Programacion\Proyectos\IA2013TPIRL\gui\qt\IA2013TPIRLGUI\gwgenrndestadosdialog.ui'
#
# Created: Tue Jul 09 15:27:46 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 with the QString class available (Python 2 / API v1).
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString is absent (API v2 / Python 3): strings are already unicode.
    def _fromUtf8(s):
        return s
try:
    # Older PyQt4: translate() takes an explicit encoding argument.
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 dropped the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_GWGenRndEstadosDialog(object):
    # Generated by pyuic4 from gwgenrndestadosdialog.ui -- edits here are
    # lost when the .ui file is recompiled (see the header warning).
    def setupUi(self, GWGenRndEstadosDialog):
        # Build the 400x300 modal dialog and wire up auto-connected slots.
        GWGenRndEstadosDialog.setObjectName(_fromUtf8("GWGenRndEstadosDialog"))
        GWGenRndEstadosDialog.resize(400, 300)
        GWGenRndEstadosDialog.setModal(True)
        self.retranslateUi(GWGenRndEstadosDialog)
        QtCore.QMetaObject.connectSlotsByName(GWGenRndEstadosDialog)
    def retranslateUi(self, GWGenRndEstadosDialog):
        # Install the translatable texts (window title is Spanish by design).
        GWGenRndEstadosDialog.setWindowTitle(_translate("GWGenRndEstadosDialog", "Generar estados aleatorios", None))
yangle/HaliteIO | website/tutorials/machinelearning/TrainMatt.py | Python | mit | 4,981 | 0.003212 | from hlt import *
from networking import *
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD, Adam, RMSprop
from os import listdir, remove
from os.path import join, isfile
def loadGame(filename):
    """Parse a Halite replay file.

    Returns (mattID, frames, moves): the player id of 'adereth', one
    GameMap per frame, and per-frame {(x, y): move_byte} dicts.
    """
    def stringUntil(gameFile, endChar):
        # Read bytes one at a time until endChar; returns the decoded text
        # without the terminator.
        returnString = ""
        byte = gameFile.read(1)
        while byte != endChar.encode("utf-8"):
            returnString += byte.decode("utf-8")
            byte = gameFile.read(1)
        return returnString
    mattID = None
    frames = []
    moves = []
    gameFile = open(filename, "rb")
    try:
        # Skip the format/version line.
        stringUntil(gameFile, "\n")
        # Get metadata: width, height, player count, frame count.
        metadata = stringUntil(gameFile, "\n")
        components = metadata.split(" ")
        width = int(components.pop(0))
        height = int(components.pop(0))
        numPlayers = int(components.pop(0))
        numFrames = int(components.pop(0))
        # Get matt's playerID (player names are NUL-terminated, ids 1-based).
        for playerID in range(1, numPlayers+1):
            name = stringUntil(gameFile, "\0")
            if name == "adereth":
                mattID = playerID
        stringUntil(gameFile, "\n")
        # Get production: one byte per tile, row-major.
        productions = [int.from_bytes(gameFile.read(1), byteorder='big') for a in range(width*height)]
        gameFile.read(1)
        # Get the frames and moves
        for frameIndex in range(numFrames-1):
            # Frames: run-length encoded as (count, owner) byte pairs, then
            # one strength byte per tile in the run.
            frames.append(GameMap(width=width, height=height, numberOfPlayers=numPlayers))
            x = 0
            y = 0
            while y < height:
                numTiles = int.from_bytes(gameFile.read(1), byteorder='big')
                ownerID = int.from_bytes(gameFile.read(1), byteorder='big')
                strengths = []
                # NOTE(review): 'strengths' is assigned but never used.
                for a in range(numTiles):
                    frames[-1].contents[y][x] = Site(ownerID, int.from_bytes(gameFile.read(1), byteorder='big'), productions[y*width + x])
                    x += 1
                    if x == width:
                        x = 0
                        y += 1
                    if y == height:
                        break
            # Moves: one byte per tile, row-major, keyed by (x, y).
            moves.append({(index % width, math.floor(index/width)):int.from_bytes(gameFile.read(1), byteorder='big') for index in range(width*height)})
    finally:
        gameFile.close()
    return mattID, frames, moves
def getNNData():
    """Build (inputs, correctOutputs) training pairs from replays.

    For every tile owned by 'adereth' in every frame, the input is the
    3x8 = 24 features of its eight neighbours (ownership sign, strength
    scaled by 255, production scaled by the map maximum) and the target
    is a one-hot vector over the 5 possible moves.
    """
    inputs = []
    correctOutputs = []
    gamePath = "replays"
    for filename in [f for f in listdir(gamePath) if isfile(join(gamePath, f))]:
        print("Loading " + filename)
        mattID, frames, moves = loadGame(join(gamePath, filename))
        # Find the map's maximum production for per-game normalisation.
        maxProduction = 0
        for y in range(frames[0].height):
            for x in range(frames[0].width):
                prod = frames[0].getSite(Location(x, y)).production
                if prod > maxProduction:
                    maxProduction = prod
        for turnIndex in range(len(moves)):
            gameMap = frames[turnIndex]
            for y in range(gameMap.height):
                for x in range(gameMap.width):
                    loc = Location(x, y)
                    if gameMap.getSite(loc).owner == mattID:
                        # The eight neighbours, clockwise from NW.
                        box = [gameMap.getSite(gameMap.getLocation(loc, NORTH), WEST), gameMap.getSite(loc, NORTH), gameMap.getSite(gameMap.getLocation(loc, NORTH), EAST), gameMap.getSite(loc, EAST), gameMap.getSite(gameMap.getLocation(loc, SOUTH), EAST), gameMap.getSite(loc, SOUTH), gameMap.getSite(gameMap.getLocation(loc, SOUTH), WEST), gameMap.getSite(loc, WEST)]
                        nnInput = []
                        for site in box:
                            nnInput += [1 if site.owner == mattID else -1, float(site.strength / 255), float(site.production / maxProduction)]
                        inputs.append(nnInput)
                        correctOutputs.append([1 if a == moves[turnIndex][(x, y)] else 0 for a in range(5)])
    return inputs, correctOutputs
def trainModel():
    """Train a 24->24->24->5 dense softmax policy net on replay data.

    Samples are split 50/50 (no shuffling): first half trains, second
    half is used as validation and test data.  The architecture is
    written to my_model_architecture.json and the weights to
    my_model_weights.h5.
    """
    inputs, correctOutputs = getNNData()
    print("Collected data")
    trainingInputs = inputs[:len(inputs)//2]
    trainingOutputs = correctOutputs[:len(correctOutputs)//2]
    testInputs = inputs[len(inputs)//2:]
    testOutputs = correctOutputs[len(correctOutputs)//2:]
    model = Sequential()
    model.add(Dense(24, input_shape=(24, )))
    model.add(Activation('tanh'))
    model.add(Dense(24))
    model.add(Activation('tanh'))
    model.add(Dense(5))
    model.add(Activation('softmax'))
    model.summary()
    model.compile(loss='mean_squared_error', optimizer=SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True))
    model.fit(trainingInputs, trainingOutputs, validation_data=(testInputs, testOutputs))
    score = model.evaluate(testInputs, testOutputs, verbose=0)
    print(score)
    json_string = model.to_json()
    # Use a context manager so the architecture file is closed/flushed even
    # if the write fails -- the previous open(...).write(...) leaked the
    # file handle.
    with open('my_model_architecture.json', 'w') as arch_file:
        arch_file.write(json_string)
    model.save_weights('my_model_weights.h5', overwrite=True)
trainModel()
|
sknepneklab/SAMoS | analysis/batch_polar/batch_analyze_J10.py | Python | gpl-3.0 | 4,528 | 0.04439 | # ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Integrator code for batch processing of full data runs (incorporating parts of earlier analysis scripts)
# Data interfacing
from read_data import *
from read_param import *
# Pre-existing analysis scripts
from energy_profile_lib_v import *
#from glob import glob
#from StressEnergy_plot import StressEnergy
#class GetAnalysis:
#def __init__(self,folder,outfilename,skip):
#self.outfilename = outfilename
#self.folder = folder
#self.skip = skip
#self.param = Param(self.folder)
#def CollectProfiles(self):
#[theta_bin,en_prof, vel_prof, press_prof, tot_histo, rho_prof, north_pole]=EnProf.getProfiles(self.folder,self.skip,180,self.param.r,self.param.k)
# This is the structured data file hierarchy. Repl | ace as appropriate (do not go the Yaouen way and fully automatize ...)
# Raw runs live under basefolder; aggregated profiles are written to
# outfolder.  Adjust these paths per machine.
basefolder = '/home/silke/Documents/CurrentProjects/Rastko/Runs/'
outfolder= '/home/silke/Documents/CurrentProjects/Rastko/analysis/'
#JList=['10', '1', '0.1', '0.01']
# Parameter sweep: self-propulsion speeds v0; this script handles J=10 only.
vList=['0.005','0.01','0.02','0.05','0.1','0.2','0.5','1']
JList=['10']
nu_r='0.002'
phi='1'
sigma=1
# Snapshot bookkeeping: nstep total steps, one snapshot every nsave steps;
# the first half of the nsnap snapshots is skipped as equilibration.
nstep=10000000
nsave=10000
nsnap=int(nstep/nsave)
skip=int(nsnap/2)
nbin=180
for i in range(len(vList)):
for j in range(len(JList)):
print vList[i],JList[j]
folder=basefolder+'data_v0_'+vList[i]+'/data_j_'+JList[j]+'_sphere/'
print folder
param = Param(folder)
files = sorted(glob(folder+'*.dat'))[skip:]
rho_profile =np.zeros((nbin,))
vel_profile =np.zeros((nbin,))
eng_profile = np.zeros((nbin,))
press_profile = np.zeros((nbin,))
s_tt_profile = np.zeros((nbin,))
s_tp_profile = np.zeros((nbin,))
s_pt_profile = np.zeros((nbin,))
s_pp_profile = np.zeros((nbin,))
alpha_profile = np.zeros((nbin,))
alpha_v_profile = np.zeros((nbin,))
axis = np.zeros((len(files),3))
orderpar = np.zeros((len(files),3))
iscount = np.zeros((nbin,))
tot = 0
for f in files:
[theta_bin,rho_profile0,vel_profile0,eng_profile0,press_profile0,s_tt_profile0,s_tp_profile0,s_pt_profile0,s_pp_profile0,alpha_profile0,alpha_v_profile0,axis[tot,:],orderpar[tot,:]]=getProfiles(f,nbin,param.r,param.k,sigma)
isparticles=np.array([index for index,value in enumerate(rho_profile0) if (value >0)])
iscount[isparticles]+=1
rho_profile[isparticles]+=rho_profile0[isparticles]
vel_profile[isparticles]+=vel_profile0[isparticles]
eng_profile[isparticles]+=eng_profile0[isparticles]
press_profile[isparticles]+=press_profile0[isparticles]
s_tt_profile[isparticles]+=s_tt_profile0[isparticles]
s_tp_profile[isparticles]+=s_tp_profile0[isparticles]
s_pt_profile[isparticles]+=s_pt_profile0[isparticles]
s_pp_profile[isparticles]+=s_pp_profile0[isparticles]
alpha_profile[isparticles]+=alpha_profile0[isparticles]
alpha_v_profile[isparticles]+=alpha_v_profile0[isparticles]
tot +=1
orderpar/=(param.v0*param.r)
issomething=[index for index,value in enumerate(iscount) if (value >0)]
rho_profile[issomething]/=iscount[issomething]
vel_profile[issomething]/=iscount[issomething]
eng_profile[issomething]/=iscount[issomething]
press_profile[issomething]/=iscount[issomething]
s_tt_profile[issomething]/=iscount[issomething]
s_tp_profile[issomething]/=iscount[issomething]
s_pt_profile[issomething]/=iscount[issomething]
s_pp_profile[issomething]/=iscount[issomething]
alpha_profile[issomething]/=iscount[issomething]
alpha_v_profile[issomething]/=iscount[issomething]
outfile=outfolder+'profilesV_v0' + vList[i] + '_j' + JList[j] + '.dat'
outfile2=outfolder + 'axisV_v0' + vList[i] + '_j' + JList[j] + '.dat'
np.savetxt(outfile, np.transpose(np.array([theta_bin,rho_profile,vel_profile,eng_profile,press_profile,s_tt_profile,s_tp_profile,s_pt_profile,s_pp_profile,alpha_profile,alpha_v_profile])),fmt='%12.6g', header='theta rho vel energy pressure s_tt s_tp s_pt s_pp alpha alpha_v') # x,y,z equal sized 1D arrays
np.savetxt(outfile2,np.concatenate((axis,orderpar),axis=1),fmt='%12.6g', header='axis orderpar') |
wbtuomela/mezzanine | mezzanine/pages/urls.py | Python | bsd-2-clause | 332 | 0 | from __future__ import unicode_literals
from django.conf.urls import url
from django.conf import settings
from mezzanine.pages import page_processors, views
# Load any custom page processors registered by installed apps.
page_processors.autodiscover()
# Page patterns.
urlpatterns = [
    # Catch-all: every remaining path is treated as a page slug; the
    # trailing slash is required only when APPEND_SLASH is enabled.
    url("^(?P<slug>.*)%s$" % ("/" if settings.APPEND_SLASH else ""),
        views.page, name="page"),
]
|
winstonschroeder77/Python_KMR-1.8 | setup.py | Python | mit | 1,394 | 0.021521 | # Workaround for issue in Python 2.7.3
# See http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing
except ImportError:
pass
try:
# Try | using ez_setup to install setuptools if not already installed.
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
# Ignore import error and assume Python 3 which already has setuptools.
pass
from setuptools import setup, find_pa | ckages
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'KMR18',
version = '0.0.1',
description = 'Library to control an KMR-1.8 TFT LCD display based on work by cskau.',
license = 'MIT',
classifiers = classifiers,
url = 'https://github.com/winstonschroeder77/Python_KMR-1.8/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.6.5'],
install_requires = ['Adafruit-GPIO>=0.6.5'],
packages = find_packages())
|
metalwihen/udacity-full-stack-nanodegree-projects | Project1/entertainment_center.py | Python | unlicense | 1,627 | 0.004917 | import media
import fresh_tomatoes
def run():
    """Build the "Top Anime Movies" listing page and open it in a browser."""
    movies = get_movie_list()
    fresh_tomatoes.open_movies_page(movies, "Top Anime Movies")
def get_movie_list():
    """Return the curated set of anime films as media.Movie instances."""
    # (title, poster image URL, YouTube trailer URL) per film.
    movie_specs = [
        ("Your Name",
         "https://upload.wikimedia.org/wikipedia/en/0/0b/Your_Name_poster.png",
         "https://www.youtube.com/watch?v=xU47nhruN-Q"),
        ("My Neighbor Totoro",
         "https://upload.wikimedia.org/wikipedia/en/0/02/My_Neighbor_Totoro_-_Tonari_no_Totoro_%28Movie_Poster%29.jpg",
         "https://www.youtube.com/watch?v=92a7Hj0ijLs"),
        ("Spirited Away",
         "https://upload.wikimedia.org/wikipedia/en/3/30/Spirited_Away_poster.JPG",
         "https://www.youtube.com/watch?v=7cv5p1XNuDw"),
        ("Kiki's Delivery Service",
         "https://upload.wikimedia.org/wikipedia/en/0/07/Kiki%27s_Delivery_Service_%28Movie%29.jpg",
         "https://www.youtube.com/watch?v=4bG17OYs-GA"),
        ("Wolf Children",
         "https://upload.wikimedia.org/wikipedia/en/9/9c/%C5%8Ckami_Kodomo_no_Ame_to_Yuki_poster.jpg",
         "https://www.youtube.com/watch?v=8xLji7WsW0w"),
        ("Ghost in the Shell",
         "https://upload.wikimedia.org/wikipedia/en/c/ca/Ghostintheshellposter.jpg",
         "https://www.youtube.com/watch?v=SvBVDibOrgs"),
    ]
    return [media.Movie(title, poster, trailer)
            for title, poster, trailer in movie_specs]
run()
|
sassoftware/rmake3 | rmake/build/buildjob.py | Python | apache-2.0 | 12,119 | 0.002888 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import sys
import time
from rmake import failure
from rmake.build import buildtrove
from rmake.lib import uuid
# Numeric codes for the lifecycle of a build job (see ACTIVE_STATES below
# for which of them count as "still running").
jobStates = {
    'JOB_STATE_INIT' : 0,
    'JOB_STATE_LOADING' : 100,
    'JOB_STATE_BUILD' : 101,
    'JOB_STATE_BUILT' : 200,
    'JOB_STATE_FAILED' : 400,
    }
# assign jobStates to this module's dict so that they can be referenced with
# module 'getattribute' notation (eg; buildjob.JOB_STATE_INIT)
sys.modules[__name__].__dict__.update(jobStates)
# Human-readable name per state code, derived from the constant's last
# word (e.g. 400 -> 'Failed').
stateNames = dict([(x[1], x[0].split('_')[-1].capitalize()) \
                   for x in jobStates.iteritems()])
# only need to specify names that differ from their variable name
stateNames.update({
    JOB_STATE_INIT : 'Initialized',
    JOB_STATE_BUILD : 'Building',
})
# States in which the job is still making progress.
ACTIVE_STATES = [ JOB_STATE_INIT, JOB_STATE_BUILD, JOB_STATE_LOADING ]
def _getStateName(state):
    """Return the human-readable name for a numeric job state."""
    return stateNames[state]
class _AbstractBuildJob(object):
"""
Abstract BuildJob.
Contains basic data for a build job and methods for accessing that
data. Most setting of this data (after creation) should be through
methods that are defined in BuildJob subclass.
"""
    def __init__(self, jobUUID=None, jobId=None, jobName=None, troveList=(),
            state=JOB_STATE_INIT, status='', owner=None, failure=None,
            configs=(), timeStarted=None, timeUpdated=None, timeFinished=None):
        """Create a job; a random UUID is generated when none is given.

        troveList entries are (name, version, flavor[, context]) tuples
        registered via addTrove; configs maps context -> build config.
        """
        self.jobUUID = jobUUID or uuid.uuid4()
        self.jobId = jobId
        self.jobName = jobName
        self.state = state
        self.status = status
        self.owner = owner
        self.failure = failure
        self.timeStarted = timeStarted
        self.timeUpdated = timeUpdated
        self.timeFinished = timeFinished
        # (name, version, flavor) -> list of contexts the trove appears in.
        self.troveContexts = {}
        # (name, version, flavor, context) -> BuildTrove.
        self.troves = {}
        self.configs = dict(configs)
        for troveTup in troveList:
            self.addTrove(*troveTup)
    def hasTrove(self, name, version, flavor, context=''):
        # Membership test on the fully-qualified (n, v, f, context) key.
        return (name, version, flavor, context) in self.troves
    def findTrovesWithContext(self, labelPath, troveSpecList, *args, **kw):
        """findTroves, but with (name, version, flavor, context) specs.

        A context of None expands to every context the matched trove is
        registered under; a concrete context limits results to troves
        present in that context.  Returns a dict mapping each input
        (n, v, f, context) spec to a list of (n, v, f, context) tuples.
        NOTE: relies on self.findTroves, which is not defined in this
        abstract class -- presumably provided by a subclass.
        """
        contextLists = {}
        for n,v,f,c in troveSpecList:
            contextLists.setdefault((n,v,f), []).append(c)
        # Resolve the context-free specs once, then fan the results back
        # out per requested context.
        results = self.findTroves(labelPath, contextLists, *args, **kw)
        finalResults = {}
        for troveSpec, troveList in results.iteritems():
            for context in contextLists[troveSpec]:
                l = []
                finalResults[troveSpec + (context,)] = l
                for troveTup in troveList:
                    if context is None:
                        for c in self.troveContexts[troveTup]:
                            l.append(troveTup + (c,))
                    elif context in self.troveContexts[troveTup]:
                        l.append(troveTup + (context,))
        return finalResults
    def addTrove(self, name, version, flavor, context='', buildTrove=None):
        """Register a trove with the job, creating a BuildTrove if needed."""
        if buildTrove:
            # A caller-supplied trove must already carry the same context.
            assert(buildTrove.getContext() == context)
        else:
            buildTrove = buildtrove.BuildTrove(None, name, version, flavor,
                                               context=context)
        buildTrove.setPublisher(self.getPublisher())
        self.troves[name, version, flavor, context] = buildTrove
        self.troveContexts.setdefault((name, version, flavor), []).append(context)
        if buildTrove.getConfig():
            self.setTroveConfig(buildTrove, buildTrove.getConfig())
    def removeTrove(self, name, version, flavor, context=''):
        """Drop a trove; prune the context index when its list empties."""
        del self.troves[name,version,flavor,context]
        l = self.troveContexts[name,version,flavor]
        l.remove(context)
        if not l:
            del self.troveContexts[name, version, flavor]
    def addBuildTrove(self, buildTrove):
        # Convenience wrapper: register an existing BuildTrove under its
        # own (name, version, flavor, context) key.
        self.addTrove(buildTrove=buildTrove,
                      *buildTrove.getNameVersionFlavor(withContext=True))
    def setBuildTroves(self, buildTroves):
        """Replace all registered troves, adopting each into this job."""
        self.troves = {}
        self.troveContexts = {}
        for trove in buildTroves:
            # Adopt the trove into this job before indexing it.
            trove.jobId = self.jobId
            self.troves[trove.getNameVersionFlavor(withContext=True)] = trove
            self.troveContexts.setdefault(trove.getNameVersionFlavor(),
                                          []).append(trove.getContext())
    def iterTroveList(self, withContexts=False):
        # Iterate (n, v, f, context) keys, or bare (n, v, f) tuples.
        if withContexts:
            return self.troves.iterkeys()
        else:
            return self.troveContexts.iterkeys()
    def iterLoadableTroveList(self):
        # Yields the (n, v, f, context) keys.
        return (x[0] for x in self.troves.iteritems())
    def iterLoadableTroves(self):
        # Yields the BuildTrove objects themselves.
        return (x for x in self.troves.itervalues())
    def getTrove(self, name, version, flavor, context=''):
        # Raises KeyError if the trove is not part of this job.
        return self.troves[name, version, flavor, context]
    def iterTroves(self):
        # All BuildTroves in the job.
        return self.troves.itervalues()
    def getStateName(self):
        """
        Return the human-readable name for the job's current state.
        """
        return _getStateName(self.state)
    def getFailureReason(self):
        # NOTE(review): __init__ stores the failure object as self.failure;
        # nothing in this class sets self.failureReason, so this raises
        # AttributeError unless a subclass assigns it -- confirm intent.
        return self.failureReason
    def isBuilding(self):
        # True while the job is actively building.
        return self.state == JOB_STATE_BUILD
    def isBuilt(self):
        # True once the job completed successfully.
        return self.state == JOB_STATE_BUILT
    def isFailed(self):
        # True once the job ended in failure.
        return self.state == JOB_STATE_FAILED
    def isFinished(self):
        # True for either terminal state (failed or built).
        return self.state in (JOB_STATE_FAILED, JOB_STATE_BUILT)
    def isRunning(self):
        # True for any non-terminal state (init/loading/building).
        return self.state in ACTIVE_STATES
    def isLoading(self):
        # True while job setup (loading) is in progress.
        return self.state == JOB_STATE_LOADING
    def trovesInProgress(self):
        # True if any trove is currently building or ready to build.
        for trove in self.iterTroves():
            if trove.isBuilding() or trove.isBuildable():
                return True
        return False
    def iterTrovesByState(self, state):
        # Troves whose numeric state equals the given one.
        return (x for x in self.iterTroves() if x.state == state)
    def getBuiltTroveList(self):
        # Flatten the binary troves produced by every build trove.
        return list(itertools.chain(*[ x.getBinaryTroves() for x in
                                       self.iterTroves()]))
    def getTrovesByName(self, name):
        # Normalize the package part of *name* to its ':source' component,
        # then return every matching (n, v, f) tuple.
        name = name.split(':')[0] + ':source'
        return [ x for x in self.troveContexts if x[0] == name ]
    def iterFailedTroves(self):
        # Troves whose build failed.
        return (x for x in self.iterTroves() if x.isFailed())
    def iterPrimaryFailureTroves(self):
        # Troves that failed of their own accord (per BuildTrove's
        # isPrimaryFailure), as opposed to failing via a dependency.
        return (x for x in self.iterTroves() if x.isPrimaryFailure())
    def iterBuiltTroves(self):
        # Troves that finished building successfully.
        return (x for x in self.iterTroves() if x.isBuilt())
    def iterUnbuiltTroves(self):
        # Troves not yet started.
        return (x for x in self.iterTroves() if x.isUnbuilt())
    def iterBuildingTroves(self):
        # Troves currently building.
        return (x for x in self.iterTroves() if x.isBuilding())
    def iterWaitingTroves(self):
        # Troves waiting (e.g. for a build slot or dependency).
        return (x for x in self.iterTroves() if x.isWaiting())
    def iterPreparingTroves(self):
        # Troves in their preparation phase.
        return (x for x in self.iterTroves() if x.isPreparing())
    def hasBuildingTroves(self):
        # True if at least one trove is currently building.
        return self._hasTrovesByCheck('isBuilding')
    def iterBuildableTroves(self):
        # Troves that are ready to be built.
        return (x for x in self.iterTroves() if x.isBuildable())
    def hasBuildableTroves(self):
        # True if at least one trove is ready to be built.
        return self._hasTrovesByCheck('isBuildable')
    def _hasTrovesByCheck(self, check):
        # *check* names a boolean BuildTrove method (e.g. 'isBuilding');
        # short-circuits on the first trove that passes.
        for trove in self.iterTroves():
            if getattr(trove, check)():
                return True
        return False
    def getMainConfig(self):
        # The context-less ('') entry is the job's main config; implicitly
        # returns None when it has not been set.
        if '' in self.configs:
            return self.configs['']
    def setMainConfig(self, config):
        """Set the job's main config and push it to context-less troves."""
        self.configs[''] = config
        for trove in self.iterTroves():
            if not trove.getContext():
                trove.cfg = config
def getC |
ovresko/erpnext | erpnext/patches/v9_0/remove_non_existing_warehouse_from_stock_settings.py | Python | gpl-3.0 | 322 | 0.021739 | from __future__ import unicode_literals
import frappe
def execute():
	"""Clear Stock Settings' default_warehouse when it references a
	Warehouse record that no longer exists."""
	default_warehouse = frappe.db.get_value("Stock Settings", None, "default_warehouse")
	if default_warehouse and not frappe.db.get_value("Warehouse", {"name": default_warehouse}):
		frappe.db.set_value("Stock Settings", None, "default_warehouse", "")
Yelp/paasta | paasta_tools/secret_providers/vault.py | Python | apache-2.0 | 6,460 | 0.000464 | import getpass
import os
from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
try:
from vault_tools.client.jsonsecret import get_plaintext
from vault_tools.paasta_secret import get_vault_client
from vault_tools.gpg import TempGpgKeyring
from vault_tools.paasta_secret import encrypt_secret
from vault_tools.cert_tools import do_cert_renew
import hvac
except ImportError:
def get_plaintext(*args: Any, **kwargs: Any) -> bytes:
return b"No plain text available without vault_tools"
def get_vault_client(*args: Any, **kwargs: Any) -> None:
return None
TempGpgKeyring = None
def encrypt_secret(*args: Any, **kwargs: Any) -> None:
return None
def do_cert_renew(*args: Any, **kwargs: Any) -> None:
return None
from paasta_tools.secret_providers import BaseSecretProvider
from paasta_tools.secret_tools import get_secret_name_from_ref
class SecretProvider(BaseSecretProvider):
    """Secret provider backed by HashiCorp Vault.

    One authenticated ``hvac`` client is created per Vault ecosystem that the
    requested PaaSTA clusters map to; encryption, decryption and certificate
    operations are delegated to the ``vault_tools`` helpers via those clients.
    """

    def __init__(
        self,
        soa_dir: Optional[str],
        service_name: Optional[str],
        cluster_names: List[str],
        vault_cluster_config: Optional[Dict[str, str]] = None,
        vault_auth_method: str = "ldap",
        vault_token_file: str = "/root/.vault-token",
        vault_num_uses: int = 1,
        **kwargs: Any,
    ) -> None:
        """Authenticate against every Vault ecosystem used by *cluster_names*.

        :param soa_dir: path to the soa-configs checkout (may be None)
        :param service_name: service whose secrets are being managed
        :param cluster_names: PaaSTA clusters the secrets apply to
        :param vault_cluster_config: mapping of paasta cluster -> vault ecosystem
        :param vault_auth_method: "ldap" prompts interactively for a password;
            any other value authenticates with the token file
        :param vault_token_file: token file used for non-ldap auth
        :param vault_num_uses: number of uses requested for the Vault token
        """
        super().__init__(soa_dir, service_name, cluster_names)
        # Use a None sentinel instead of a mutable `{}` default so the default
        # dict can never be shared (and accidentally mutated) across instances.
        self.vault_cluster_config = (
            vault_cluster_config if vault_cluster_config is not None else {}
        )
        self.vault_auth_method = vault_auth_method
        self.vault_token_file = vault_token_file
        self.ecosystems = self.get_vault_ecosystems_for_clusters()
        self.clients: Dict[str, hvac.Client] = {}
        if vault_auth_method == "ldap":
            username = getpass.getuser()
            password = getpass.getpass(
                "Please enter your LDAP password to auth with Vault\n"
            )
        else:
            # Token-file auth requires no interactive credentials.
            username = None
            password = None
        for ecosystem in self.ecosystems:
            self.clients[ecosystem] = get_vault_client(
                ecosystem=ecosystem,
                num_uses=vault_num_uses,
                vault_auth_method=self.vault_auth_method,
                vault_token_file=self.vault_token_file,
                username=username,
                password=password,
            )

    def decrypt_environment(
        self, environment: Dict[str, str], **kwargs: Any
    ) -> Dict[str, str]:
        """Resolve every secret reference in *environment* to its plaintext.

        Uses the first configured ecosystem's client for all lookups.
        """
        client = self.clients[self.ecosystems[0]]
        secret_environment = {}
        for k, v in environment.items():
            secret_name = get_secret_name_from_ref(v)
            secret_path = os.path.join(self.secret_dir, f"{secret_name}.json")
            secret = get_plaintext(
                client=client,
                env=self.ecosystems[0],
                path=secret_path,
                cache_enabled=False,
                cache_dir=None,
                cache_key=None,
                context=self.service_name,
                rescue_failures=False,
            ).decode("utf-8")
            secret_environment[k] = secret
        return secret_environment

    def get_vault_ecosystems_for_clusters(self) -> List[str]:
        """Return the deduplicated Vault ecosystems for the configured clusters.

        :raises KeyError: if a cluster has no entry in vault_cluster_config
        """
        try:
            return list(
                {
                    self.vault_cluster_config[cluster_name]
                    for cluster_name in self.cluster_names
                }
            )
        except KeyError as e:
            # NOTE: this message was corrupted by an extraction artifact in the
            # original source; restored here.
            print(
                "Cannot find a vault cluster for the %s paasta cluster. A mapping must exist "
                "in /etc/paasta so we contact the correct vault cluster to get/set secrets"
                % e
            )
            raise

    def write_secret(
        self,
        action: str,
        secret_name: str,
        plaintext: bytes,
        cross_environment_motivation: Optional[str] = None,
    ) -> None:
        """Encrypt *plaintext* and write it to every configured ecosystem."""
        with TempGpgKeyring(overwrite=True):
            for ecosystem in self.ecosystems:
                client = self.clients[ecosystem]
                encrypt_secret(
                    client=client,
                    action=action,
                    ecosystem=ecosystem,
                    secret_name=secret_name,
                    soa_dir=self.soa_dir,
                    plaintext=plaintext,
                    service_name=self.service_name,
                    transit_key=self.encryption_key,
                    cross_environment_motivation=cross_environment_motivation,
                )

    def decrypt_secret(self, secret_name: str) -> str:
        """Return the plaintext of *secret_name*, decoded as UTF-8."""
        # Delegate to decrypt_secret_raw instead of duplicating the lookup.
        return self.decrypt_secret_raw(secret_name).decode("utf-8")

    def decrypt_secret_raw(self, secret_name: str) -> bytes:
        """Return the plaintext of *secret_name* as raw bytes."""
        client = self.clients[self.ecosystems[0]]
        secret_path = os.path.join(self.secret_dir, f"{secret_name}.json")
        return get_plaintext(
            client=client,
            path=secret_path,
            env=self.ecosystems[0],
            cache_enabled=False,
            cache_key=None,
            cache_dir=None,
            context=self.service_name,
            rescue_failures=False,
        )

    def get_secret_signature_from_data(self, data: Mapping[str, Any]) -> Optional[str]:
        """Return the stored signature for this provider's first ecosystem, if any."""
        ecosystem = self.ecosystems[0]
        if data["environments"].get(ecosystem):
            return data["environments"][ecosystem]["signature"]
        else:
            return None

    def renew_issue_cert(self, pki_backend: str, ttl: str) -> None:
        """Issue (or renew) a client certificate for the current user under
        ~/.paasta/pki, valid for *ttl*."""
        client = self.clients[self.ecosystems[0]]
        user = getpass.getuser()
        pki_dir = os.path.expanduser("~/.paasta/pki")
        do_cert_renew(
            client=client,
            pki_backend=pki_backend,
            role=user,
            cn=f"{user}.{self.ecosystems[0]}.paasta.yelp",
            cert_path=f"{pki_dir}/{self.ecosystems[0]}.crt",
            key_path=f"{pki_dir}/{self.ecosystems[0]}.key",
            ca_path=f"{pki_dir}/{self.ecosystems[0]}_ca.crt",
            cert_owner=user,
            cert_group="users",
            cert_mode="0600",
            ttl=ttl,
        )
|
elyak123/obeying-the-testing-goat | functional_tests/test_simple_list_creation.py | Python | mit | 3,802 | 0.005786 | from selenium import webdriver
from functional_tests.base import FunctionalTest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewvisitorTest(FunctionalTest):
    """User-story functional tests for creating to-do lists via Selenium."""

    def test_can_start_a_list_for_one_user(self):
        """A single visitor can create a list and see her items persist on the page."""
        # Edith has heard about a cool new on-line to-do app
        # She goes to checkout its home page.
        self.browser.get(self.live_server_url)
        # She notices the page title and header mention to-do lists
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        # She is invited to enter a to-do item right away
        inputbox= self.get_item_input_box()
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter a to-do item'
        )
        # She types "Buy peacock feathers" into a text box
        inputbox.send_keys('Buy peacock feathers')
        # When she presses enter the page gets updated and now the page lists
        # "1: Buy peacock feathers" as an item in a to-do list
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy peacock feathers')
        # There is still a text box inviting her to add another item. She
        # enters "Use peacock feathers to make a fly" (Edith is very methodical)
        inputbox = self.get_item_input_box()
        inputbox.send_keys('Use peacock feathers to make a fly')
        inputbox.send_keys(Keys.ENTER)
        # The page updates again, and now shows both items on her list
        self.wait_for_row_in_list_table('2: Use peacock feathers to make a fly')
        # Edith wonders whether the site will remember her list. Then she sees
        # that the site has generated a unique URL for her -- there is some
        # explanatory text to that effect.
        # She visits that URL - her to-do list is still there.
        # Satisfied, she goes back to sleep

    def test_multiple_users_can_start_lists_at_different_urls(self):
        """Two different users each get their own list at a distinct /lists/ URL."""
        # Edith starts a new todo list
        self.browser.get(self.live_server_url)
        inputbox = self.get_item_input_box()
        inputbox.send_keys('Buy peacock feathers')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy peacock feathers')
        # She notices that her list has a unique URL
        edith_list_url = self.browser.current_url
        self.assertRegex(edith_list_url, '/lists/.+')
        # Now a new user, Francis, comes along to the site.
        ## We use a new browser session to make sure that no
        ## information of Edith's is coming through from cookies etc
        self.browser.quit()
        self.browser = webdriver.Firefox()
        # Francis visits the home page. There is no sign of Edith's
        # list
        self.browser.get(self.live_server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertNotIn('make a fly', page_text)
        # Francis starts a new list by entering a new item. He
        # is less interesting than Edith
        inputbox = self.get_item_input_box()
        inputbox.send_keys('Buy milk')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy milk')
        # Francis gets his own unique URL
        francis_list_url = self.browser.current_url
        self.assertRegex(francis_list_url, '/lists/.+')
        # Again, there is no trace of Edith's list
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertIn('Buy milk', page_text)
        # Satisfied, they both go back to sleep
|
AversivePlusPlus/AversivePlusPlus | modules/thirdparty/arduino/conanfile.py | Python | bsd-3-clause | 1,882 | 0.006376 | from conans import ConanFile, CMake
class AversivePlusPlusModuleConan(ConanFile):
    """Conan recipe that builds the Arduino core as a static AVR library.

    (Two occurrences of ``self.conanfile_directory`` in the original were
    corrupted by extraction artifacts; they are restored here.)
    """
    name = "arduino"
    version = "0.1"
    exports = "*"
    settings = "target"

    def build(self):
        """Compile the Arduino core sources with avr-gcc and archive them into libarduino.a."""
        sources = '%s/cores/arduino/*.c %s/cores/arduino/*.cpp' % (self.conanfile_directory, self.conanfile_directory)
        inc_arduino = '-I%s/cores/arduino/' % (self.conanfile_directory)
        # Defaults (mega2560); overridden below for the known targets.
        inc_variant = '-I%s/variants/mega/' % (self.conanfile_directory)
        flags = '-mmcu=atmega2560 -DF_CPU=16000000L -Os'
        if self.settings.target == "arduino-uno":
            inc_variant = '-I%s/variants/standard/' % (self.conanfile_directory)
            flags = '-mmcu=atmega328p -DF_CPU=16000000L -Os'
        elif self.settings.target == "arduino-mega2560":
            inc_variant = '-I%s/variants/mega/' % (self.conanfile_directory)
            flags = '-mmcu=atmega2560 -DF_CPU=16000000L -Os'
        self.run('avr-gcc -c %s %s %s %s' % (sources, inc_arduino, inc_variant, flags))
        self.run('avr-ar r libarduino.a *.o')

    def package(self):
        """Copy the public headers and the static library into the package layout."""
        self.copy("*.h", src="cores/arduino", dst="include")
        self.copy("*.a", dst="lib")
        if self.settings.target == "arduino-uno":
            self.copy("*.h", src='variants/standard', dst='include')
        elif self.settings.target == "arduino-mega2560":
            self.copy("*.h", src='variants/mega', dst='include')

    def package_info(self):
        """Expose the library, F_CPU define and MCU-specific flags to consumers."""
        self.cpp_info.libs = ["arduino"]
        self.cpp_info.defines = ["F_CPU=16000000L"]
        if self.settings.target == "arduino-uno":
            self.cpp_info.cflags = ["-mmcu=atmega328p", "-Os"]
            self.cpp_info.cppflags = ["-mmcu=atmega328p", "-Os"]
        elif self.settings.target == "arduino-mega2560":
            self.cpp_info.cflags = ["-mmcu=atmega2560", "-Os"]
            self.cpp_info.cppflags = ["-mmcu=atmega2560", "-Os"]
|
hbeatty/incubator-trafficcontrol | traffic_control/clients/python/to_access/__init__.py | Python | apache-2.0 | 16,838 | 0.028151 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
.. _toaccess:
.. program:: toaccess
``toaccess``
============
This module provides a set of functions meant to provide ease-of-use functionality for interacting
with the Traffic Ops API. It provides scripts named :file:`to{method}` where `method` is the name of
an HTTP method (in lowercase). Collectively they are referred to as :program:`toaccess` Implemented
methods thus far are:
- delete
- head
- get
- options
- patch
- post
- put
Arguments and Flags
-------------------
.. option:: PATH
This is the request path. By default, whatever is passed is considered to be relative to
:file:`/api/{api-version}/` where ``api-version`` is :option:`--api-version`. This behavior can
be disabled by using :option:`--raw-path`.
.. option:: DATA
An optional positional argument that is a data payload to pass to the Traffic Ops server in the
request body. If this is the absolute or relative path to a file, the contents of the file will
instead be read and used as the request payload.
.. option:: -h, --help
Print usage information and exit
.. option:: -a API_VERSION, --api-version API_VERSION
Specifies the version of the Traffic Ops API that will be used for the request. Has no effect if
:option:`--raw-path` is used. (Default: 2.0)
.. option:: -f, --full
Output the full HTTP exchange including request method line, request headers, request body (if
any), response status line, and response headers (as well as the response body, if any). This is
equivalent to using :option:`--request-headers`, :option:`--request-payload`, and
:option:`--response-headers` at the same time, and those options will have no effect if given.
(Default: false)
.. option:: -k, --insecure
Do not verify SSL certificates - typically useful for making requests to development or testing
servers as they frequently have self-signed certificates. (Default: false)
.. option:: -p, --pretty
Pretty-print any payloads that are output as formatted JSON. Has no effect on plaintext payloads.
Uses tab characters for indentation. (Default: false)
.. option:: -r, --raw-path
Request exactly :option:`PATH`; do not preface the request path with :file:`/api/{api_version}`.
This effectively means that :option:`--api-version` will have no effect. (Default: false)
.. option:: -v, --version
Print version information and exit.
.. option:: --request-headers
Output the request method line and any and all request headers. (Default: false)
.. option:: --request-payload
Output the request body if any was sent. Will attempt to pretty-print the body as JSON if
:option:`--pretty` is used. (Default: false)
.. option:: --response-headers
Output the response status line and any and all response headers. (Default: false)
.. option:: --to-url URL
The :abbr:`FQDN (Fully Qualified Domain Name)` and optionally the port and scheme of the Traffic
Ops server. This will override :envvar:`TO_URL`. The format is the same as for :envvar:`TO_URL`.
(Default: uses the value of :envvar:`TO_URL`)
.. option:: --to-password PASSWORD
The password to use when authenticating to Traffic Ops. Overrides :envvar:`TO_PASSWORD`.
(Default: uses the value of :envvar:`TO_PASSWORD`)
.. option:: --to-user USERNAME
The username to use when connecting to Traffic Ops. Overrides :envvar:`TO_USER`. (Default: uses
the value of :envvar:`TO_USER`)
Environment Variables
---------------------
If defined, :program:`toaccess` scripts will use the :envvar:`TO_URL`, :envvar:`TO_USER`, and
:envvar:`TO_PASSWORD` environment variables to define their connection to and authentication with the
Traffic Ops server. Typically, setting these is easier than using the long options :option:`--to-url`,
:option:`--to-user`, and :option:`--to-password` on every invocation.
Exit Codes
----------
The exit code of a :program:`toaccess` script can sometimes be used by the caller to determine what
the result of calling the script was without needing to parse the output. The exit codes used are:
0
The command executed successfully, and the result is on STDOUT.
1
Typically this exit code means that an error was encountered when parsing positional command
line arguments. However, this is also the exit code used by most Python interpreters to signal
an unhandled exception.
2
Signifies a runtime error that caused the request to fail - this is **not** generally indicative
of an HTTP client or server error, but rather an underlying issue connecting to or
authenticating with Traffic Ops. This is distinct from an exit code of ``32`` in that the
*format* of the arguments was correct, but there was some problem with the *value*. For example,
passing ``https://test:`` to :option:`--to-url` will cause an exit code of ``2``, not ``32``.
4
An HTTP client error occurred. The HTTP stack will be printed to stdout as indicated by other
options - meaning by default it will only print the response payload if one was given, but will
respect options like e.g. :option:`--request-payload` as well as
:option:`-p`/:option:`--pretty`.
5
An HTTP server error occurred. The HTTP stack will be printed to stdout as indicated by other
options - meaning by default it will only print the response payload if one was given, but will
respect options like e.g. :option:`--request-payload` as well as
:option:`-p`/:option:`--pretty`.
32
This is the error code emitted by Python's :mod:`argparse` module when the passed arguments
could not be parsed successfully.
.. note:: The way exit codes ``4`` and ``5`` are implemented is by returning the status code of the
HTTP request divided by 100 whenever it is at least 400. This means that if the Traffic Ops
server ever started returning e.g. 700 status codes, the exit code of the script would be 7.
Module Reference
================
"""
import json
import logging
import os
import sys
from urllib.parse import urlparse
from trafficops.restapi import LoginError, OperationError, InvalidJSONError
from trafficops.tosession import TOSession
from trafficops.__version__ import __version__
from requests.exceptions import RequestException
# Disable the root logger and raise the global threshold above CRITICAL so that
# no library log output can leak into stdout, which carries the HTTP exchange.
l = logging.getLogger()
l.disabled = True
logging.basicConfig(level=logging.CRITICAL+1)
def output(r, pretty, request_header, response_header, request_payload, indent = '\t'):
	"""
	Prints the passed response object in a format consistent with the other parameters.

	:param r: The :mod:`requests` response object being printed
	:param pretty: If :const:`True`, attempt to pretty-print payloads as JSON
	:param request_header: If :const:`True`, print request line and request headers
	:param response_header: If :const:`True`, print response line and response headers
	:param request_payload: If :const:`True`, print the request payload
	:param indent: An optional string used for pretty-printing indentation (default is the tab character)
	"""
	if request_header:
		print(r.request.method, r.request.path_url, "HTTP/1.1")
		for h,v in r.request.headers.items():
			print("%s:" % h, v)
		print()
	if request_payload and r.request.body:
		try:
			# Bug fix: honor `indent` here too - previously the request body was
			# re-serialized without it, so "pretty" request payloads were never indented.
			result = r.request.body if not pretty else json.dumps(json.loads(r.request.body), indent=indent)
		except ValueError:
			# Not JSON; print the body verbatim.
			result = r.request.body
		print(result, end="\n\n")
	if response_header:
		print("HTTP/1.1", r.status_code, end="")
		print(" "+r.reason if r.reason else "")
		for h,v in r.headers.items():
			print("%s:" % h, v)
		print()
	try:
		result = r.text if not pretty else json.dumps(r.json(), indent=indent)
	except ValueError:
		# Response body is not JSON; fall back to raw text.
		result = r.text
	print(result)
def parse_arguments(program):
"""
A common-use function that parses the command line arguments.
:param program: The name of |
hydroshare/hydroshare_temp | hs_core/admin.py | Python | bsd-3-clause | 527 | 0.009488 | from mezzanine.pages.admin import PageAdmin
from django.contrib.gis import admin
from .models import *
from dublincore.models import QualifiedDublinCoreElement
class InlineDublinCoreMetadata(generic.GenericTabularInline):
    """Inline admin editor for Dublin Core metadata attached via a generic relation.

    (Reconstructed: the model name was corrupted by an extraction artifact.)
    """
    model = QualifiedDublinCoreElement
class InlineResourceFiles(generic.GenericTabularInline):
    """Inline admin editor for files attached to a resource.

    (Reconstructed: the base-class reference was corrupted by an extraction artifact.)
    """
    model = ResourceFile
class GenericResourceAdmin(PageAdmin):
    """Mezzanine page admin extended with inline metadata and file editors."""
    inlines = PageAdmin.inlines + [InlineDublinCoreMetadata, InlineResourceFiles]
admin.site.register(GenericResource, GenericResourceAdmin)
|
fishilico/shared | python/crypto/ed25519_tests.py | Python | mit | 49,246 | 0.001624 | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
# Copyright (c) 2018 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Perform some operations with Ed25519 algorithm
Curve25519:
* q = 2**255 - 19
= 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed
* d = (-121665 / 121666) modulo q
= 0x52036cee2b6ffe738cc740797779e89800700a4d4141d8ab75eb4dca135978a3
* i = sqrt(-1) modulo q
= 0x2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0
* Base point (generator):
* B.x = 0x216936d3cd6e53fec0a4e231fdd6dc5c692cc7609525a7b2c9562d608f25d51a
* B.y = 4/5
= 0x6666666666666666666666666666666666666666666666666666666666666658
* order l = 2**252 + 27742317777372353535851937790883648493
= 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed
= 7237005577332262213973186563042994240857116359379907606001950938285454250989
* (l * B) = {x=0, y=1}
Twisted Edwards curve: -x**2 + y**2 = 1 + d x**2 y**2
Montgomery curve expression: y**2 = x**3 + 486662 x**2 + x, base point (x = 9)
Equivalence equations from Montgomery curve (u, v) to Twisted Edwards curve (x, y):
* x = u/v * sqrt(-486664)
* y = (u - 1) / (u + 1)
Inverse equation ("birational maps"):
* u = (1 + y) / (1 - y)
* v = u/x * sqrt(-486664)
The curve itself has order 8*l (the cofactor is 8 = 2**3), and there exists a
generator, for example with the point from y=3.
This point must be a generator, because 2**255 < 8*l < 2**256 (so 2*(8*l) > 2*q
which makes it impossible to have another orbit).
Because of this, there is an interesting property:
There exists only one subgroup of order l, and it is the one generated by B.
Indeed, B generates a subgroup of l items of a curve that contains 8*l items.
With P a point of order l and G a generator of th curve, there exists p such
that P = pG and 0 <= p < 8*l. Then, O = l*P = (l*p)*G so l*p is a multiple of
the curve order. Therefore p is a multiple of 8. But there are only l multiple
of 8 between 0 and 8*l, and its image by the multiplication with G is the
subgroup generated by B. Therefore P belongs to this subgroup.
Corollary:
For all points P generated using the equations, 8*P is guaranteed to be
in the subgroup or order l (which is prime) generated by B.
Another property: given a point P1 in the subgroup generated by B, the point
P2 = ((l + 1) / 2) * P1 is a "half" (or square root) of P1: 2 * P2 = P1.
This can be generalized for every divisor (or roots), using modular inverses
modulo l (which is prime).
Documentation:
* https://en.wikipedia.org/wiki/EdDSA
Ed25519 is the EdDSA signature scheme using SHA-512/256 and Curve25519
* https://en.wikipedia.org/wiki/Curve25519
* https://ed25519.cr.yp.to/
* https://ed25519.cr.yp.to/python/ed25519.py
* https://linux-audit.com/using-ed25519-openssh-keys-instead-of-dsa-rsa-ecdsa/
Using Ed25519 for OpenSSH keys (instead of DSA/RSA/ECDSA)
* https://tools.ietf.org/html/rfc7748
Elliptic Curves for Security
* https://github.com/warner/python-pure25519/blob/c88a6aeee0653c46c429f096ec3090388f77525a/misc/djbec.py
Pure python implementation of Ed25519 digital signatures
* https://github.com/monero-ecosystem/monero-python/blob/master/monero/ed25519.py
Monero's pure python implementation of Ed25519 digital signatures
"""
import argparse
import base64
import binascii
import errno
import hashlib
import logging
import os
import struct
import subprocess
import sys
import tempfile
try:
import Cryptodome.Util.asn1
has_cryptodome = True
except ImportError:
has_cryptodome = False
try:
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey, Ed25519PublicKey
has_cryptography = True
except ImportError:
sys.stderr.write("Warning: cryptography fails to load. Proceeding without it\n")
has_cryptography = False
try:
import nacl.bindings
# Only support nacl>=1.4.0 (https://github.com/pyca/pynacl/pull/528 and
# https://github.com/pyca/pynacl/commit/0e2ae90ac8bdc8f3cddf04d58a71da68678e6816 )
has_nacl = hasattr(nacl.bindings, "crypto_scalarmult_ed25519_noclamp")
if not has_nacl:
# Ensure that version is below 1.4.0, to detect issues
assert tuple(nacl.__version__.split(".")) < ("1", "4")
except ImportError:
sys.stderr.write("Warning: nacl fails to load. Proceeding without it\n")
has_nacl = False
logger = logging.getLogger(__name__)
COLOR_RED = '\033[31m'
COLOR_GREEN = '\033[32m'
COLOR_PURPLE = '\033[35m'
COLOR_NORM = '\033[m'
ED25519_PRIME = 2**255 - 19
ED25519_BITS = 256
ED25519_I = pow(2, (ED25519_PRIME - 1) // 4, ED25519_PRIME) # sqrt(-1) in F_q
def run_process_with_input(cmdline, data, color=None):
    """Run the given command with the given data and show its output in colors

    :param cmdline: argv-style list for the subprocess
    :param data: bytes fed to the subprocess on stdin (stdin is then closed)
    :param color: optional ANSI escape sequence used to colorize the child's output
    :return: True if the command exited with status 0, False otherwise
    """
    print("Output of \"{}\":".format(' '.join(cmdline)))
    if color:
        sys.stdout.write(color)
        # Flush so the escape sequence reaches the terminal before the child's output.
        sys.stdout.flush()
    proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE)
    proc.stdin.write(data)
    # Closing stdin signals EOF so the child can finish reading and exit.
    proc.stdin.close()
    ret = proc.wait()
    if color:
        sys.stdout.write(COLOR_NORM)
        sys.stdout.flush()
    if ret != 0:
        logger.error("command %s returned %d", ' '.join(cmdline), ret)
        return False
    return True
def hexdump(data, color=''):
"""Show an hexadecimal dump of binary data"""
if color:
sys.stdout.write(color)
for iline in range(0, len(data), 16):
hexline = ''
ascline = ''
for i in range(16):
if iline + i >= len(data):
hexline += ' '
else:
# pylint: disable=invalid-name
x = data[iline + i] if sys.version_info >= (3,) else ord(data[iline + i])
hexline += '{:02x}'.format(x)
ascline += chr(x) if 32 <= x < 127 else '.'
if i % 2:
hexline += ' '
print(" {:06x}: {} {}".format(iline, hexline, ascline))
if color:
sys.stdout.write(COLOR_NORM)
def xx(data):
    """Return *data* as a compact lowercase hexadecimal string."""
    # bytes.hex() only exists from Python 3.5 on; fall back to binascii before that.
    if sys.version_info >= (3, 5):
        return data.hex()
    return binascii.hexlify(data).decode('ascii')
def decode_bigint_le(data):
    """Decode *data* as an unsigned little-endian big integer."""
    if sys.version_info >= (3, 2):
        return int.from_bytes(data, 'little')
    if sys.version_info >= (3,):
        # 3.0/3.1: bytes iterate as ints but lack int.from_bytes.
        return sum(byte << (8 * pos) for pos, byte in enumerate(data))
    # Python 2: bytes iterate as 1-char strings.
    return sum(ord(ch) << (8 * pos) for pos, ch in enumerate(data))
def encode_bigint_le(value, bytelen=None):
    """Encode *value* as a little-endian byte string.

    When *bytelen* is None the minimal length is used (0 encodes to b'').
    """
    if bytelen is None:
        bytelen = (value.bit_length() + 7) // 8
    if sys.version_info >= (3, 2):
        return value.to_bytes(bytelen, 'little')
    # Pre-3.2 fallback: emit one byte at a time, least significant first.
    buf = bytearray(bytelen)
    for pos in range(bytelen):
        buf[pos] = value & 0xff
        value >>= 8
    assert value == 0
    return bytes(buf)
# pylint: disable=invalid-name
def extended_gcd(aa, bb):
"""Extended greatest common divisor
from https://rosettacode.org/wiki/Modular_inverse#Python
"""
lastremainder, remainder |
birlrobotics/HMM | hmm_for_baxter_using_only_success_trials/emission_log_prob_plot.py | Python | bsd-3-clause | 5,609 | 0.008736 | #!/usr/bin/env python
import os
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from math import (
log,
exp
)
from matplotlib import pyplot as plt
import time
import util
import math
import ipdb
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
def plot_log_prob_of_all_trials(
    gradient_traj_by_time,
    list_of_log_prob_mat,
    log_prob_owner,
    state_no,
    figure_save_path
):
    """Render one 3-column row of subplots per trial for an HMM state:
    the pre-rendered growing-viterbi-path image, the per-hidden-state
    emission log probabilities, and the gradient of the log-likelihood.
    Saves .eps and .png under <figure_save_path>/emission_log_prob_plot/.

    (Two corrupted lines from extraction artifacts were restored, the
    image file handle is now closed, and a dead local was removed.)
    """
    from matplotlib.pyplot import cm
    import matplotlib.image as mpimg
    import numpy as np
    trial_amount = len(list_of_log_prob_mat)
    hidden_state_amount = list_of_log_prob_mat[0].shape[1]
    subplot_per_row = 3
    subplot_amount = trial_amount*3
    row_amount = int(math.ceil(float(subplot_amount)/subplot_per_row))
    fig, ax_mat = plt.subplots(nrows=row_amount, ncols=subplot_per_row)
    if row_amount == 1:
        # plt.subplots returns a 1-D array for a single row; normalize to 2-D.
        ax_mat = ax_mat.reshape(1, -1)
    # Flatten the axes grid into a list, three consecutive axes per trial.
    ax_list = []
    for i in range(trial_amount):
        for j in (3*i, 3*i + 1, 3*i + 2):
            ax_list.append(ax_mat[j // subplot_per_row, j % subplot_per_row])
    colors_for_hstate = cm.rainbow(np.linspace(0, 1, hidden_state_amount))
    for trial_no in range(trial_amount):
        log_prob_mat = list_of_log_prob_mat[trial_no][:, :].transpose()
        # Middle column: emission log probability of each hidden state over time.
        plot_idx = 3*trial_no + 1
        for hstate_no in range(hidden_state_amount):
            ax_list[plot_idx].plot(log_prob_mat[hstate_no].tolist(), linestyle="solid", color=colors_for_hstate[hstate_no])
        trial_name = log_prob_owner[trial_no]
        ax_list[plot_idx].set_title('emission probabilities of %s hidden states' % hidden_state_amount)
        ax_list[plot_idx].set_xlabel('time step')
        ax_list[plot_idx].set_ylabel('log probability')
        ymax = np.max(log_prob_mat)
        ax_list[plot_idx].set_ylim(ymin=0, ymax=ymax)
        # Left column: previously rendered viterbi-path growth image for this trial.
        img_path = os.path.join(
            figure_save_path,
            'check_if_viterbi_path_grow_incrementally',
            "state_%s" % state_no,
            "%s.png" % trial_name,
        )
        # Context manager so the image file handle is not leaked (the original
        # opened the file and never closed it).
        with open(img_path, 'rb') as vp_triangle_img:
            img = mpimg.imread(vp_triangle_img)
        plot_idx = 3*trial_no
        ax_list[plot_idx].imshow(img)
        ax_list[plot_idx].set_title('growing viterbi paths')
        ax_list[plot_idx].set_ylabel('time step')
        ax_list[plot_idx].set_xlabel('length of viterbi path')
        # Right column: gradient of the log-likelihood over time.
        plot_idx = 3*trial_no + 2
        ax_list[plot_idx].plot(gradient_traj_by_time[trial_no].tolist()[0], linestyle="solid", color='black')
        ax_list[plot_idx].set_title('gradient of log-likelihood')
        ax_list[plot_idx].set_xlabel('time step')
        ax_list[plot_idx].set_ylabel('log probability')
    fig.set_size_inches(4*subplot_per_row, 4*row_amount)
    if not os.path.isdir(figure_save_path+'/emission_log_prob_plot'):
        os.makedirs(figure_save_path+'/emission_log_prob_plot')
    title = 'state %s emission log_prob plot' % (state_no,)
    fig.tight_layout()
    fig.savefig(os.path.join(figure_save_path, 'emission_log_prob_plot', title+".eps"), format="eps")
    fig.savefig(os.path.join(figure_save_path, 'emission_log_prob_plot', title+".png"), format="png")
    plt.close(1)
def run(model_save_path,
figure_save_path,
threshold_c_value,
trials_group_by_folder_name):
trials_group_by_folder_name = util.make_trials_of_each_state_the_same_length(trials_group_by_folder_name)
one_trial_data_group_by_state = trials_group_by_folder_name.itervalues().next()
state_amount = len(one_trial_data_group_by_state)
threshold_constant = 10
threshold_offset = 10
model_group_by_state = {}
for state_no in range(1, state_amount+1):
try:
model_group_by_state[state_no] = joblib.load(model_save_path+"/model_s%s.pkl"%(state_no,))
except IOError:
print 'model of state %s not found'%(state_no,)
continue
expected_log = []
std_of_log = []
deri_threshold = []
for state_no in model_group_by_state:
all_log_curves_of_this_state = []
list_of_log_prob_mat = []
log_prob_owner = []
for trial_name in trials_group_by_folder_name:
log_prob_owner.append(trial_name)
emission_log_prob_mat = util.get_emission_log_prob_matrix(
trials_group_by_folder_name[trial_name][state_no],
model_group_by_state[state_no]
)
list_of_log_prob_mat.append(emission_log_prob_mat)
one_log_curve_of_this_state = util.fast_log_curve_calculation(
trials_group_by_folder_name[trial_name][state_no],
model_group_by_state[state_no]
)
all_log_curves_of_this_state.append(one_log_curve_of_this_state)
# use np matrix to facilitate the computation of mean curve and std
np_matrix_traj_by_time = np.matrix(all_log_curves_of_this_state)
gradient_traj_by_time = np_matrix_traj_by_time[:, 1:]-np_matrix_traj_by_time[:, :-1]
plot_log_prob_of_all_trials(
gradient_traj_by_time,
list_of_log_prob_mat,
log_prob_owner,
state_no,
figure_save_path)
|
edx/edx-e2e-tests | regression/tests/studio/test_course_outline.py | Python | agpl-3.0 | 2,675 | 0.000748 | """
End to end tests for Studio Course Outline page
"""
import os
from unittest import skip
from bok_choy.web_app_test import WebAppTest
from edxapp_acceptance.pages.common.utils import assert_side_bar_help_link
from regression.pages.studio import EDXAPP_CMS_DOC_LINK_BASE_URL
from regression.pages.studio.course_outline_page import CourseOutlinePageExtended
from regression.pages.studio.login_studio import StudioLogin
from regression.pages.studio.studio_home import DashboardPageExtended
from regression.tests.helpers.utils import get_course_display_name, get_course_info
DEMO_COURSE_USER = os.environ.get('USER_LOGIN_EMAIL')
DEMO_COURSE_PASSWORD = os.environ.get('USER_LOGIN_PASSWORD')
class StudioCourseOutlineTest(WebAppTest):
    """Tests of the Course Outline in Studio."""
    @skip("Skip since studio's login/logout now redirects to LMS (ARCH-323)")
    def test_course_outline(self):
        """
        Verifies that the Help link for 'Learn more about content
        visibility settings' is working.
        """
        studio_login_page = StudioLogin(self.browser)
        studio_home_page = DashboardPageExtended(self.browser)
        studio_login_page.visit()
        studio_login_page.login(DEMO_COURSE_USER, DEMO_COURSE_PASSWORD)
        course_info = get_course_info()
        studio_course_outline = CourseOutlinePageExtended(
            self.browser, course_info['org'], course_info['number'],
            course_info['run'])
        # Verification only, should be on this page after login.
        studio_home_page.wait_for_page()
        # Navigate to the course's outline page
        studio_home_page.select_course(get_course_display_name())
        studio_course_outline.wait_for_page()
        # First verify the Help link
        expected_href = EDXAPP_CMS_DOC_LINK_BASE_URL + \
            '/en/latest/developing_course/controlling_content_visibility.html'
        # Assert that help link is correct.
        assert_side_bar_help_link(
            test=self,
            page=studio_course_outline,
            href=expected_href,
            help_text='Learn more about content visibility settings',
            as_list_item=False
        )
        # If the help page is still up (see LT-53), then close it.
        if self.browser.current_url.startswith('https://edx.readthedocs.io'):
            # TODO wrap this in a try/except block or otherwise harden,
            # make sure that you now have an active window (the other one)
            # and it's the right one (i.e. Studio or LMS)
            self.browser.close()  # close only the current window
            self.browser.switch_to_window(self.browser.window_handles[0])
|
ltyscu/Analysis-of-CGPs-Mechanisms | bit_behavior.py | Python | bsd-2-clause | 2,708 | 0.003693 | '''
Takes file names from the final/ folder as command line arguments
and parses the semantic information to produce the contents of Table VI.
Use this module as an executable to process each problem's results:
``python bit_behavior final/decode_*.dat.gz``
Note: Do not mix results from different problems.
'''
import json
import sys
from os import path
from collections import defaultdict
from util import find_median, open_file_method, pretty_name
if __name__ == '__main__':
filecount = 0
# which statistics to pull out of the files
interesting = [
'active_nodes_changed', # How many nodes that were active before and after the mutation changed behavior at least 1 bit
'reactivated_nodes', # How many nodes did active -> inactive -> active
'inactive_bits_changed', # For nodes that were active -> inactive -> active, how many bits changed in the process
]
# Collect information from the files
combined = defaultdict(lambda: defaultdict(list))
for filename in sys.argv[1:]:
base = path.basename(filename)
try:
print 'Processing file', filename
with open_file_method(filename)(filename, 'r') as f:
data = json.load(f)
# Converts the filename into a key
version = tuple(base.split('_')[1:3])
# extract each statistic
for test in interesting:
result = data[1][test]
try:
percentage = result['0'] / float(sum(result.values()))
except KeyError:
percentage = 0
# Inverts to become inactive_bits_unchnaged
if test == 'inactive_bits_changed':
percentage = 1 - percentage
combined[version][test].append(percentage * 100)
filecount += 1
except ValueError:
print filename, "FAILED"
print "Loaded", filecount
# Finds the median results for each of the statistics
for version, table in combined.items():
for test, line in sorted(table.items()):
| combined[version][test] = find_median(line)
# print the information in sorte | d order based on the first key
for version in sorted(combined.keys(), key=lambda version: combined[version][interesting[0]]):
duplicate, ordering = version
duplicate = ('\emph{%s}' % pretty_name[duplicate]).rjust(18)
ordering = ('\emph{%s}' % pretty_name[ordering]).rjust(14)
# LaTeX formatting
print ' & '.join([duplicate, ordering] + ["{0:.2f}\%".format(combined[version][test])
for test in interesting]) + ' \\\\ \hline'
|
kevinkle/semantic | superphy/src/upload/python/_sparql.py | Python | apache-2.0 | 11,605 | 0.000431 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This module wraps often-used queries to the Blazegraph SPARQL endpoint.
"""
#from SPARQLWrapper import JSON, SPARQLWrapper
from superphy.shared.endpoint import query as _sparql_query
from superphy.shared.endpoint import update as _sparql_update
__author__ = "Stephen Kan"
__copyright__ = """
© Copyright Government of Canada 2012-2015. Funded by the Government of
Canada Genomics Research and Development Initiative
"""
__license__ = "ASL"
__version__ = "2.0"
__maintainer__ = "Stephen Kan"
__email__ = "stebokan@gmail.com"
def find_from_host(host):
    """
    Finds the correct isolation_from_host instance in Blazegraph given a host
    descriptor, or returns None if nothing is found.

    Args:
        host: a term used to identify the host (common or scientific name,
            generally)

    Returns:
        The URI fragment (text after '#') of the associated
        isolation_from_host object, or None when the query has no match.
    """
    results = _sparql_query(
        'PREFIX : <https://github.com/superphy#>\n'
        'SELECT ?p WHERE {?s ?o "%s"^^xsd:string . '
        '?s :is_object_of ?p . '
        '?p rdf:type :isolation_from_host}' % host
    )
    bindings = results["results"]["bindings"]
    if not bindings:
        # The docstring has always promised None for "not found"; indexing
        # an empty binding list previously raised IndexError instead.
        return None
    return bindings[0]["p"]["value"].split("#", 1)[1]
def find_syndrome(syndrome):
    """
    Finds the correct isolation_syndrome instance in Blazegraph given a term,
    or returns None if nothing is found.

    Args:
        syndrome: a term used to identify the isolation_syndrome

    Returns:
        The URI fragment (text after '#') of the associated
        isolation_syndrome, or None when the query has no match.
    """
    results = _sparql_query(
        'PREFIX : <https://github.com/superphy#>\n'
        'SELECT ?s WHERE {'
        '?s ?o "%s"^^xsd:string .'
        '?s rdf:type :isolation_syndrome .'
        '}' % syndrome
    )
    bindings = results["results"]["bindings"]
    if not bindings:
        # Return None on an empty result set as documented (previously an
        # IndexError).
        return None
    return bindings[0]["s"]["value"].split("#", 1)[1]
def find_source(source):
    """
    Finds the correct isolation_from_source instance in Blazegraph given a
    term, or returns None if nothing is found.

    Args:
        source: a term used to identify the isolation_from_source

    Returns:
        The URI fragment (text after '#') of the associated
        isolation_from_source, or None when the query has no match.
    """
    results = _sparql_query(
        'PREFIX : <https://github.com/superphy#>\n'
        'SELECT ?s WHERE {'
        '?s ?o "%s"^^xsd:string .'
        '?s rdf:type :isolation_from_source'
        '}' % source
    )
    bindings = results["results"]["bindings"]
    if not bindings:
        # Return None on an empty result set as documented (previously an
        # IndexError).
        return None
    return bindings[0]["s"]["value"].split("#", 1)[1]
def check_named_individual(name):
    """
    Checks to see if a given SPARQL URI is an instance of any RDF class
    encoded into the database.

    Args:
        name: the URI fragment of the instance (must be from the superphy
            ontology namespace)

    Returns: a boolean indicating if the instance exists or not in the database
    """
    results = _sparql_query(
        'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
        'PREFIX owl: <http://www.w3.org/2002/07/owl#>\n'
        'PREFIX : <https://github.com/superphy#>\n'
        'ASK { :%s rdf:type owl:NamedIndividual .}' % name
    )
    # ASK queries return a bare truth value under the "boolean" key.
    return results["boolean"]
def find_missing_sequences():
    """
    Finds Genome instances in Blazegraph that are missing a sequence and
    haven't failed sequence validation.

    Returns: a generator of (genome URI fragment, accession) tuples
    """
    results = _sparql_query(
        'PREFIX : <https://github.com/superphy#>\n'
        'PREFIX gfvo: <http://www.biointerchange.org/gfvo#>\n'
        'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
        'SELECT ?s ?acc WHERE { ?s rdf:type gfvo:Genome . \
?s :has_accession ?acc . '
        'MINUS { ?s :has_valid_sequence ?o }}'
    )
    # Lazily yield (URI fragment after '#', accession string) pairs.
    return ((result["s"]["value"].rsplit("#", 1)[1], result["acc"]["value"])
            for result in results["results"]["bindings"])
def check_validation(genome):
    """
    Report whether a genome has already been through sequence validation
    (regardless of whether the validation outcome was true or false).

    Args:
        genome(str): a genome's accession number

    Returns: a boolean indicating whether a :has_valid_sequence triple
        exists for the genome
    """
    ask_query = (
        'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
        'PREFIX owl: <http://www.w3.org/2002/07/owl#>\n'
        'PREFIX : <https://github.com/superphy#>\n'
        'ASK { :%s :has_valid_sequence ?o .}' % genome
    )
    response = _sparql_query(ask_query)
    # ASK queries return their truth value under the "boolean" key.
    return response["boolean"]
def find_duplicate_biosamples():
    """
    Checks to see if a BioSample id is unique or not; if it is not, identify
    all Genomes that refer to it (genomes whose sequence is from "WGS" are
    excluded from the count).

    Returns: a generator of (BioSample id, list of genome URI fragments)
        tuples, one per BioSample referenced by more than one Genome
    """
    results = _sparql_query(
        'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
        'PREFIX : <https://github.com/superphy#>\n'
        'PREFIX gfvo: <http://www.biointerchange.org/gfvo#>\n'
        'SELECT ?BioSample (GROUP_CONCAT( ?Genome ; SEPARATOR = "#") AS \
?Genomes) (COUNT (?Genome) AS ?Elements)\n'
        'WHERE { ?Genome rdf:type gfvo:Genome . ?Genome :has_biosample \
?BioSample . '
        'MINUS { ?Genome :has_sequence ?Sequence . ?Sequence :is_from \
"WGS"^^xsd:string .}}\n'
        'GROUP BY ?BioSample HAVING ( ?Elements > 1)'
    )
    # GROUP_CONCAT joins the genome URIs with '#'.  Each URI itself also
    # contains one '#', so splitting the concatenation on '#' interleaves
    # base URIs and fragments; the [1::2] slice keeps only the fragments.
    return (
        (
            result["BioSample"]["value"],
            result["Genomes"]["value"].split("#", )[1::2]
        ) for result in results["results"]["bindings"])
def find_core_genome(biosample):
    """
    Look up all Genomes with the given BioSample id whose sequence is
    labelled "CORE".

    Args:
        biosample: BioSample id of interest

    Returns: a list of genome URI fragments (text after '#') matching the
        BioSample that are core genomes
    """
    core_query = (
        'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
        'PREFIX : <https://github.com/superphy#>\n'
        'PREFIX gfvo: <http://www.biointerchange.org/gfvo#>\n'
        'SELECT ?Genome \n'
        'WHERE {'
        '?Genome rdf:type gfvo:Genome .'
        '?Genome :has_biosample "%s"^^xsd:string .'
        '?Genome :has_sequence ?Sequence .'
        '?Sequence :is_from "CORE"^^xsd:string .'
        '}' % biosample
    )
    results = _sparql_query(core_query)
    genomes = []
    for binding in results["results"]["bindings"]:
        # Keep only the URI fragment after the '#'.
        genomes.append(binding["Genome"]["value"].split("#", 1)[1])
    return genomes
def find_genome(accession):
    """
    Finds the genome instance in Blazegraph. Returns None if nothing is found.

    Args:
        accession: genome accession number

    Returns:
        The full SPARQL URI for the associated genome instance, or None if
        nothing is found.
    """
    query = (
        'PREFIX : <https://github.com/superphy#>\n'
        'SELECT ?s WHERE {?s :has_accession "%s" . }' % accession
    )
    results = _sparql_query(query)
    bindings = results["results"]["bindings"]
    if not bindings:
        # The docstring promises None when nothing is found; indexing an
        # empty binding list previously raised IndexError instead.
        return None
    return bindings[0]["s"]["value"]
def has_ref_gene(gene_name):
    """
    Determines if a particular gene already has a genome its sequence is
    referenced from.

    Args:
        gene_name(str): name of the gene

    Returns: a boolean, True if the gene has a copy typed as
        reference_gene, False if not
    """
    results = _sparql_query(
        'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
        'PREFIX : <https://github.com/superphy#>\n'
        'ASK {'
        ':%s :has_copy ?location .'
        '?location rdf:type :reference_gene'
        '}' % gene_name
    )
    # ASK queries return a bare truth value under the "boolean" key.
    return results["boolean"]
def delete_instance(name):
"""
Deletes an instance with a given SPARQL URI on the database by removing all
triples with it
(assumption:not a predicate, but then predicates aren't instances)
Args:
name: the SPARQL URI of the instance you want to delete
Prints out the response from the server regarding the SPARQL Update query
"""
print _sparql_update(
'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
'PREFIX : <https://github.com/superphy#>\n'
'DELETE { :%s ?property ?object . ?subject ?property :%s . }\n'
'WHERE {\n'
'{ :%s ?property ?object }\n'
'UNION\n'
'{ ?subject ?property :%s } }' % (name, name, name, nam |
saghul/aiodns | aiodns/__init__.py | Python | mit | 5,081 | 0.005511 |
import asyncio
import functools
import pycares
import socket
from typing import (
Any,
List,
Optional,
Set
)
from . import error
__version__ = '3.0.0'
__all__ = ('DNSResolver', 'error')
# Internal event flags used by _sock_state_cb/_handle_event to tag which
# direction a file descriptor is ready for.
READ = 1
WRITE = 2
# Map DNS record-type mnemonics accepted by DNSResolver.query() to the
# corresponding pycares query-type constants.
query_type_map = {'A' : pycares.QUERY_TYPE_A,
                  'AAAA' : pycares.QUERY_TYPE_AAAA,
                  'ANY' : pycares.QUERY_TYPE_ANY,
                  'CAA' : pycares.QUERY_TYPE_CAA,
                  'CNAME' : pycares.QUERY_TYPE_CNAME,
                  'MX' : pycares.QUERY_TYPE_MX,
                  'NAPTR' : pycares.QUERY_TYPE_NAPTR,
                  'NS' : pycares.QUERY_TYPE_NS,
                  'PTR' : pycares.QUERY_TYPE_PTR,
                  'SOA' : pycares.QUERY_TYPE_SOA,
                  'SRV' : pycares.QUERY_TYPE_SRV,
                  'TXT' : pycares.QUERY_TYPE_TXT
                  }
# Map DNS class mnemonics to the corresponding pycares query-class constants.
query_class_map = {'IN' : pycares.QUERY_CLASS_IN,
                   'CHAOS' : pycares.QUERY_CLASS_CHAOS,
                   'HS' : pycares.QUERY_CLASS_HS,
                   'NONE' : pycares.QUERY_CLASS_NONE,
                   'ANY' : pycares.QUERY_CLASS_ANY
                   }
class DNSResolver:
    """Asynchronous DNS resolver backed by pycares (c-ares).

    The c-ares channel reports its socket state via ``_sock_state_cb``; this
    class bridges those sockets into the asyncio event loop with
    add_reader/add_writer and drives the channel with ``process_fd``.
    """
    def __init__(self, nameservers: Optional[List[str]] = None,
                 loop: Optional[asyncio.AbstractEventLoop] = None,
                 **kwargs: Any) -> None:
        """Create a resolver; extra kwargs are passed to pycares.Channel."""
        self.loop = loop or asyncio.get_event_loop()
        assert self.loop is not None
        # The channel's socket-state callback is owned by this class; drop
        # any caller-supplied one.
        kwargs.pop('sock_state_cb', None)
        self._channel = pycares.Channel(sock_state_cb=self._sock_state_cb, **kwargs)
        if nameservers:
            self.nameservers = nameservers
        # File descriptors currently registered with the loop for reading
        # and writing, respectively.
        self._read_fds = set()  # type: Set[int]
        self._write_fds = set()  # type: Set[int]
        # Periodic timer that lets c-ares process its own query timeouts.
        self._timer = None  # type: Optional[asyncio.TimerHandle]
    @property
    def nameservers(self) -> List[str]:
        # Delegates to the pycares channel's server list.
        return self._channel.servers
    @nameservers.setter
    def nameservers(self, value: List[str]) -> None:
        self._channel.servers = value
    @staticmethod
    def _callback(fut: asyncio.Future, result: Any, errorno: int) -> None:
        """pycares completion callback: resolve *fut* with result or error."""
        if fut.cancelled():
            return
        if errorno is not None:
            fut.set_exception(error.DNSError(errorno, pycares.errno.strerror(errorno)))
        else:
            fut.set_result(result)
    def query(self, host: str, qtype: str, qclass: Optional[str] = None) -> asyncio.Future:
        """Issue a DNS query; returns a Future with the pycares result.

        Raises:
            ValueError: if *qtype*/*qclass* is not a recognized mnemonic.
        """
        try:
            qtype = query_type_map[qtype]
        except KeyError:
            raise ValueError('invalid query type: {}'.format(qtype))
        if qclass is not None:
            try:
                qclass = query_class_map[qclass]
            except KeyError:
                raise ValueError('invalid query class: {}'.format(qclass))
        fut = asyncio.Future(loop=self.loop)  # type: asyncio.Future
        cb = functools.partial(self._callback, fut)
        self._channel.query(host, qtype, cb, query_class=qclass)
        return fut
    def gethostbyname(self, host: str, family: socket.AddressFamily) -> asyncio.Future:
        """Async wrapper around the channel's gethostbyname."""
        fut = asyncio.Future(loop=self.loop)  # type: asyncio.Future
        cb = functools.partial(self._callback, fut)
        self._channel.gethostbyname(host, family, cb)
        return fut
    def gethostbyaddr(self, name: str) -> asyncio.Future:
        """Async wrapper around the channel's reverse lookup."""
        fut = asyncio.Future(loop=self.loop)  # type: asyncio.Future
        cb = functools.partial(self._callback, fut)
        self._channel.gethostbyaddr(name, cb)
        return fut
    def cancel(self) -> None:
        """Cancel all pending queries on the channel."""
        self._channel.cancel()
    def _sock_state_cb(self, fd: int, readable: bool, writable: bool) -> None:
        """Called by pycares whenever a channel socket changes state.

        Registers/unregisters *fd* with the event loop and keeps the
        timeout timer alive while any socket is active.
        """
        if readable or writable:
            if readable:
                self.loop.add_reader(fd, self._handle_event, fd, READ)
                self._read_fds.add(fd)
            if writable:
                self.loop.add_writer(fd, self._handle_event, fd, WRITE)
                self._write_fds.add(fd)
            if self._timer is None:
                self._timer = self.loop.call_later(1.0, self._timer_cb)
        else:
            # socket is now closed
            if fd in self._read_fds:
                self._read_fds.discard(fd)
                self.loop.remove_reader(fd)
            if fd in self._write_fds:
                self._write_fds.discard(fd)
                self.loop.remove_writer(fd)
            if not self._read_fds and not self._write_fds and self._timer is not None:
                self._timer.cancel()
                self._timer = None
    def _handle_event(self, fd: int, event: Any) -> None:
        """Event-loop callback: tell c-ares which fd is ready for what."""
        read_fd = pycares.ARES_SOCKET_BAD
        write_fd = pycares.ARES_SOCKET_BAD
        if event == READ:
            read_fd = fd
        elif event == WRITE:
            write_fd = fd
        self._channel.process_fd(read_fd, write_fd)
    def _timer_cb(self) -> None:
        """Periodic tick so c-ares can process query timeouts.

        Re-arms itself while any socket is active; passing ARES_SOCKET_BAD
        for both fds asks the channel to only process timeouts.
        """
        if self._read_fds or self._write_fds:
            self._channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD)
            self._timer = self.loop.call_later(1.0, self._timer_cb)
        else:
            self._timer = None
|
BackupTheBerlios/pyhttpd-svn | core/baseHTTPServer.py | Python | gpl-2.0 | 2,489 | 0.045802 | # -*- coding: utf-8 -*-
##################################################################
# pyHTTPd
# $Id$
# (c) 2006 by Tim Taubert
##################################################################
import socket, sys, os, threading
class pHTTPServer:
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = False
daemon_threads = False
def __init__(self, server_address, RequestHandlerClass):
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
self.socket = socket.socket(self.address_family, self.socket_type)
self.server_bind()
self.server_activate()
def server_bind(self):
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
def server_activate(self):
self.socket.listen(self.request_queue_size)
def serve_forever(self):
while 1:
self.handle_request()
def get_request(self):
return self.socket.accept()
def handle_request(self):
try:
request, client_address = self.get_reque | st()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def verify_request(self, request, client_address):
return True
def process_request_thread(self, request, client_address):
try:
self.finish_request(request, client_address)
self.close_req | uest(request)
except:
self.handle_error(request, client_address)
self.close_request(request)
def process_request(self, request, client_address):
t = threading.Thread(target = self.process_request_thread,
args = (request, client_address))
if self.daemon_threads:
t.setDaemon (1)
t.start()
def server_close(self):
self.socket.close()
def finish_request(self, request, client_address):
self.RequestHandlerClass(request, client_address, self)
def close_request(self, request):
request.close()
def fileno(self):
return self.socket.fileno()
def handle_error(self, request, client_address):
print '-'*40
print 'Exception happened during processing of request from',
print client_address
import traceback
traceback.print_exc() # XXX But this goes to stderr!
print '-'*40
|
openconfig/oc-pyang | openconfig_pyang/plugins/util/html_emitter.py | Python | apache-2.0 | 14,661 | 0.017871 | """
Copyright 2015 Google, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Implements an HTML documentation emitter for YANG modules
"""
import os
import re
from xml.etree import ElementTree as ET
from jinja2 imp | ort Environment, FileSystemLoader
from .doc_emitter import DocEmitter
from .yangdoc_defs import YangDocDefs
from . import html_helper
fro | m . import yangpath
class HTMLEmitter(DocEmitter):
    def genModuleDoc(self, mod, ctx):
        """HTML emitter for top-level module documentation given a
        ModuleDoc object.

        Builds three HTML fragments (module header, typedefs, identities)
        and stores them under self.moduledocs[module name].
        """
        ht = html_helper.HTMLHelper()
        # TODO: this is far too hardcoded
        mod_div = ht.open_tag("div", newline=True)
        # module name
        mod_div += ht.h1(mod.module_name, {"class": "module-name", "id": ("mod-" + ht.gen_html_id(mod.module_name))},2,True)
        if 'version' in mod.module.attrs:
            mod_div += ht.h4("openconfig-version: " + mod.module.attrs['version'], {"class": "module-header"},2,True)
        # module description header
        mod_div += ht.h4("Description", {"class": "module-desc-header"},2,True)
        # module description text
        paragraphs = text_to_paragraphs(mod.module.attrs['desc'])
        for para in paragraphs:
            mod_div += ht.para(para, {"class": "module-desc-text"},2,True)
        mod_div += ht.h4("Imports", {"class": "module-header"},2,True)
        mod_div += "<p class=\"module-desc-text\">"
        for i in mod.module.attrs['imports']:
            mod_div += "%s<br>\n" % i
        mod_div += "</p>\n"
        mod_div += ht.close_tag(newline=True)
        # initialize and store in the module docs
        self.moduledocs[mod.module_name] = {}
        self.moduledocs[mod.module_name]['module'] = mod_div
        self.moduledocs[mod.module_name]['data'] = ""
        # handle typedefs
        if len(mod.typedefs) > 0:
            types_div = ht.open_tag("div", newline=True)
            types_div += ht.h3("Defined types", {"class": "module-types-header", "id": mod.module_name + "-defined-types"},2,True)
            for (typename, td) in mod.typedefs.items():
                types_div += ht.h4(typename,{"class": "module-type-name","id": "type-" + ht.gen_html_id(typename)},2,True)
                types_div += ht.para(ht.add_tag("span","description:" + ht.br(newline=True), {"class": "module-type-text-label"}) + td.attrs['desc'],{"class": "module-type-text"},2,True)
                types_div += gen_type_info(td.typedoc, 2)
                # emit any additional leaf properties the typedef carries
                for prop in YangDocDefs.type_leaf_properties:
                    if prop in td.attrs:
                        types_div += ht.para(ht.add_tag("span", prop,{"class": "module-type-text-label"}) + ": " + td.attrs[prop],{"class": "module-type-text"},2,True)
            types_div += ht.close_tag(newline=True)
        else:
            # module doesn't have any typedefs
            types_div = ""
        # store the typedef docs
        self.moduledocs[mod.module_name]['typedefs'] = types_div
        # handle identities
        if len(mod.identities) > 0:
            idents_div = ht.open_tag("div", newline=True)
            idents_div += ht.h3("Identities", {"class": "module-types-header", "id": mod.module_name + "-identities"},2,True)
            for base_id in mod.base_identities:
                idents_div += ht.h4("base: " + base_id,{"class": "module-type-name","id":"ident-" + ht.gen_html_id(base_id)},2,True)
                idents_div += ht.para(ht.add_tag("span","description:" + ht.br(newline=True), {"class": "module-type-text-label"}) + mod.identities[base_id].attrs['desc'],{"class": "module-type-text"},2,True)
                # collect all of the identities that have base_id as
                # their base
                # TODO(aashaikh): this needs to be updated to handle nested identities / multiple inheritance
                derived = { key:value for key,value in mod.identities.items() if value.attrs['base'] == base_id }
                # emit the identities derived from the current base
                for (idname, id) in derived.items():
                    idents_div += ht.h4(idname,{"class": "module-type-name","id":"ident-" + ht.gen_html_id(idname)},2,True)
                    idents_div += ht.para(ht.add_tag("span","description:",{"class": "module-type-text-label"}) + ht.br(newline=True) + id.attrs['desc'],{"class":"module-type-text"},2,True)
                    idents_div += ht.para(ht.add_tag("span", "base identity: ",{"class": "module-type-text-label"})
                                          + ht.add_tag("a", id.attrs['base'],{"href":"#ident-"+ht.gen_html_id(id.attrs['base'])}),
                                          {"class":"module-type-text"},2,True)
            idents_div += ht.close_tag(newline=True)
        else:
            # module doesn't have any identities
            idents_div = ""
        # store the identity docs
        self.moduledocs[mod.module_name]['identities'] = idents_div
        gen_nav_tree(self, mod, 0)
    def genStatementDoc(self, statement, ctx, level=1):
        """HTML emitter for module data node given a StatementDoc
        object.

        Appends the node's HTML section to the owning module's 'data'
        fragment in self.moduledocs.
        """
        if ctx.opts.no_structure and statement.keyword in ctx.skip_keywords:
            return
        ht = html_helper.HTMLHelper()
        s_div = ht.open_tag("div", {"class":"statement-section"}, newline=True)
        if ctx.opts.strip_namespace:
            pathstr = yangpath.strip_namespace(statement.attrs['path'])
        else:
            pathstr = statement.attrs['path']
        # for 'skipped' nodes, just print the path
        if statement.keyword in self.path_only:
            s_div += ht.h4(pathstr,None,level,True)
            s_div += ht.close_tag(newline=True)
            return s_div
        # statement path and name
        (prefix, last) = yangpath.remove_last(pathstr)
        prefix_name = ht.add_tag("span", prefix + "/", {"class": "statement-path"})
        statement_name = prefix_name + ht.br(level,True) + statement.name
        s_div += ht.h4(statement_name, {"class": "statement-name","id":statement.attrs['id']},level,True)
        # node description
        if 'desc' in statement.attrs:
            s_div += ht.para(ht.add_tag("span", "description",{"class": "statement-info-label"}) + ":<br />" + statement.attrs['desc'],{"class": "statement-info-text"},level,True)
        s_div += ht.close_tag(newline=True)
        # check for additional properties (key membership, config state)
        notes = ""
        if statement.attrs['is_key']:
            notes += " (list key)"
        if statement.attrs['config']:
            notes += " (rw)"
        else:
            notes += " (ro)"
        keyword = statement.keyword + notes
        s_div += ht.para(ht.add_tag("span", "nodetype",{"class": "statement-info-label"}) + ": " + keyword,{"class": "statement-info-text"},level,True)
        # s_div += ht.para(ht.add_tag("span", "path",{"class":"statement-info-label"}) + ": " + pathstr,{"class":"statement-info-text"},level,True)
        # handle list nodes: emit a linked entry per key
        if statement.attrs['is_list']:
            list_keys = ""
            for key in statement.attrs['keys']:
                list_keys += " [" + ht.add_tag("a", key[0], {"href":"#" + key[1]}) + "]"
            s_div += ht.para(ht.add_tag("span", "list keys",{"class": "statement-info-label"}) + ": " + list_keys,{"class": "statement-info-text"},level,True)
        if statement.typedoc:
            s_div += gen_type_info(statement.typedoc, level)
        # emit any additional leaf properties present on the node
        for prop in YangDocDefs.type_leaf_properties:
            if prop in statement.attrs:
                s_div += ht.para(ht.add_tag("span", prop, {"class": "statement-info-label"}) + ": " + statement.attrs[prop],{"class": "statement-info-text"},level,True)
        # add this statement to the collection of data
        self.moduledocs[statement.module_doc.module_name]['data'] += s_div
def emitDocs(self, ctx, section=None):
"""Return the HTML output for all modules,
or single section if specified"""
ht = html_helper.HTMLHelper()
docs = []
navs = []
navids = []
# create the documentation elements for each module
for module_name in self.moduledocs:
# check if the module has no data nodes
if 'data' not in self.moduledocs[module_name]:
self.moduledocs[module_name]['data'] = ""
else:
# create the header for the |
gtuinsaat/TRKYH_Veritabani_programi-master | Kodlar/S-r-m 1/aad_kyh_process_function.py | Python | mit | 26,051 | 0.021569 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 14:46:48 2017
@author: User
"""
#%% MODULE IMPORT
def aad_process(file_url):
from IPython.core.display import display, HTML; display(HTML("<style>.container { width:95% !important; }</style>"))
from time import gmtime, strftime
import datetime
import numpy as np
import matplotlib.pyplot as plt
import os
# ----------------------------------
if os.path.isfile('earthquake_temp.txt')==1 : os.remove('earthquake_temp.txt') # geçici EQE dosyasını kaldıralım.
if os.path.isdir("temp_images")==0: os.mkdir('temp_images'); # Geçici şekil klasörünü yaratalım.
# ----------------------------------
print( strftime('-'*60+"\n%Y_%m%d-%H:%M:%S", gmtime()) + " Execution Started \n")
#==============================================================================
#%%
# EQE File URL reading
'''
STRONG GROUND MOTION RECORDS OF TURKIYE
PLACE : DUZCE MERKEZ METEOROLOJI ISTASYON MUDURLUGU
EARTHQUAKE DATE : 17/08/1999 00:01:39.07 (GMT)
'''
# file_url= "http://kyhdata.deprem.gov.tr/2K/genAcc.php?dst=TU9EVUxFX05BTUU9ZXZ0RmlsZSZNT0RVTEVfVEFTSz1kb3dubG9hZCZNT0RVTEVfU1VCVEFTSz1BTEwmTU9EVUxFX1RBUkdFVD1vbGQmVEFSR0VUPTE5OTkwODE3MDAwMTM5XzgxMDE%3D"
#file_url= 'http://kyhdata.deprem.gov.tr/2K/genAcc.php?dst=TU9EVUxFX05BTUU9ZXZ0RmlsZSZNT0RVTEVfVEFTSz1kb3dubG9hZCZNT0RVTEVfU1VCVEFTSz1BTEwmTU9EVUxFX1RBUkdFVD1vbGQmVEFSR0VUPTIwMTcwNzE5MDQ0MjUwXzEwMjE%3D'
#file_url="http://kyhdata.deprem.gov.tr/2K/genAcc.php?dst=TU9EVUxFX05BTUU9ZXZ0RmlsZSZNT0RVTEVfVEFTSz1kb3dubG9hZCZNT0RVTEVfU1VCVEFTSz1BTEwmTU9EVUxFX1RBUkdFVD1vbGQmVEFSR0VUPTIwMTcwNzIwMjIzMTEwXzQ4MDk%3D"
#%%
# EQE geçici dosya okuma
import urllib.request
urllib.request.urlretrieve(file_url , u'earthquake_temp.txt') ;
lines = [ line.rstrip('\n') for line in open('earthquake_temp.txt',encoding='Windows-1250')]
# HEADERLARIN DEĞİŞKENLERE ATANMASI
place = lines[1].split(':')[1][1:]
eqe_date = lines[2].split(' ')[-3]
eqe_time = lines[2].split(' ')[-2]
epicenter_coordinates = lines[3].split(':')[1][1:].split('-')
epicenter_latitude, epicenter_longitude = float(epicenter_coordinates[0][0:-1]) , float( epicenter_coordinates[1][0:-1])
eqe_depth = lines[4].split(':')[1][1:]
eqe_magnitude = lines[5].split(':')[1][1:]
station_id = lines[6].split(':')[1][1:]
station_coordinates = lines[7].split(':')[1][1:].split('-')
station_latitude, station_longitude = float(station_coordinates[0][0:-1]) , float( station_coordinates[1][0:-1])
station_altiude = lines[8].split(':')[1][1:]
recorder_type = lines[9].split(':')[1][1:]
recorder_serial_no = lines[10].split(':')[1][1:]
record_date = lines[11].split(' ')[-3]
record_time = lines[11].split(' ')[-2]
number_of_data = int( lines[12].split(':')[1][1:] )
sampling_interval = float( lines[13].split(':')[1][1:] )
raw_PGA_NS = lines[14].split(' ')[-5]
raw_PGA_EW = lines[14].split(' ')[-3]
raw_PGA_UD = lines[14].split(' ')[-1]
print( strftime('-'*60+"\n%Y_%m%d-%H:%M:%S", gmtime()) + " Başlık okuma tamam \n")
#%%
# EPICENTER HARİTASININI ÜRETİLMESİ ve gösterilmesi
from staticmap import StaticMap, CircleMarker, Line
m = StaticMap(500, 300, url_template='http://a.tile.osm.org/{z}/{x}/{y}.png')
marker_outline = CircleMarker((epicenter_longitude , epicenter_latitude), 'white', 18)
marker = CircleMarker((epicenter_longitude , epicenter_latitude), '#0036FF', 12)
m.add_marker(marker_outline)
m.add_marker(marker)
marker_outline = CircleMarker((station_longitude,station_latitude), 'red', 18)
marker = CircleMarker((station_longitude,station_latitude), '#0036FF', 12)
m.add_marker(marker_outline)
m.add_marker(marker)
m.add_line(Line(((epicenter_longitude , epicenter_latitude), (station_longitude,station_latitude)), 'blue', 3))
image = m.render(zoom=7)
image.save('./temp_images'+'/1-epicenter.png')
image.show()
print( strftime('-'*60+"\n%Y_%m%d-%H:%M:%S", gmtime()) + " Harita çizimi tamam \n")
#%%
# ISTASYON BILGILERININ İNCELENMESİ
time = sampling_interval * np.linspace(0 , number_of_data, num=number_of_data)
#print("Bugün için incelediğimiz dosya : "+ place +" istasyonunda kaydedilen depremdir")
#print( strftime("Incelenen \n"+ place +"\nistasyonunda kaydedilen depremdir"))
#%%
# PANDAS ile dosyayı okuma ve data bloğunun işlenmesi
import pandas as pd
data_raw_acceleration = pd.read_csv('earthquake_temp.txt', header=17, delim_whitespace = True , skipfooter=1 , engine='python')
data_raw_acceleration.describe()
#%% # HER BİR BİLEŞEN İÇİN HESAPLAMANIN YAPILMASI
# Ham ivme grafiklerinin çizilmesi
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=[10,10] )
for counter , dogrultu_ismi in enumerate(['N-S','E-W','U-D']):
acceleration_ham = data_raw_acceleration[dogrultu_ismi] # Ham ivme verisi
plt.subplot(3,1,counter+1) ,
plt.plot(time, acceleration_ham )
plt.grid() ; plt.title(data_raw_acceleration.columns[counter]+' COMPONENT'),
if counter == 2: plt.xlabel('time (sec)');
plt.ylabel('Ground Acceleration (gal)')
plt.savefig('./temp_images/'+'2-ham_veriler.png')
print( strftime('-'*60+"\n%Y_%m%d-%H:%M:%S", gmtime()) + " Ham ivmeler grafiği tamam \n")
#%%
# İVME FİLTRELEME İŞLEMLERİ
#==============================================================================
from scipy import integrate
import numpy as np
import scipy.signal as signal
#==============================================================================
#= FONKSİYONLARIN TANIMLANMASI ==============================================================================
from scipy.signal import butter, lfilter
def butter_bandpass(lowcut, highcut, fs, order=4):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=4):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
# Sample rate and desired cutoff frequencies (in Hz).
fs = 100
lowcut = .05
highcut = 20.0
def aad_fft(accg,sampling_interval):
from scipy.fftpack import fft
import numpy as np
# Number of sample points
N = len(accg)
# sample spacing
T = sampling_interval
x = np.linspace(0.0, N*T, N)
y = accg
yf = fft(y) # This is the main part
xf = np.linspace(0.0, 1.0/(2.0*T), N//2)
amplitudes = 2.0/N * np.abs(yf[0:N//2])
return(xf , amplitudes)
#==============================================================================
data_raw_velocity = pd.DataFrame(column | s=['N-S','E-W','U-D']) # Bu boş bir dataframe
data_raw_displacement = pd.DataFrame(columns=['N-S','E-W','U-D']) # Bu boş bir dataframe
data_filtered_acceleration = pd.DataFrame(columns=['N-S','E-W','U-D']) # Bu boş bir dataframe
data_filtered_velocity = pd.DataFrame(columns=['N-S','E-W','U-D']) # Bu boş bir dataframe
data_filtered_displacement = pd.DataFrame(columns=['N-S','E-W','U-D']) # Bu boş bir dataframe
data_fft= pd.DataFrame(columns=['Frekans','N-S','E-W','U-D']) # Bu boş bir data | frame
plt.rcParams.update({'font.size': 10})
duration_Arias_intensity = [] # duration değerleri
for counter , dogrultu_ismi in enumerate(['N-S','E-W','U-D']):
#= HAM ZAMAN SERİLERİNİN ÇIKARTILMASI ======================================================================
acceleration = data_raw_acceleration[dogrultu_ismi] / 100 # Burada birim artık "m/s2" olmaktadır.
# acceleration = signal.detrend(acceleration, type='linear')
velocity = sampling_interval * integrate.cumtrapz(acceleration)
velocity = np.concatenate([[0],velocity]) # Serinin başına bir tane 0 ekledim.
# velocity = signal.detrend(velocity, type='linear')
data_raw_velocity[dogrultu_ismi] = velo |
Audiveris/omr-dataset-tools | tools/addNoise/FileOperations.py | Python | agpl-3.0 | 2,085 | 0.00048 | # -*- coding: utf-8 -*-
__author__ = 'Pulimootil'
"""
This class will handle all the file related operations
"""
import os
import cv2
class FileOperatoins(object):
    """Handles file IO for the noise-addition tool.

    Validates the input image/XML pair, loads the image via OpenCV, and
    writes the distorted output image.

    NOTE(review): the class keeps the original misspelled name
    'FileOperatoins' because external callers import it by that name.
    """

    def __init__(self, filename, xmlFile):
        self.checkFile(filename, xmlFile)

    def setOutput(self, outputFolder):
        '''Set the output folder used by setOutputImageName().'''
        self.outputFolder = outputFolder

    def getImage(self, color):
        '''Load and return the image (color=True for BGR, else grayscale).'''
        self.img = cv2.imread(self.imgFilename, 1 if color else 0)
        return self.img

    def setDistortedImage(self, img):
        '''Set the distorted image that writeImage() will save.'''
        self.distortedImage = img

    def setOutputImageName(self, tag):
        '''Build the output path: <outputFolder>/<basename><tag><ext>.'''
        try:
            self.outputName = self.outputFolder + os.path.sep + \
                              os.path.splitext(os.path.basename(self.imgFilename))[0] + tag + \
                              os.path.splitext(self.imgFilename)[1]
        except AttributeError:
            # Output folder not set yet: default to ./output and retry once.
            self.setOutput(os.path.join(os.getcwd(), 'output'))
            self.setOutputImageName(tag)

    def writeImage(self):
        '''Save the distorted image file to the computed output name.'''
        try:
            cv2.imwrite(self.outputName, self.distortedImage)
        except AttributeError:
            # Parenthesized single-arg print behaves the same on py2 and py3.
            print("Make sure that OutputImageName and Distorted images are set")

    def show(self, orig):
        '''Display the image in a blocking OpenCV window.'''
        cv2.namedWindow("Image")
        cv2.imshow("Image", orig)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def checkFile(self, imgfilename, xmlFilename):
        '''Validate both input paths; raise IOError for a missing file.'''
        if os.path.isfile(imgfilename):
            self.imgFilename = imgfilename
        else:
            # BUG FIX: the original raised a plain string, which itself is a
            # TypeError at runtime ("exceptions must derive from BaseException"
            # / string exceptions removed) -- raise a real exception instead.
            raise IOError('Invalid image filename:' + imgfilename)
        if os.path.isfile(xmlFilename):
            self.xmlFilename = xmlFilename
        else:
            raise IOError('Invalid xml filename:' + xmlFilename)
|
tschmorleiz/amcat | amcat/models/coding/fieldcolumn.py | Python | agpl-3.0 | 1,767 | 0.010753 | from amcat.tools import toolkit
from amcat.tools.table.table3 import ObjectColumn
import logging; log = logging.getLogger(__name__)
class FieldColumn(ObjectColumn):
    """ObjectColumn based on a AnnotationSchemaField.

    Wraps one annotation-schema field so a coding table can pull that
    field's value out of each row's coded article/sentence values object.
    """
    def __init__(self, field, article=None, fieldname=None, label=None, fieldtype=None):
        """Create a column for ``field``; unspecified arguments default from the field itself."""
        if fieldtype is None: fieldtype = field.getTargetType()
        if article is None: article = field.schema.isarticleschema
        if fieldname is None: fieldname = field.fieldname
        if label is None: label = field.label
        ObjectColumn.__init__(self, label, fieldname, fieldtype=fieldtype)
        self.field = field
        # article=True -> read from row.ca (article coding); else row.cs (sentence coding).
        self.article = article
        self.valuelabels = {}
    def getUnit(self, row):
        """Return the coded unit for this row: row.ca for article schemas, row.cs otherwise."""
        return row.ca if self.article else row.cs
    def getCell(self, row):
        """Return this column's value for ``row``; re-raises AttributeError after logging."""
        try:
            val = self.getValue(row)
            return val
        except AttributeError, e:
            log.debug("AttributeError on getting %s.%s: %s" % (row, self.field, e))
            raise
        # NOTE(review): unreachable -- both branches above return or raise.
        return None
    def getValues(self, row):
        """Return the unit's ``values`` object, or None when the row has no unit."""
        unit = self.getUnit(row)
        if unit is None: return None
        values = unit.values
        return values
    def getValue(self, row, fieldname = None):
        """Return attribute ``fieldname`` (default: this column's field) from the row's values."""
        if not fieldname: fieldname = self.field.fieldname
        log.debug(">>>>>> getValue(%r, %r)" % (row, fieldname))
        values = self.getValues(row)
        if values is None: return None
        try:
            val = getattr(values, fieldname)
        except AttributeError:
            log.error("%r has no field %r, but it has %r" % (values, fieldname, dir(values)))
            raise
        log.debug(">>>>>> values=%r, fieldname=%r, --> val=%s" % (values, fieldname, val))
        return val
|
dwalton76/ev3dev-lang-python | utils/console_fonts.py | Python | mit | 2,278 | 0.004829 | #!/usr/bin/env micropython
from time import sleep
from sys import stderr
from os import listdir
from ev3dev2.console import Console
"""
Used to iterate over the sys | tem console fonts (in /usr/share/consolefonts) and show the max row/col.
Font names consist of three parameters - codeset, font face and font size. The codeset specifies
what characters will be supported by the | font. The font face determines the general look of the font. Each
font face is available in certain possible sizes.
For Codeset clarity, see https://www.systutorials.com/docs/linux/man/5-console-setup/#lbAP
"""
def show_fonts():
    """
    Walk every Lat15 console font, showing each one on the EV3 LCD together
    with the column/row capacity it yields, then repaint the screen with a
    digit grid for each font (ordered by capacity).

    Note: ``Terminus`` fonts are "thinner"; ``TerminusBold`` and ``VGA`` offer
    more contrast on the LCD console and are thus more readable; the
    ``TomThumb`` font is waaaaay too small to read!
    """
    console = Console()
    font_files = sorted(
        name for name in listdir("/usr/share/consolefonts/")
        if name.startswith("Lat15") and name.endswith(".psf.gz")
    )
    capacities = []
    for font_name in font_files:
        console.set_font(font_name, True)
        console.text_at(font_name, 1, 1, False, True)
        console.clear_to_eol()
        console.text_at("{}, {}".format(console.columns, console.rows),
                        column=2,
                        row=4,
                        reset_console=False,
                        inverse=False)
        print("{}, {}, \"{}\"".format(console.columns, console.rows, font_name), file=stderr)
        capacities.append((console.columns, console.rows, font_name))
    capacities.sort(key=lambda entry: (entry[0], entry[1], entry[2]))
    # Fill the screen with each column number modulo 10, inverting every even
    # row so adjacent rows are easy to tell apart.
    for cols, rows, font_name in capacities:
        print(cols, rows, font_name, file=stderr)
        console.set_font(font_name, True)
        for row in range(1, rows + 1):
            for col in range(1, cols + 1):
                console.text_at("{}".format(col % 10), col, row, False, (row % 2 == 0))
        console.text_at(font_name.split(".")[0], 1, 1, False, True)
        console.clear_to_eol()
# Show the fonts; you may want to adjust the ``startswith`` filter to show other codesets.
show_fonts()
sleep(5)  # keep the final screen visible briefly before the program exits
|
stinbetz/nice_ride_charting | datify.py | Python | mit | 7,682 | 0.002603 | import csv
import re
import datetime
import string
import collections
def get_nr_data():
    ''' returns a list of lists each entry represents one row of NiceRide data
    in form -- [[11/1/2015, 21:55], '4th Street & 13th Ave SE', '30009',
    [11/1/2015, 22:05], 'Logan Park', '30104', '565', 'Casual'] where the
    indices are
    0: [start_date, start_time]
    1: start_station,
    2: start_terminal,
    3: [end_date, end_time]
    4: end_station,
    5: end_terminal,
    6: duration (seconds),
    7: account_type (member/casual)
    '''
    nr_data = []
    # BUG FIX: the original iterated the reader AND called reader.next()
    # inside the loop, which silently dropped every other row (and .next()
    # is Python-2-only).  Iterate the reader directly instead.
    with open('NiceRideData2015.csv', 'r') as nr_datafile:
        for line in csv.reader(nr_datafile):
            nr_data.append(line)
    nr_data = nr_data[1:]  # drop the header row
    for index, line in enumerate(nr_data):
        # Parse the "M/D/YYYY HH:MM" start stamp into a [date, time] pair.
        date_data = re.match(r'(\d+)/(\d+)/(\d+) (\d+):(\d+)', line[0])
        start_date = datetime.date(int(date_data.group(3)),
                                   int(date_data.group(1)),
                                   int(date_data.group(2)))
        start_time = datetime.time(int(date_data.group(4)),
                                   int(date_data.group(5)),
                                   0)
        nr_data[index][0] = [start_date, start_time]
        # Same parsing for the end stamp (column 3).
        date_data = re.match(r'(\d+)/(\d+)/(\d+) (\d+):(\d+)', line[3])
        end_date = datetime.date(int(date_data.group(3)),
                                 int(date_data.group(1)),
                                 int(date_data.group(2)))
        end_time = datetime.time(int(date_data.group(4)),
                                 int(date_data.group(5)),
                                 0)
        nr_data[index][3] = [end_date, end_time]
    return nr_data
def get_wx_data(filename):
    ''' returns a list of lists, each entry represents a day of weather data in
    the form -- ['1', '30', '11', '21', '5', '44', '0', 'T', 'T', '3', '10.4',
    '20', '330', 'M', 'M', '8', '26', '330'] where the indices are
    0: day_of_month,
    1: max_temp,
    2: min_temp,
    3: avg_temp,
    4: dev_from_norm,
    5: heating/cooling_day,
    6: tot_precip,
    7: tot_snowfall,
    8: snow_depth,
    9: avg_wind_speed,
    10: max_wind_speed,
    11: wind_dir,
    12: min_sun (if reported),
    13: percent_possible_sun (if reported),
    14: avg_sky_cover [0(clear) - 10(cloudy)],
    15: wx_event
        [
        1: fog,
        2: fog reducing vis to < 1/4 mile,
        3: thunder,
        4: ice pellets,
        5: hail,
        6: glaze/rime,
        7: blowing particulate < 1/4 mile vis,
        8: smoke/haze,
        9: blowing snow,
        X: tornado
        ],
    16: max_wind_gust,
    17: max_wind_gust_dir
    '''
    with open('wx_data/%s' % filename, 'r') as wxfile:
        wxdata = wxfile.readlines()
    wxdata = wxdata[13:]  # the first 13 lines are report headers
    for index, line in enumerate(wxdata):
        # str.split() with no argument never yields empty strings, so no
        # extra filtering is needed.  (The original used the deprecated
        # string.split() module function, which was removed in Python 3.)
        wxdata[index] = line.strip().split()
    return wxdata
def get_all_wx_data():
    '''combines all months of weather data into a dict with month abbrevs as
    keys'''
    month_names = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
                   'jul', 'aug', 'sep', 'oct', 'nov', 'dec')
    wx_data = collections.OrderedDict()
    # Month files are named 1_wx.dat .. 12_wx.dat; keep calendar order.
    for month_number, abbrev in enumerate(month_names, 1):
        wx_data[abbrev] = get_wx_data('%s_wx.dat' % month_number)
    return wx_data
def monthindex(month):
    ''' given a three char month abbreviation, return the integer month index'''
    abbrevs = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
               'jul', 'aug', 'sep', 'oct', 'nov', 'dec')
    try:
        return abbrevs.index(month) + 1
    except ValueError:
        # Match the original if/elif ladder: anything unrecognised falls
        # through to December.
        return 12
def main():
    '''Join the ride records with daily weather and write the summary file.'''
    ride_rows = get_nr_data()
    weather_by_month = get_all_wx_data()
    summary = collections.OrderedDict()
    for month, days in weather_by_month.items():
        for day in days:
            # day[0] is the day-of-month string from the weather report.
            date_of_day = datetime.date(2015, monthindex(month), int(day[0]))
            ride_count = len([row for row in ride_rows if row[0][0] == date_of_day])
            summary['%s_%s' % (month, day[0])] = {
                'avg_temp': int(day[3]),
                'precip': int(day[6]),
                'ride_count': ride_count,
            }
    new_print(summary)
def new_print(table):
    '''Write each table entry as one dict-style line to NiceRideDataOut.dat.'''
    with open('NiceRideDataOut.dat', 'w') as outfile:
        for key in table:
            outfile.write("{'%s': %s}\n" % (key, table[key]))
def print_data(table):
    '''Print the combined table grouped by month, months in calendar order,
    keys sorted within each month.

    (The original repeated the same group-then-print logic twelve times,
    once per month prefix; collapsed into a single loop.  The parenthesized
    single-argument print form behaves identically on Python 2 and 3.)
    '''
    month_prefixes = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
                      'jul', 'aug', 'sep', 'oct', 'nov', 'dec')
    for prefix in month_prefixes:
        # Collect this month's rows; keys not matching any prefix are
        # silently skipped, exactly as before.
        month_rows = dict((key, value) for key, value in table.items()
                          if key.startswith(prefix))
        for key in sorted(month_rows):
            print("%s: %s" % (key, month_rows[key]))
if __name__ == '__main__':  # script entry point
    main()
|
Sinar/popit_ng | popit/tests/tests_person_api.py | Python | agpl-3.0 | 50,109 | 0.003592 | __author__ = 'sweemeng'
from rest_framework import status
from popit.signals.handlers import *
from popit.models import *
from django.conf import settings
import json
import logging
from popit.tests.base_testcase import BasePopitTestCase
from popit.tests.base_testcase import BasePopitAPITestCase
from popit.serializers.minimized import MinPersonSerializer
# TODO: Test multilingual behavior. To make behavior clear
# TODO: Need new fixtures
class PersonSerializerTestCase(BasePopitTestCase):
    def test_fetch_non_empty_field_person_serializer(self):
        """Serializing a fixture person exposes its populated 'name' field."""
        person = Person.objects.untranslated().get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
        serializer = PersonSerializer(person, language='en')
        data = serializer.data
        self.assertEqual(data["name"], "John")
    def test_fetch_empty_field_person_serializer(self):
        """An unset field serializes to the empty string, not None."""
        person = Person.objects.untranslated().get(id='ab1a5788e5bae955c048748fa6af0e97')
        serializer = PersonSerializer(person, language='en')
        data = serializer.data
        self.assertEqual(data["given_name"], "")
    def test_fetch_not_empty_relation_person_serializer(self):
        """A person with other_names fixtures serializes them as a truthy list."""
        person = Person.objects.untranslated().get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
        serializer = PersonSerializer(person, language='en')
        data = serializer.data
        self.assertTrue(data["other_names"])
    def test_fetch_empty_relation_person_serializer(self):
        """A person without other_names fixtures serializes them as a falsy value."""
        person = Person.objects.untranslated().get(id='078541c9-9081-4082-b28f-29cbb64440cb')
        serializer = PersonSerializer(person, language='en')
        data = serializer.data
        self.assertFalse(data["other_names"])
    def test_create_person_with_all_field_serializer(self):
        """A full payload (scalars + every nested relation) validates and persists."""
        # NOTE(review): "death_data" looks like a typo for "death_date"; the
        # serializer presumably ignores unknown keys -- confirm.
        person_data = {
            "name": "joe",
            "family_name": "doe",
            "given_name": "joe jambul",
            "additional_name": "not john doe",
            "gender": "unknown",
            "summary": "person unit test api",
            "honorific_prefix": "Chief",
            "honorific_suffix": "of the fake people league",
            "biography": "He does not exists!!!!",
            "birth_date": "1950-01-01",
            "death_data": "2000-01-01",
            "email": "joejambul@sinarproject.org",
            "contact_details":[
                {
                    "type":"twitter",
                    "value": "sinarproject",
                }
            ],
            "links":[
                {
                    "url":"http://sinarproject.org",
                }
            ],
            "identifiers":[
                {
                    "identifier": "9089098098",
                    "scheme": "rakyat",
                }
            ],
            "other_names":[
                {
                    "name":"Jane",
                    "family_name":"Jambul",
                    "start_date": "1950-01-01",
                    "end_date": "2010-01-01",
                }
            ]
        }
        person_serial = PersonSerializer(data=person_data, language='en')
        person_serial.is_valid()
        self.assertEqual(person_serial.errors, {})
        person_serial.save()
        person = Person.objects.language("en").get(name="joe")
        self.assertEqual(person.given_name, "joe jambul")
    def test_update_person_serializer(self):
        """A partial update changes only the supplied scalar field."""
        person_data = {
            "given_name": "jerry jambul",
        }
        person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
        person_serializer.is_valid()
        self.assertEqual(person_serializer.errors, {})
        person_serializer.save()
        person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        self.assertEqual(person_.given_name, "jerry jambul")
    def test_create_links_person_serializers(self):
        """A partial update with a new links entry creates the nested link."""
        person_data = {
            "links": [
                {
                    "url": "http://twitter.com/sweemeng",
                }
            ]
        }
        person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
        person_serializer.is_valid()
        self.assertEqual(person_serializer.errors, {})
        person_serializer.save()
        person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        url = person_.links.language("en").get(url="http://twitter.com/sweemeng")
        self.assertEqual(url.url, "http://twitter.com/sweemeng")
    def test_update_links_person_serializers(self):
        """A links entry carrying an existing id is updated in place, not recreated."""
        # links id a4ffa24a9ef3cbcb8cfaa178c9329367
        person_data = {
            "id":"ab1a5788e5bae955c048748fa6af0e97",
            "links":[
                {
                    "id": "a4ffa24a9ef3cbcb8cfaa178c9329367",
                    "note": "just a random repo"
                }
            ]
        }
        person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        person_serializer = PersonSerializer(person, data=person_data, partial=True, language="en")
        person_serializer.is_valid()
        self.assertEqual(person_serializer.errors, {})
        person_serializer.save()
        person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        url = person_.links.language("en").get(id="a4ffa24a9ef3cbcb8cfaa178c9329367")
        self.assertEqual(url.note, "just a random repo")
    def test_update_create_nested_links_persons_serializer(self):
        """Adding a link nested under an existing contact_detail persists it."""
        person_data = {
            "id":"ab1a5788e5bae955c048748fa6af0e97",
            "contact_details":[
                {
                    "id": "a66cb422-eec3-4861-bae1-a64ae5dbde61",
                    "links": [{
                        "url": "http://facebook.com",
                    }]
                }
            ],
        }
        person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
        person_serializer.is_valid()
        self.assertEqual(person_serializer.errors, {})
        person_serializer.save()
        person_ = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
        # There should be only 1 links in that contact
        # NOTE(review): the payload posts facebook.com but the assertion
        # checks sinarproject.org (a pre-existing fixture link) -- confirm
        # this is the intended assertion and not a copy-paste slip.
        contact = person_.contact_details.language('en').get(id='a66cb422-eec3-4861-bae1-a64ae5dbde61')
        links = contact.links.language('en').filter(url="http://sinarproject.org")
        self.assertEqual(links[0].url, "http://sinarproject.org")
    def test_update_update_nested_links_person_serializer(self):
        """A link nested under an identifier, addressed by id, is updated in place."""
        person_data = {
            "id":"8497ba86-7485-42d2-9596-2ab14520f1f4",
            "identifiers":[
                {
                    "id": "af7c01b5-1c4f-4c08-9174-3de5ff270bdb",
                    "links": [{
                        "id": "9c9a2093-c3eb-4b51-b869-0d3b4ab281fd",
                        "note": "this is just a test note",
                    }]
                }
            ],
        }
        person = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
        person_serializer = PersonSerializer(person, data=person_data, partial=True, language='en')
        person_serializer.is_valid()
        self.assertEqual(person_serializer.errors, {})
        person_serializer.save()
        person_ = Person.objects.language('en').get(id='8497ba86-7485-42d2-9596-2ab14520f1f4')
        identifier = person_.identifiers.language('en').get(id="af7c01b5-1c4f-4c08-9174-3de5ff270bdb")
        link = identifier.links.language('en').get(id="9c9a2093-c3eb-4b51-b869-0d3b4ab281fd")
        self.assertEqual(link.note, "this is just a test note")
def test_create_identifier_person_serializer(self):
person_data = {
"identifiers": [
{
"scheme": "IC",
"identifier": "129031309",
}
]
}
person = Person.objects.language('en').get(id='ab1a578 |
ojengwa/grr | lib/flows/general/checks.py | Python | apache-2.0 | 3,088 | 0.006477 | #!/usr/bin/env python
"""A flow to run checks for a host."""
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib.checks import checks
from grr.proto import flows_pb2
class CheckFlowArgs(rdfvalue.RDFProtoStruct):
  """RDFValue wrapper around the CheckFlowArgs protobuf message."""
  protobuf = flows_pb2.CheckFlowArgs
class CheckRun | ner(flow.GRRFlow):
"""This flow runs checks on a host.
CheckRunner:
- Identifies what checks should be run for a host.
- Identifies the artifacts that need to be collected to perform those checks.
- Orchestrates collection of the host data.
- Routes host data to the | relevant checks.
- Returns check data ready for reporting.
"""
friendly_name = "Run Checks"
category = "/Checks/"
behaviours = flow.GRRFlow.behaviours + "BASIC"
@flow.StateHandler(next_state=["MapArtifactData"])
def Start(self):
"""."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
self.state.Register("knowledge_base",
client.Get(client.Schema.KNOWLEDGE_BASE))
self.state.Register("labels", client.GetLabels())
self.state.Register("artifacts_wanted", set())
self.state.Register("artifacts_fetched", set())
self.state.Register("checks_run", [])
self.state.Register("checks_with_findings", [])
self.state.Register("results_store", None)
self.state.Register("host_data", {})
self.CallState(next_state="MapArtifactData")
@flow.StateHandler(next_state=["AddResponses", "RunChecks"])
def MapArtifactData(self, responses):
"""Get processed data, mapped to artifacts."""
self.state.artifacts_wanted = checks.CheckRegistry.SelectArtifacts(
os=self.state.knowledge_base.os)
# Fetch Artifacts and map results to the artifacts that generated them.
# This is an inefficient collection, but necessary because results need to
# be mapped to the originating artifact. An alternative would be to have
# rdfvalues labeled with originating artifact ids.
for artifact_id in self.state.artifacts_wanted:
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_id],
request_data={"artifact_id": artifact_id},
next_state="AddResponses")
self.CallState(next_state="RunChecks")
@flow.StateHandler()
def AddResponses(self, responses):
artifact_id = responses.request_data["artifact_id"]
# TODO(user): Check whether artifact collection succeeded.
self.state.host_data[artifact_id] = list(responses)
@flow.StateHandler(next_state=["Done"])
def RunChecks(self, responses):
if not responses.success:
raise RuntimeError("Checks did not run successfully.")
# Hand host data across to checks. Do this after all data has been collected
# in case some checks require multiple artifacts/results.
for finding in checks.CheckHost(self.state.host_data,
os=self.state.knowledge_base.os):
self.state.checks_run.append(finding.check_id)
if finding.anomaly:
self.state.checks_with_findings.append(finding.check_id)
self.SendReply(finding)
|
pearsontechnology/st2contrib | packs/bitesize/actions/find_unapproved_ns.py | Python | apache-2.0 | 862 | 0.00232 | #!/usr/bin/python
import importlib
import logging
import os
import json
from datetime import datetime
from pprint import pprint
from st2actions.runners.pythonrunner import Action
class GetUnapproved(Action):
    """StackStorm action: print (as JSON) the names of namespaces whose
    metadata carries a 'status' label."""

    def run(self, allns):
        """Scan ``allns['items']`` and print the matching namespace names."""
        matched = []
        for ns in allns['items']:
            if 'metadata' not in ns:
                continue
            metadata = ns['metadata']
            if 'labels' not in metadata:
                continue
            labels = metadata['labels']
            if labels is None or 'status' not in labels:
                continue
            matched.append(metadata['name'])
        # Parenthesized single-arg print is identical on Python 2 and 3.
        if len(matched) > 0:
            print(json.dumps(matched))
        else:
            print("{}")

    def json_serial(self, obj):
        """JSON serializer for objects not serializable by default json code"""
        if isinstance(obj, datetime):
            return obj.isoformat()
        raise TypeError("Type not serializable")
|
caplin/qa-browsers | nodejs/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/__init__.py | Python | unlicense | 21,827 | 0.010858 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
  """Print *message* (%-formatted with *args*) when debug *mode* is enabled,
  prefixed with the caller's file/line/function for traceability."""
  if 'all' in gyp.debug or mode in gyp.debug:
    ctx = ('unknown', 0, 'unknown')
    try:
      f = traceback.extract_stack(limit=2)
      if f:
        ctx = f[0][:3]
    except:
      # Deliberate blanket catch: fall back to the 'unknown' context when the
      # stack cannot be inspected; debug output must never raise.
      pass
    if args:
      message %= args
    print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
                              ctx[1], ctx[2], message)
def FindBuildFiles():
  """Return the names of all '.gyp' files in the current working directory."""
  extension = '.gyp'
  return [entry for entry in os.listdir(os.getcwd())
          if entry.endswith(extension)]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.

  (The mutable default arguments are safe here because both are copied
  before mutation: copy.copy(default_variables) and includes[:].)
  """
  if params is None:
    params = {}
  # NOTE(review): 'flavor' is assigned but never read; the split stores the
  # flavor directly into params['flavor'].
  flavor = None
  if '-' in format:
    format, params['flavor'] = format.split('-', 1)
  default_variables = copy.copy(default_variables)
  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
  # avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format
  # Format can be a custom python file, or by default the name of a module
  # within gyp.generator.
  if format.endswith('.py'):
    generator_name = os.path.splitext(format)[0]
    path, generator_name = os.path.split(generator_name)
    # Make sure the path to the custom generator is in sys.path
    # Don't worry about removing it once we are done.  Keeping the path
    # to each generator that is used in sys.path is likely harmless and
    # arguably a good idea.
    path = os.path.abspath(path)
    if path not in sys.path:
      sys.path.insert(0, path)
  else:
    generator_name = 'gyp.generator.' + format
  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  for (key, val) in generator.generator_default_variables.items():
    default_variables.setdefault(key, val)
  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)
  # Give the generator the opportunity to set generator_input_info based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateGeneratorInputInfo', None):
    generator.CalculateGeneratorInputInfo(params)
  # Fetch the generator specific info that gets fed to input, we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
    'generator_wants_absolute_build_file_paths':
        getattr(generator, 'generator_wants_absolute_build_file_paths', False),
    'generator_handles_variants':
        getattr(generator, 'generator_handles_variants', False),
    'non_configuration_keys':
        getattr(generator, 'generator_additional_non_configuration_keys', []),
    'path_sections':
        getattr(generator, 'generator_additional_path_sections', []),
    'extra_sources_for_rules':
        getattr(generator, 'generator_extra_sources_for_rules', []),
    'generator_supports_multiple_toolsets':
        getattr(generator, 'generator_supports_multiple_toolsets', False),
    'generator_wants_static_library_dependencies_adjusted':
        getattr(generator,
                'generator_wants_static_library_dependencies_adjusted', True),
    'generator_wants_sorted_dependencies':
        getattr(generator, 'generator_wants_sorted_dependencies', False),
  }
  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check, circular_check,
                          params['parallel'])
  return [generator] + result
def NameValueListToDict(name_value_list):
  """
  Convert an array of 'NAME=VALUE' strings into a dictionary.  A bare 'NAME'
  entry maps to True; a VALUE that parses as an integer is stored as an int,
  otherwise it is kept as a string.  Only the first '=' splits name from value.
  """
  result = {}
  for item in name_value_list:
    name, sep, value = item.partition('=')
    if not sep:
      # No '=' present: treat the entry as a boolean flag.
      result[name] = True
      continue
    try:
      result[name] = int(value)
    except ValueError:
      result[name] = value
  return result
def ShlexEnv(env_name):
  """Return the shell-tokenized value of environment variable *env_name*;
  a missing or empty value is passed through unsplit (default: [])."""
  raw = os.environ.get(env_name, [])
  if not raw:
    return raw
  return shlex.split(raw)
def FormatOpt(opt, value):
  """Join a flag and its value: '--opt=value' for long options, '-optvalue'
  (no separator) for short ones."""
  separator = '=' if opt.startswith('--') else ''
  return opt + separator + value
def Regenerate | AppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, | then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
value = FormatOpt(flag, predicate(flag_value))
if value in flags:
flags.remove(value)
flags.append(value)
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
|
rgtjf/Semantic-Texual-Similarity-Toolkits | stst/features/__init__.py | Python | mit | 525 | 0.001905 |
from stst.features.features_sequence import *
from stst.features.features_pos import *
from stst.features.features_ngram import *
from stst.features.features_bow import *
from stst.features.features_dependency import *
from stst.features.features_align import *
from stst.features.features_embedding import *
from stst.fe | atures.features_tree_kerne | ls import *
from stst.features.features_wn import *
from stst.features.features_mt import *
from stst.features.features_nn import *
from stst.features.features_negative import * |
gnulinooks/sympy | sympy/core/function.py | Python | bsd-3-clause | 23,521 | 0.005569 | """
There are two types of functions:
1) defined function like exp or sin that has a name and body
(in the sense that function can be evaluated).
e = exp
2) undefined function with a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be Function instance)
3) this isn't implemented yet: anonymous function or lambda function that has
no name but has body with dummy variables. An anonymous function
object creation examples:
f = Lambda(x, exp(x)*x)
f = Lambda(exp(x)*x) # free symbols in the expression define the number of arguments
f = exp * Lambda(x,x)
4) isn't implemented yet: composition of functions, like (sin+cos)(x), this
works in sympycore, but needs to be ported back to SymPy.
Example:
>>> from sympy import *
>>> f = Function("f")
>>> x = Symbol("x")
>>> f(x)
f(x)
>>> print srepr(f(x).func)
Function('f')
>>> f(x).args
(x,)
"""
from basic import Basic, Atom, S, C, sympify
from basic import BasicType, BasicMeta
from operations import AssocOp
from cache import cacheit
from itertools import repeat
from numbers import Rational, Integer
from symbol import Symbol
from add import Add
from multidimensional import vectorize
from sympy import mpmath
from sympy.utilities.decorator import deprecated
class PoleError(Exception):
    """Exception type for pole-related evaluation failures; raised by code
    outside this file chunk (name-based; no raiser visible here)."""
    pass
class FunctionClass(BasicMeta):
    """
    Base class for function classes. FunctionClass is a subclass of type.
    Use Function('<function name>' [ , signature ]) to create
    undefined function classes.
    """
    # Cache the raw type constructor (Python 2 metaclass machinery).
    _new = type.__new__
    def __new__(cls, arg1, arg2, arg3=None, **options):
        """Create a new function *class*.

        Two call shapes (see the branches below):
        - FunctionClass(Function, "f"[, signature]) clones Function under a
          new name, marking it as an undefined function.
        - FunctionClass(name, bases, attrdict) is the normal metaclass call.
        """
        # Backquote repr syntax below is Python-2-only.
        assert not options,`options`
        if isinstance(arg1, type):
            # the following code gets executed when one types
            # FunctionClass(Function, "f")
            # i.e. cls = FunctionClass, arg1 = Function, arg2 = "f"
            # and we simply do an equivalent of:
            # class f(Function):
            #     ...
            # return f
            ftype, name, signature = arg1, arg2, arg3
            #XXX this probably needs some fixing:
            assert ftype.__name__.endswith('Function'),`ftype`
            attrdict = ftype.__dict__.copy()
            # Mark the generated class so Function.__new__ can recognise it.
            attrdict['undefined_Function'] = True
            if signature is not None:
                attrdict['signature'] = signature
            bases = (ftype,)
            return type.__new__(cls, name, bases, attrdict)
        else:
            name, bases, attrdict = arg1, arg2, arg3
            return type.__new__(cls, name, bases, attrdict)
    def __repr__(cls):
        """Represent a function class simply by its name (e.g. 'sin')."""
        return cls.__name__
class Function(Basic):
"""
Base class for applied functions.
Constructor of undefined classes.
"""
__metaclass__ = FunctionClass
is_Function = True
nargs = None
@vectorize(1)
@cacheit
def __new__(cls, *args, **options):
# NOTE: this __new__ is twofold:
#
# 1 -- it can create another *class*, which can then be instantiated by
# itself e.g. Function('f') creates a new class f(Function)
#
# 2 -- on the other hand, we instantiate -- that is we create an
# *instance* of a class created earlier in 1.
#
# So please keep, both (1) and (2) in mind.
# (1) create new function class
# UC: Function('f')
if cls is Function:
#when user writes Function("f"), do an equivalent of:
#taking the whole class Function(...):
#and rename the Function to "f" and return f, thus:
#In [13]: isinstance(f, Function)
#Out[13]: False
#In [14]: isinstance(f, FunctionClass)
#Out[14]: True
if len(args) == 1 and isinstance(args[0], str):
#always create Function
return FunctionClass(Function, *args)
return FunctionClass(Function, *args, **options)
else:
print args
print type(args[0])
raise Exception("You need to specify exactly one string")
# (2) create new instance of a class created in (1)
# UC: Function('f')(x)
# UC: sin(x)
args = map(sympify, args)
# these lines should be refactored
for opt in ["nargs", "dummy", "comparable", "noncommutative", "commutative"]:
if opt in options:
del options[opt]
# up to here.
if options.get('evaluate') is False:
return Basic.__new__(cls, *args, **options)
r = cls.eval(*args)
if isinstance(r, Basic):
return r
elif r is None:
# Just undefined functions have nargs == None
if not cls.nargs and hasattr(cls, 'undefined_Function'):
r = Basic.__new__(cls, *args, **options)
r.nargs = len(args)
return r
pass
elif not isinstance(r, tuple):
args = (r,)
return Basic.__new__(cls, *args, **options)
@property
def is_commutative(self):
return True
@classmethod
@deprecated
def canonize(cls, *args):
return cls.eval(*args)
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
(possible of some other class), or if the class cls should be
unmodified, return None.
Example of eval() for the function "sign"
---------------------------------------------
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg is S.Zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, C.Mul):
coeff, terms = arg.as_coeff_terms()
if coeff is not S.One:
return cls(coeff) * cls(C.Mul(*terms))
"""
return
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if self == old:
return new
elif old.is_Function and new.is_Function:
if old == self.func:
if self.nargs is new.nargs or not new.nargs:
return new(*self.args[:])
# Written down as an elif to avoid a super-long line
elif isinstance(new.nargs,tuple) and self.nargs in new.nargs:
return new(*self.args[:])
obj = self.func._eval_apply_subs(*(self.args[:] + (old,) + (new,)))
if obj is not None:
return obj
return Basic._seq_subs(self, old, new)
def _eval_expand_basic(self, *args):
return None
def _eval_evalf(self, prec):
# Lookup mpmath function based on name
fname = self.func.__name__
try:
if not hasattr(mpmath, fname):
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRAN | SLATIONS[fname]
func = getattr(mpmath, | fname)
except (AttributeError, KeyError):
return
# Convert all args to mpf or mpc
try:
args = [arg._to_mpmath(prec) for arg in self.args]
except ValueError:
return
# Set mpmath precision and apply. Make sure precision is restored
# afterwards
orig = mpmath.mp.prec
try:
mpmath.mp.prec = prec
v = func(*args)
finally:
mpmath.mp.prec = orig
return Basic._from_mpmath(v, prec)
def _eval_is_comparable(self):
if self.is_Function:
r = True
for s in self.args:
c = s.is_comparable
if c is None: return
if not c: r = False
return r
return
def _eval_derivative(self, s):
# f(x).diff(s) -> x.d |
ytanay/thinglang | tests/compiler/test_inlining.py | Python | mit | 1,570 | 0.005096 | import pytest
from tests.compiler import compile_base, internal_call
from thinglang.compiler.opcodes import OpcodePushStatic, OpcodePushLocal
INLINING_TEST_PROGRAM = '''
thing Program
setup
number n1 = 0
number n2 = 1
{}
static does add with number a, number b
a + b
static does external_call with text file_name
File(file_name)
'''
# TODO: test tracking
@pytest.mark.skip()
def test_inlining_binary_op_constants():
assert compile_base(INLINING_TEST_PROGRAM.format('self.add(1, 2)'), trim=2) == [
OpcodePushStatic(2),
OpcodePushStatic(3),
internal_call('number.__addition__')
]
@pytest.mark.skip()
def test_inlining_binary_op_variables():
assert compile_base(INLINING_TEST_PROGRAM.format('self.add(n1, n2)'), trim=2) == [
OpcodePushLocal(2),
OpcodeP | ushLocal(1),
internal_call('number.__addition__')
]
@pytest.mark.skip()
def test_inlining_binary_op_mixed():
assert compile_base(INLINING_TEST_PROGRAM.format('self.add(2, n1)'), trim=2) == [
OpcodePushLocal(1),
OpcodePushStatic(2),
internal_call('number.__addition__')
]
@pytest.mark.skip()
def test_inlining_second_c | all_simple():
assert compile_base(INLINING_TEST_PROGRAM.format('self.external_call("Hello")'), trim=2) == [
OpcodePushStatic(2),
internal_call('File.__constructor__')
]
@pytest.mark.skip()
def test_inlining_internal_call():
compile_base(INLINING_TEST_PROGRAM.format('Console.print(n1)'), trim=2) |
idf/FaceReader | facerec_py/facerec/serialization.py | Python | mit | 268 | 0 | import cPickle
def save_model(filename, model):
output = open(filename, 'wb')
cPickle.dump(model, output)
output.close()
def load_model(file | name):
pkl_file = open(filenam | e, 'rb')
res = cPickle.load(pkl_file)
pkl_file.close()
return res
|
elnoxgdl/destinystats | GuardianStats.py | Python | apache-2.0 | 5,360 | 0.03806 | #! /usr/bin/python
import requests, pprint, json
from collections import defaultdict
class GuardianStats:
# Bungie API KEY
api_key = '1b6d2823d9ba455db6d22c0c75ae55a2'
def __init__(self, psn_id):
self.membership_id = self.GetMembershipID(psn_id)
self.character_id = self.GetLatestUsedGuardian(self.membershi | p_id)
def GetMembershipID(self, psn_id):
membership_id = None
api_endpoint = 'https://www.bungie.net/Platform/Destiny/SearchDestinyPlayer/2/'+psn_id
headers = {'X-API-Key': self.api_key}
r = requests.get(api_endpoint, headers=headers)
if r.status_code == 200 :
data = r.json()
membership_id = data['Response'][0]['membershipId']
else:
print | 'HTTP error: '+r.status_code
return membership_id
def GetLatestUsedGuardian(self, membership_id):
last_guardian_id = None
api_endpoint = 'https://www.bungie.net/Platform/Destiny/2/Account/'+membership_id+'/Summary/'
headers = {'X-API-Key': self.api_key}
r = requests.get(api_endpoint, headers=headers)
if r.status_code == 200 :
data = r.json()
characters = data['Response']['data']['characters']
data = defaultdict(dict)
for character in characters :
character_id = character['characterBase']['characterId']
last_played = character['characterBase']['dateLastPlayed']
data[last_played] = character_id
dates = sorted(data.keys(), reverse=True)
last_guardian_id = data[dates[0]]
else:
print 'HTTP error: '+r.status_code
return last_guardian_id
def GetPlayerStats_V2(self, membership_id, mode_id):
stats = None
api_endpoint = 'http://api.guardian.gg/v2/players/'+membership_id
r = requests.get(api_endpoint)
if r.status_code == 200 :
data = r.json()
stats = data['data']['modes'][str(mode_id)]
else:
print 'HTTP error: '+r.status_code
return stats
# OLD METHOD (is the only method with rank still checking how to loop and get it)
def GetPlayerStats(self, mode_id):
stats = None
membership_id = self.membership_id
api_endpoint = 'http://api.guardian.gg/elo/'+membership_id
print api_endpoint
r = requests.get(api_endpoint)
if r.status_code == 200 :
data = r.json()
# stats = data['data']['modes'][mode_id]
else:
print 'HTTP error: '+r.status_code
return stats
def GetActivityID(self, mode_id):
activity_id = None
membership_id = self.membership_id
character_id = self.character_id
api_endpoint = 'https://www.bungie.net/Platform/Destiny/Stats/ActivityHistory/2/'+membership_id+'/'+character_id+'/?mode='+mode_id+'&count=1&lc=en'
headers = {'X-API-Key': self.api_key}
r = requests.get(api_endpoint, headers=headers)
if r.status_code == 200 :
data = r.json()
activity_id = data['Response']['data']['activities'][0]['activityDetails']['instanceId']
else:
print 'HTTP error: '+r.status_code
return activity_id
def GetPostCarnageReport(self, mode_id):
report = None
activity_id = self.GetActivityID(str(mode_id))
api_endpoint = 'https://www.bungie.net/Platform/Destiny/Stats/PostGameCarnageReport/'+activity_id+'/?lc=en'
headers = {'X-API-Key': self.api_key}
r = requests.get(api_endpoint, headers=headers)
if r.status_code == 200 :
data = r.json()
report = defaultdict(dict)
period = data['Response']['data']['period']
players = data['Response']['data']['entries']
teams = data['Response']['data']['teams']
mode_id = data['Response']['data']['activityDetails']['mode']
# Mapping the teams
team_map = defaultdict(dict)
for team in teams :
# Assigning more friendly variables
team_id = int(team['teamId'])
team_name = team['teamName']
team_standing = team['standing']['basic']['displayValue']
team_standing_value = int(team['standing']['basic']['value'])
team_score = int(team['score']['basic']['value'])
# Assigning the team map
team_map[team_id]['standing'] = team_standing
team_map[team_id]['name'] = team_name
# Building the report
report[team_standing_value]['team_score'] = team_score
report[team_standing_value]['team_name'] = team_name
report[team_standing_value]['player'] = []
# Mapping the players
for guardian in players :
guardian_standing = int(guardian['standing'])
display_name = guardian['player']['destinyUserInfo']['displayName']
membership_id = guardian['player']['destinyUserInfo']['membershipId']
stats = self.GetPlayerStats_V2(membership_id, mode_id)
elo = int(stats['elo'])
player_data = {
'display_name' : display_name,
'membership_id' : membership_id,
'kills' : int(guardian['values']['kills']['basic']['value']),
'deaths' : int(guardian['values']['deaths']['basic']['value']),
'team' : guardian['values']['team']['basic']['displayValue'],
'team_id' : int(guardian['values']['team']['basic']['value']),
'team_standing' : team_map[guardian['values']['team']['basic']['value']]['standing'],
'elo' : elo
}
report[guardian_standing]['player'].append(player_data)
else:
print 'HTTP error: '+r.status_code
return report
|
linkhub-sdk/popbill.closedown.example.py | getPartnerURL.py | Python | mit | 1,149 | 0.001951 | # -*- coding: utf-8 -*-
# code for console Encoding difference. Dont' mind on it
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import ClosedownService, PopbillException
closedownService = ClosedownService(testValue.LinkID, testValue.SecretKey)
closedownService.IsTest = testValue.IsTest
closedownService.IPRestrictOnOff = testValue.IPRestrictOnOff
closedownService.UseStaticIP = testValue.UseStaticIP
closedownService.UseLocalT | imeYN = testValue.UseLocalTimeYN
' | ''
파트너 포인트충전 팝업 URL을 반환합니다.
- 보안정책에 따라 반환된 URL은 30초의 유효시간을 갖습니다.
- https://docs.popbill.com/closedown/python/api#GetPartnerURL
'''
try:
print("=" * 15 + " 파트너 포인트충전 URL 확인 " + "=" * 15)
# 팝빌회원 사업자번호
CorpNum = testValue.testCorpNum
# CHRG-포인트 충전 URL
TOGO = "CHRG"
url = closedownService.getPartnerURL(CorpNum, TOGO)
print("URL: %s" % url)
except PopbillException as PE:
print("Exception Occur : [%d] %s" % (PE.code, PE.message))
|
minghuascode/pyj | addons/DeferredHandler.py | Python | apache-2.0 | 927 | 0.016181 | """
A modification of pyjamas DeferredMethod
@author: Tobias Weber
@contact: tobi-weber@gmx.de
"""
from pyjamas.Timer import Timer
global deferredHandlers
deferredHandlers = []
global timerIsActive
timerIsActive = False
def add(handler, arguments=[]):
deferredHandlers.append([handler, argumen | ts])
maybeSetDeferredHandlerTimer()
def flushDeferredHandlers():
for i in range(len(deferredHandlers)):
current = deferredHandlers[0]
del deferredHandlers[0]
if current:
handler = current[0]
args | = current[1]
handler(*args)
def maybeSetDeferredHandlerTimer():
global timerIsActive
if (not timerIsActive) and (not len(deferredHandlers)==0):
Timer(1, onTimer)
timerIsActive = True
def onTimer(t):
global timerIsActive
flushDeferredHandlers()
timerIsActive = False
maybeSetDeferredHandlerTimer() |
ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/rebel/rebel_commandant.py | Python | lgpl-3.0 | 1,902 | 0.027865 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('corvette_rebel_commandant')
mobileTemplate.setLevel(82)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("rebel")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("rebel")
mobileT | emplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_dressed_rebel_brigadier_general_bith_male.iff')
templates.ad | d('object/mobile/shared_dressed_rebel_brigadier_general_human_female_01.iff')
templates.add('object/mobile/shared_dressed_rebel_brigadier_general_moncal_female.iff')
templates.add('object/mobile/shared_dressed_rebel_brigadier_general_rodian_female_01.iff')
templates.add('object/mobile/shared_dressed_rebel_brigadier_general_sullustan_male.iff')
templates.add('object/mobile/shared_dressed_rebel_brigadier_general_trandoshan_female.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/pistol/shared_pistol_alliance_disruptor.iff', WeaponType.PISTOL, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('rebel_commandant', mobileTemplate)
return |
colour-science/colour | colour/examples/recovery/examples_meng2015.py | Python | bsd-3-clause | 633 | 0.00158 | """Showcases reflectance recovery computations using *M | eng et al. (2015)* method."""
import numpy as np
import colour
from colour.utilities import message_box
message_box('"Meng et al. (2015)" - Reflectance Recovery Computations')
illuminant = colour.SDS_ILLUMINANTS["D65"]
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
message_box(
f'Recovering reflectanc | e using "Meng et al. (2015)" method from given '
f'"XYZ" tristimulus values:\n\n\tXYZ: {XYZ}'
)
sd = colour.XYZ_to_sd(XYZ, method="Meng 2015")
print(sd)
print(colour.recovery.XYZ_to_sd_Meng2015(XYZ))
print(colour.sd_to_XYZ(sd, illuminant=illuminant) / 100)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.