| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
sesuncedu/bitcurator
|
tools/py3fpdf/attic/font/calligra.py
|
Python
|
gpl-3.0
| 2,763
| 0.199421
|
type='TrueType'
name='Calligrapher-Regular'
desc={'Ascent':899,'Descent':-234,'CapHeight':731,'Flags':32,'FontBBox':'[-50 -234 1328 899]','ItalicAngle':0,'StemV':70,'MissingWidth':800}
up=-200
ut=20
cw={
'\x00':800,'\x01':800,'\x02':800,'\x03':800,'\x04':800,'\x05':800,'\x06':800,'\x07':800,'\x08':800,'\t':800,'\n':800,'\x0b':800,'\x0c':800,'\r':800,'\x0e':800,'\x0f':800,'\x10':800,'\x11':800,'\x12':800,'\x13':800,'\x14':800,'\x15':800,
'\x16':800,'\x17':800,'\x18':800,'\x19':800,'\x1a':800,'\x1b':800,'\x1c':800,'\x1d':800,'\x1e':800,'\x1f':800,' ':282,'!':324,'"':405,'#':584,'$':632,'%':980,'&':776,'\'':259,'(':299,')':299,'*':377,'+':600,
',':259,'-':432,'.':254,'/':597,'0':529,'1':298,'2':451,'3':359,'4':525,'5':423,'6':464,'7':417,'8':457,'9':479,':':275,';':282,'<':600,'=':600,'>':600,'?':501,'@':800,'A':743,
'B':636,'C':598,'D':712,'E':608,'F':562,'G':680,'H':756,'I':308,'J':314,'K':676,'L':552,'M':1041,'N':817,'O':729,'P':569,'Q':698,'R':674,'S':618,'T':673,'U':805,'V':753,'W':1238,
'X':716,'Y':754,'Z':599,'[':315,'\\':463,']':315,'^':600,'_':547,'`':278,'a':581,'b':564,'c':440,'d':571,'e':450,'f':347,'g':628,'h':611,'i':283,'j':283,'k':560,'l':252,'m':976,
'n':595,'o':508,'p':549,'q':540,'r':395,'s':441,'t':307,'u':614,'v':556,'w':915,'x':559,'y':597,'z':452,'{':315,'|':222,'}':315,'~':600,'\x7f':800,'\x80':800,'\x81':800,'\x82':0,'\x83':0,
'\x84':0,'\x85':780,'\x86':0,'\x87':0,'\x88':278,'\x89':0,'\x8a':0,'\x8b':0,'\x8c':1064,'\x8d':800,'\x8e':800,'\x8f':800,'\x90':800,'\x91':259,'\x92':259,'\x93':470,'\x94':470,'\x95':500,'\x96':300,'\x97':600,'\x98':278,'\x99':990,
'\x9a':0,'\x9b':0,'\x9c':790,'\x9d':800,'\x9e':800,'\x9f':754,'\xa0':282,'\xa1':324,'\xa2':450,'\xa3':640,'\xa4':518,'\xa
|
5':603,'\xa6':0,'\xa7':519,'\xa8':254,'\xa9':800,'\xaa':349,'\xab':0,'\xac':0,'\xad':432,'\xae':800,'\xaf':278,
'\xb0':0,'\xb1':0,'\xb2'
|
:0,'\xb3':0,'\xb4':278,'\xb5':614,'\xb6':0,'\xb7':254,'\xb8':278,'\xb9':0,'\xba':305,'\xbb':0,'\xbc':0,'\xbd':0,'\xbe':0,'\xbf':501,'\xc0':743,'\xc1':743,'\xc2':743,'\xc3':743,'\xc4':743,'\xc5':743,
'\xc6':1060,'\xc7':598,'\xc8':608,'\xc9':608,'\xca':608,'\xcb':608,'\xcc':308,'\xcd':308,'\xce':308,'\xcf':308,'\xd0':0,'\xd1':817,'\xd2':729,'\xd3':729,'\xd4':729,'\xd5':729,'\xd6':729,'\xd7':0,'\xd8':729,'\xd9':805,'\xda':805,'\xdb':805,
'\xdc':805,'\xdd':0,'\xde':0,'\xdf':688,'\xe0':581,'\xe1':581,'\xe2':581,'\xe3':581,'\xe4':581,'\xe5':581,'\xe6':792,'\xe7':440,'\xe8':450,'\xe9':450,'\xea':450,'\xeb':450,'\xec':283,'\xed':283,'\xee':283,'\xef':283,'\xf0':800,'\xf1':595,
'\xf2':508,'\xf3':508,'\xf4':508,'\xf5':508,'\xf6':508,'\xf7':0,'\xf8':508,'\xf9':614,'\xfa':614,'\xfb':614,'\xfc':614,'\xfd':0,'\xfe':0,'\xff':597}
enc='cp1252'
diff=''
filename='calligra.z'
originalsize=40120
|
davidmarin/pbg
|
python/pbg/common/microdata.py
|
Python
|
apache-2.0
| 1,121
| 0.000892
|
"""Thin wrapper around the microdata library."""
from __future__ import absolute_import
import microdata
class Item(microdata.Item):
"""Add an "extra" field to microdata Items, so people won't feel the need
to make up ad-hoc properties.
Also add __eq__() and __repr__().
"""
def __init__(self, *args, **kwargs):
super(Item, self).__init__(*args, **kwargs)
self.extra = {}
def json_dict(self):
item = super(Item, self).json_dict()
if self.extra:
item['extra'] = self.extra
return item
def __eq__(self, other):
if not isinstance(other, microdata.Item):
return False
return (self.itemid == other.itemid
|
and
self.itemtype == other.itemtype and
self.props == other.props and
self.extra == getattr(other, 'extra', {}))
def __repr__(self):
return '%s(%r, %r, props=%r, extra=%r)' % (
self.__class__.__name__,
' '.join(uri.string for uri in self.ite
|
mtype),
self.itemid,
self.props,
self.extra)
|
olhoneles/olhoneles
|
montanha/util.py
|
Python
|
agpl-3.0
| 3,167
| 0.000316
|
# -*- coding: utf-8 -*-
#
# Copyright (©) 2013 Marcelo Jorge Vieira <metal@alucinados.com>
# Copyright (©) 2013 Gustavo Noronha Silva <gustavo@noronha.eti.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public Li
|
cense
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import date
from montanha.models import Institution, Legislature
def filter_for_institution(data, institution):
if not institutio
|
n:
return data
if not isinstance(institution, Institution):
institution = Institution.objects.get(siglum=institution)
data = data.filter(mandate__legislature__institution=institution)
return data
def get_date_ranges_from_data(institution, data, consolidated_data=False, include_date_objects=True):
""" Takes a data set and returns a dict containing in textual form:
current_date_from: the start date that is being used for this query
current_date_to: the end date that is being used for this query
"""
try:
if consolidated_data:
cdf = data.order_by('date_start')[0].date_start
else:
cdf = data.order_by('date')[0].date
except Exception:
cdf = date.today()
try:
if consolidated_data:
cdt = data.order_by('-date_end')[0].date_end
else:
cdt = data.order_by('-date')[0].date
except Exception:
cdt = date.today()
if institution:
if not isinstance(institution, Institution):
institution = Institution.objects.get(siglum=institution)
# Bound dates to the start of the first legislature and the end
# of the last, which makes more sense for our purposes.
first = institution.legislature_set.order_by('date_start')[0]
last = institution.legislature_set.order_by('-date_end')[0]
else:
first = Legislature.objects.order_by('date_start')[0]
last = Legislature.objects.order_by('-date_end')[0]
min_date = first.date_start
max_date = last.date_end
if cdf < min_date:
cdf = min_date
if cdt > max_date:
cdt = max_date
cdf_string = cdf.strftime('%B de %Y')
cdt_string = cdt.strftime('%B de %Y')
d = dict(current_date_from=cdf_string, current_date_to=cdt_string)
if include_date_objects:
d.update(dict(cdf=cdf, cdt=cdt))
return d
def ensure_years_in_range(date_ranges, years):
nyears = []
cdf = date_ranges['cdf']
cdt = date_ranges['cdt']
for y in years:
d = date(y, 1, 1)
if d < cdf or d > cdt:
continue
nyears.append(y)
return nyears
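# A small illustrative example (values are made up): with date_ranges carrying
# cdf = date(2013, 1, 1) and cdt = date(2014, 12, 31),
# ensure_years_in_range(date_ranges, [2012, 2013, 2014, 2015]) returns
# [2013, 2014], since January 1st of 2012 falls before cdf and 2015 after cdt.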
|
guorendong/iridium-browser-ubuntu
|
tools/telemetry/telemetry/results/page_test_results_unittest.py
|
Python
|
bsd-3-clause
| 12,842
| 0.002803
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.results import base_test_results_unittest
from telemetry.results import page_test_results
from telemetry.timeline import trace_data
from telemetry.value import failure
from telemetry.value import histogram
from telemetry.value import scalar
from telemetry.value import skip
from telemetry.value import trace
class PageTestResultsTest(base_test_results_unittest.BaseTestResultsUnittest):
def setUp(self):
ps = page_set.PageSet(file_path=os.path.dirname(__file__))
ps.AddUserStory(page_module.Page("http://www.bar.com/", ps, ps.base_dir))
ps.AddUserStory(page_module.Page("http://www.baz.com/", ps, ps.base_dir))
ps.AddUserStory(page_module.Page("http://www.foo.com/", ps, ps.base_dir))
self.page_set = ps
@property
def pages(self):
return self.page_set.pages
def testFailures(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(
failure.FailureValue(self.pages[0], self.CreateException()))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
results.DidRunPage(self.pages[1])
self.assertEqual(set([self.pages[0]]), results.pages_that_failed)
self.assertEqual(set([self.pages[1]]), results.pages_that_succeeded)
self.assertEqual(2, len(resul
|
ts.all_page_runs))
self.assertTrue(results.all_page_runs[0].failed)
self.assertTrue(results.all_page_runs[1].ok)
def testSkip
|
s(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(skip.SkipValue(self.pages[0], 'testing reason'))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
results.DidRunPage(self.pages[1])
self.assertTrue(results.all_page_runs[0].skipped)
self.assertEqual(self.pages[0], results.all_page_runs[0].user_story)
self.assertEqual(set([self.pages[0], self.pages[1]]),
results.pages_that_succeeded)
self.assertEqual(2, len(results.all_page_runs))
self.assertTrue(results.all_page_runs[0].skipped)
self.assertTrue(results.all_page_runs[1].ok)
def testBasic(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
results.AddValue(scalar.ScalarValue(self.pages[1], 'a', 'seconds', 3))
results.DidRunPage(self.pages[1])
results.PrintSummary()
values = results.FindPageSpecificValuesForPage(self.pages[0], 'a')
self.assertEquals(1, len(values))
v = values[0]
self.assertEquals(v.name, 'a')
self.assertEquals(v.page, self.pages[0])
values = results.FindAllPageSpecificValuesNamed('a')
assert len(values) == 2
def testUrlIsInvalidValue(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
self.assertRaises(
AssertionError,
lambda: results.AddValue(scalar.ScalarValue(
self.pages[0], 'url', 'string', 'foo')))
def testAddSummaryValueWithPageSpecified(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
self.assertRaises(
AssertionError,
lambda: results.AddSummaryValue(scalar.ScalarValue(self.pages[0],
'a', 'units', 3)))
def testUnitChange(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
self.assertRaises(
AssertionError,
lambda: results.AddValue(scalar.ScalarValue(
self.pages[1], 'a', 'foobgrobbers', 3)))
def testTypeChange(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
self.assertRaises(
AssertionError,
lambda: results.AddValue(histogram.HistogramValue(
self.pages[1], 'a', 'seconds',
raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}')))
def testGetPagesThatSucceededAllPagesFail(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
results.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'message'))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
results.AddValue(scalar.ScalarValue(self.pages[1], 'a', 'seconds', 7))
results.AddValue(failure.FailureValue.FromMessage(self.pages[1], 'message'))
results.DidRunPage(self.pages[1])
results.PrintSummary()
self.assertEquals(0, len(results.pages_that_succeeded))
def testGetSuccessfulPageValuesMergedNoFailures(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
self.assertEquals(1, len(results.all_page_specific_values))
results.DidRunPage(self.pages[0])
def testGetAllValuesForSuccessfulPages(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
value1 = scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3)
results.AddValue(value1)
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
value2 = scalar.ScalarValue(self.pages[1], 'a', 'seconds', 3)
results.AddValue(value2)
results.DidRunPage(self.pages[1])
results.WillRunPage(self.pages[2])
value3 = scalar.ScalarValue(self.pages[2], 'a', 'seconds', 3)
results.AddValue(value3)
results.DidRunPage(self.pages[2])
self.assertEquals(
[value1, value2, value3], results.all_page_specific_values)
def testGetAllValuesForSuccessfulPagesOnePageFails(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
value1 = scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3)
results.AddValue(value1)
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
value2 = failure.FailureValue.FromMessage(self.pages[1], 'Failure')
results.AddValue(value2)
results.DidRunPage(self.pages[1])
results.WillRunPage(self.pages[2])
value3 = scalar.ScalarValue(self.pages[2], 'a', 'seconds', 3)
results.AddValue(value3)
results.DidRunPage(self.pages[2])
self.assertEquals(
[value1, value2, value3], results.all_page_specific_values)
def testTraceValue(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(trace.TraceValue(None, trace_data.TraceData({'test' : 1})))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
results.AddValue(trace.TraceValue(None, trace_data.TraceData({'test' : 2})))
results.DidRunPage(self.pages[1])
results.PrintSummary()
values = results.FindAllTraceValues()
self.assertEquals(2, len(values))
def testCleanUpCleansUpTraceValues(self):
results = page_test_results.PageTestResults()
v0 = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
v1 = trace.TraceValue(None, trace_data.TraceData({'test': 2}))
results.WillRunPage(self.pages[0])
results.AddValue(v0)
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
results.AddValue(v1)
results.DidRunPage(self.pages[1])
results.CleanUp()
self.assertTrue(v0.cleaned_up)
self.assertTrue(v1.cleaned_up)
def testNoTracesLeftAfterCleanUp(self):
results = page_test_results.PageTestResults()
v0 = trace.TraceValue(None, t
|
openatv/enigma2
|
lib/python/Plugins/Extensions/QuadPip/plugin.py
|
Python
|
gpl-2.0
| 682
| 0.030792
|
from __future__ import absolute_import
from Plugins.Plugin import PluginDescriptor
from Components.PluginComponent import plugins
from enigma import eDBoxLCD
from .qpip import QuadPipScreen, setDecoderMode
def main(session, **kwargs):
session.open(QuadPipScreen)
def autoStart(reason, **kwargs):
if reason == 0:
setDecoderMode("normal")
elif reason == 1:
pass
def Plugins(**kwargs):
list = []
list.append(
Plugi
|
nDescriptor(name=_("Enable Quad PIP"),
description="Quad Picture in
|
Picture",
where=[PluginDescriptor.WHERE_EXTENSIONSMENU],
fnc=main))
list.append(
PluginDescriptor(
where=[PluginDescriptor.WHERE_AUTOSTART],
fnc=autoStart))
return list
|
edx/configuration
|
util/create_data_czar/create_org_data_czar_policy.py
|
Python
|
agpl-3.0
| 2,563
| 0.001951
|
"""
create_org_data_czar_policy.py
Creates an IAM group for an edX org and applies an S3 policy to that group
that allows read-only access to the org's folder in the edx-course-data S3 bucket.
"""
import argparse
import boto3
from botocore.exceptions import ClientError
from string import Template
import sys
template = Template("""{
"Version":"2012-10-17",
"Statement": [
{
"Sid": "AllowListingOfOrgFolder",
"Action": ["s3:ListBucket"],
"Effect": "Allow",
"Resource": ["arn:aws:s3:::edx-course-data"],
"Condition":{"StringLike":{"s3:prefix":["$org","$org/*"]}}
},
{
"Sid": "AllowGetBucketLocation",
"Action": ["s3:GetBucketLocation"],
"Effect": "Allow",
"Resource": ["arn:aws:s3:::edx-course-data"]
},
{
"Sid": "AllowGetS3ActionInOrgFolder",
"Effect": "Allow",
"Action": ["s3:GetObject"],
"Resource": ["arn:aws:s3:::edx-course-data/$org/*"]
}
]
}""")
def add_org_group(org, iam_connection):
group_name = "edx-course-data-{org}".format(org=org)
try:
iam_connection.create_group(GroupName=group_name)
except ClientError as bse:
if bse.response['ResponseMetadata']['HTTPStatusCode'] == 409:
pass
else:
print(bse)
try:
iam_connection.put_group_policy(
GroupName=group_name,
PolicyName=group_name,
PolicyDocument=template.substitute(org=org)
)
except ClientError as bse:
if bse.response['ResponseMetadata']['HTTPStatusCode'] == 409:
pass
else:
print(bse)
print(template.substitute(org=org))
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-o', '--org', help='Name of the org for which to create an IAM '
'role and policy, this should have the same '
'name as the S3 bucket')
group.add_argument('-f', '--file', help='The path to a file containing one org name '
'per line.')
args = parser.parse_args()
iam_connection = boto3.client('iam')
if args.org:
add_org_group(args.org.rstrip('\n').lower(), iam_connection)
elif args.file:
with open(args.file) as file:
for line in file:
org = line.rstrip('\n').lower()
add_or
|
g_group(org, iam_connection)
else:
parser.print_usage()
sys.exit(1)
|
sys.exit(0)
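# Illustrative invocations (the flags are the ones defined above; the org name
# and file path are placeholders):
#   python create_org_data_czar_policy.py --org someorg
#   python create_org_data_czar_policy.py --file orgs.txt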
|
devton/pagarme-py
|
pagarme/transaction.py
|
Python
|
mit
| 1,490
| 0.000671
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from .exceptions import TransactionValidationError
AVAIABLE_PAYMENT_METHOD = ['credit_card', 'boleto']
def validate_transaction(attrs):
if len(attrs) <= 0:
raise TransactionValidationError('Need a valid attr dict')
errors = []
if 'amount' not in attrs or attrs['amo
|
unt'] <= 0:
errors.append('Need to define an amount')
if 'payment_method' not in attrs:
errors.append('Need to define an valid payment_method')
if 'payment_method' in attrs:
if not attrs['payment_method'] in AVAIABLE_PAYMENT_METHOD:
errors.append(
"invalid payment_method need be boleto or credit_card")
if len(errors) > 0:
raise TransactionValidationError(', '.join(errors))
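# For example (illustrative values): validate_transaction({'amount': 1000,
# 'payment_method': 'boleto'}) passes, while validate_transaction({}) raises
# TransactionValidationError('Need a valid attr dict').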
|
class Transaction():
def __init__(self, requester):
self.requester = requester
self.attributes = {}
def get_transactions(self, page=1):
return self.requester.commit('/transactions', {'page': page}, 'GET')
def build_transaction(self, transaction_attributes):
if not isinstance(transaction_attributes, dict):
raise TransactionValidationError(
'Transaction attributes need be an dict')
self.attributes.update(transaction_attributes)
def charge(self):
self.validate_attrs()
def validate_attrs(self):
validate_transaction(self.attributes)
|
51reboot/actual_09_homework
|
07/xq/app.py
|
Python
|
mit
| 10,060
| 0.018564
|
#encoding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8') # set the default encoding to utf-8 so Chinese text in the code displays correctly
from flask import Flask # import the Flask class from the flask package
from flask import render_template # import the render_template function from the flask package
from flask import request # import the request object from the flask package
from flask import redirect # import the redirect function from the flask package
from flask import session # import the session object from the flask package
from flask import flash # import the flash function from the flask package
from functools import wraps # import the wraps function from functools
import userdb as user # import the user module
import os
app = Flask(__name__)
def login_required(func): # function decorator that checks whether the login has a session, i.e. verifies identity
@wraps(func)
def wrapper(*args, **kwargs):
if session.get('user') is None:
return redirect('/')
rt = func(*args, **kwargs)
return rt
return wrapper
'''
Open the user login page
'''
@app.route('/') # requests for url path=/ are handled by the index function
def index():
return render_template('login.html') # render the login.html template and return the page content
'''
Check the user's login credentials
'''
@app.route('/login/', methods=["POST"]) # POST requests for url path=/login/ are handled by the login function
def login():
username
|
= request.form.get('username', '') # receive the data submitted by the user
password = request.form.get('password', '')
print "username is %s" %username
_users,_error = user.get_info(username=username)
if _users:
_id = _users[0]['id']
else:
_id = ''
# we want to bind the ID into the session too, so the corresponding user's password can be changed by id later!
# need to verify that the username and password are correct
if user.validate_login(username, passw
|
ord): # check whether the user login is valid
session['user'] = {'username':username} # set the session, binding the user's identity info to the username, much like opening a bank account
session['id'] = {'id':_id}
flash("登陆成功!") # flask message flashing, one message at a time, retrieved via get_flashed_messages()
print session # print the session info, for inspection and to understand the session
return redirect('/users/') # redirect to the url that shows the users page
else:
# login failed
return render_template('login.html', username=username, error='用户名或密码错误')
'''
Display the user list
'''
@app.route('/users/') # GET requests for url path=/users/ are handled by the users function
@login_required
def users():
# fetch all users' information
_users,_errors = user.get_info()
return render_template('users.html', users=_users,username=session.get('user').get('username')) # load and render the users.html template
'''
Go to the page for entering a new user's information
'''
@app.route('/user/create/') # GET requests for url path=/user/create/ are handled by create_user
@login_required
def create_user():
return render_template('user_create.html') # load and render user_create.html
'''
Store the new user's information, POST
'''
@app.route('/user/add/', methods=['POST']) # POST requests for url path=/user/add are handled by add_user
@login_required
def add_user():
username = request.form.get('username', '')
password = request.form.get('password', '')
age = request.form.get('age', '')
# gender = request.form.get('gender','1')
# hobby = request.form.getlist('hobby')
# department = request.form.getlist('department')
# img = request.files.get('img')
# if img:
# print img.filename
# img.save('/tmp/kk.txt')
# print request.form
# print gender
# print hobby
# print department
# check whether the user information is valid
_is_ok, _error = user.validate_add_user(username, password, age)
if _is_ok:
user.add_user(username, password, age) # checks passed, add the user information
flash("用户%s添加成功!" %username)
return redirect('/users/') # redirect to the user list
else:
# go back to the user creation page, echoing the error message & the user information
return render_template('user_create.html', \
error=_error, username=username, \
password=password, age=age)
'''
Open the user information edit page, GET by default
'''
@app.route('/user/modify/') # GET requests for url path=/user/modify/ are handled by the modify_user function
@login_required
def modify_user():
_id = request.args.get('id', '')
_users,_error = user.get_info(_id=_id)
if _users:
_username = _users[0]['username']
_password =_users[0]['password']
_age = _users[0]['age']
else:
_error = '用户不存在'
return render_template('user_modify.html',_error=_error,_id=_id,password=_password, age=_age, username=_username)
'''
Submit the form and save the user data
'''
@app.route('/user/update/', methods=['GET','POST']) # POST requests for url path=/user/update/ are handled by the update_user function
@login_required
def update_user():
# get the user information from the edit page
_id = request.form.get('id', '')
username = request.form.get('username', '')
password = request.form.get('password', '')
age = request.form.get('age', 0)
#print type(_id),type(username),type(password),type(age)
# validate the user information from the edit page
_is_valid_ok, _error = user.validate_update_user(_id,username,password,age)
#print "valid:%s" %_is_valid_ok
#print "error:%s" %_error
if _is_valid_ok:
user.update_user(_id,username, password, age)
flash("用户%s修改成功!" %username)
return redirect('/users/')
else:
return render_template('user_modify.html', _id=_id,error=_error, username=username, password=password, age=age)
@app.route('/user/delete/')
@login_required
def delete_user():
_id = request.args.get('id')
_user,_error = user.get_info(_id=_id)
if _user is None:
_error = '用户信息不存在'
else:
username = _user[0]['username']
user.delete_user(_id)
flash("%s删除成功" %username)
return redirect('/users/')
@app.route('/user/find/',methods=['POST','GET'])
def finder_user():
username = request.form.get('username','')
users,_error = user.get_info(username=username)
if users:
return render_template('users.html',users = users)
flash("Sorry,没有查到相关数据!")
return render_template('users.html')
@app.route('/user/logs/',methods=['POST','GET'])
@login_required
def logs():
count = request.form.get('count',10)
count = int(count) if str(count).isdigit() else 10
logs,_error = user.get_info(_count=count)
return render_template("logs.html",logs=logs)
@app.route('/user/customlogs/',methods=['POST','GET'])
def custom_logs():
sql = request.form.get('sql','select * from logs limit 10;')
print "sql is %s" %sql
_result,_error = user.get_info(_sql=sql)
if not sql:
return redirect('/user/customlogs/')
if _result:
return render_template("customlogs.html",result=_result,sql=sql)
else:
return render_template("customlogs.html",result=_result,sql=sql,error=_error)
'''
Open the password change page, GET by default
'''
@app.route('/user/modifypasswd/') # GET requests for url path=/user/modify/ are handled by the modify_user function
@login_required
def modify_password():
_id = session.get('id').get('id')
print "id is %s" %_id
_users,_error = user.get_info(_id=_id)
if _users:
_username = _users[0]['username']
_password =_users[0]['password']
_age = _users[0]['age']
return render_template('passwd_modify.html',username=_username)
'''
Submit the password update form, save it and automatically return to the home page
'''
@app.route('/user/updatepasswd/',methods=['POST','GET'])
def update_passwd():
_id = session.get('id').get('id')
# the original password from the submitted form
_password = request.form.get('_password')
print "old passwd :%s" %_password
# the new passwords from the submitted form
_password1 = request.form.get('_password1')
_password2 = request.form.get('_password2')
print "new passwd1:%s" %_password1
print "new passwd2:%s" %_password2
_result,_error = user.validate_new_password(_id,_password1,_password2)
if user.validate_password(_id,_password):
if _result:
user.update_password(_id,_password1)
flash("密码修改成功!")
return redirect('/users/')
else:
return render_template('passwd_modify.html',error=_error,_password=_password,_password1=_password1,_password2=_password2)
else:
flash("原密码输入错误,请重新输入!")
return render_template('passwd_modify.html')
@app.route('/user/upload/')
@login_required
def upload_page():
return render_template('upload.html')
@app.route('/user/uploadaction/',methods=['POST','GET'])
@login_required
de
|
rtucker-mozilla/inventory
|
vendor-local/src/django-extensions/django_extensions/management/commands/create_command.py
|
Python
|
bsd-3-clause
| 3,608
| 0.003326
|
import os
from django.core.management.base import CommandError, AppCommand
from django_extensions.management.utils import _make_writeable
from optparse import make_option
class Command(AppCommand):
option_list = AppCommand.option_list + (
make_option('--name', '-n', action='store', dest='command_name', default='sample',
help='The name to use for the management command'),
make_option('--base', '-b', action='store', dest='base_command', default='Base',
help='The base class used for implementation of this command. Should be one of Base, App, Label, or NoArgs'),
)
help = ("Creates a Django management command directory structure for the given app name"
" in the current directory.")
args = "[appname]"
label = 'application name'
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = True
def handle_app(self, app, **options):
directory = os.getcwd()
app_name = app.__name__.split('.')[-2]
project_dir = os.path.join(directory, app_name)
if not os.path.exists(project_dir):
try:
os.mkdir(project_dir)
exc
|
ept OSError, e:
raise CommandError(e)
copy_template('command_template', project_dir, options.get('command_name'), '%sCommand' % options.get('base_command'))
def copy_template(template_name, copy_to, com
|
mand_name, base_command):
"""copies the specified template directory to the copy_to location"""
import django_extensions
import re
import shutil
template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
handle_method = "handle(self, *args, **options)"
if base_command == 'AppCommand':
handle_method = "handle_app(self, app, **options)"
elif base_command == 'LabelCommand':
handle_method = "handle_label(self, label, **options)"
elif base_command == 'NoArgsCommand':
handle_method = "handle_noargs(self, **options)"
# walks the template structure and copies it
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir) + 1:]
if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
os.mkdir(os.path.join(copy_to, relative_dir))
for i, subdir in enumerate(subdirs):
if subdir.startswith('.'):
del subdirs[i]
for f in files:
if f.endswith('.pyc') or f.startswith('.DS_Store'):
continue
path_old = os.path.join(d, f)
path_new = os.path.join(copy_to, relative_dir, f.replace('sample', command_name))
if os.path.exists(path_new):
path_new = os.path.join(copy_to, relative_dir, f)
if os.path.exists(path_new):
continue
path_new = path_new.rstrip(".tmpl")
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ command_name }}', command_name).replace('{{ base_command }}', base_command).replace('{{ handle_method }}', handle_method))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
|
auag92/n2dm
|
Asap-3.8.4/Debug/UnBalance.py
|
Python
|
mit
| 1,763
| 0.01418
|
#PBS -N UnBalance
#PBS -m ae
#PBS -q long
#PBS -l nodes=1:opteron:ppn=2
"""Test handling of extreme load-unbalancing."""
from asap3 import *
from asap3.md import MDLogger
from ase.lattice.cubic import FaceCenteredCubic
import numpy as np
from asap3.mpi import world
#DebugOutput("UnBalance.%d.out")
#set_verbose(1)
print_version(1)
fast = False
#AsapThreads()
cpulayout = (1,1,2)
element = 'Pt'
size = (20,20,100)
master = world.rank == 0
if master:
atoms = FaceCenteredCubic(symbol=element, size=size, pbc=(True, True, False))
atoms.center(vacuum=10.0, axis=2)
atoms.set_momenta(np.zeros((len(atoms),3)))
# Select an atom to get a kick
r = atoms.get_positions()
uc = atoms.get_cell()
x = r[:,0] - 0.5 * uc[0,0]
y = r[:,1] - 0.5 * uc[1,1]
z = r[:,2]
zprime = z - 0.01 * (x * x + y * y)
n = np.argmax(zprime)
#a = atoms[n]
#dp = np.sqrt(2 * a.mass * 1000.0)
#a.momentum = np.array([0, 0, dp])
t = np.zeros(len(atoms), int)
t[n] = 1
atoms.set_tags(t)
else:
atoms = None
atoms =
|
MakeParallelAtoms(atoms, cpulayout)
print len(atoms), atoms.get_number_of_atoms()
atoms.set_calculator(EMT())
traj = PickleTrajectory("UnBalance.traj", "w", atoms)
if fast:
atoms.get_forces()
traj.write()
for i in range(50):
|
print "\n\n\n\n*** STEP %i ***\n\n\n\n\n" % (i,)
r = atoms.get_positions()
r += atoms.get_tags().reshape((-1,1)) * np.array([[0, 0, 20.0],])
atoms.set_positions(r)
atoms.get_forces()
traj.write()
else:
dyn = VelocityVerlet(atoms, 5*units.fs)
logger = MDLogger(dyn, atoms, 'UnBalance.log', stress=True, peratom=True)
dyn.attach(logger, interval=10)
dyn.attach(traj, interval=100)
dyn.run(10000)
|
lerker/cupydle
|
cupydle/dnn/capas.py
|
Python
|
apache-2.0
| 8,329
| 0.005043
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Ponzoni, Nelson"
__copyright__ = "Copyright 2015"
__credits__ = ["Ponzoni Nelson"]
__maintainer__ = "Ponzoni Nelson"
__contact__ = "npcuadra@gmail.com"
__email__ = "npcuadra@gmail.com"
__license__ = "GPL"
__version__ = "1.0.0"
__status__ = "Production"
"""
"""
import numpy
import theano
from cupydle.dnn.funciones import sigmoideaTheano
from cupydle.dnn.funciones import linealRectificadaTheano
from warnings import warn
class Capa(object):
def __init__(self, unidadesEntrada, unidadesSalida, entrada, rng,
funcionActivacion, W=None, b=None):
# depending on the activation function (str) selected
|
if funcionActivacion == 'sigmoidea':
funcionActivacion_tmp = sigmoideaTheano()
elif funcionActivacion == 'linealRectificada':
funcionActivacion_tmp = linealRectificadaTheano()
else:
funcionActivacion_tmp = None
self.funcionActivacion = funcionActivacion_tmp
if W is None:
W_value
|
s = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (unidadesEntrada + unidadesSalida)),
high=numpy.sqrt(6. / (unidadesEntrada + unidadesSalida)),
size=(unidadesEntrada, unidadesSalida)
),
dtype=theano.config.floatX
)
if type(self.funcionActivacion) == type(sigmoideaTheano()):
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
del W_values
else:
if type(W).__module__ != numpy.__name__:
assert False, "Solo acepto del tipo numpy.ndarray"
else:
W = theano.shared(value=W, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((unidadesSalida,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
del b_values
else:
if type(b).__module__ != numpy.__name__:
assert False, "Solo acepto del tipo numpy.ndarray"
else:
b = theano.shared(value=b, name='b', borrow=True)
self.W = W
self.b = b
# parameters of the model
#self.params = [self.W, self.b]
self.x = entrada
def activate(self):
lin_output = theano.tensor.dot(self.x, self.W) + self.b
#output = (lin_output if self.funcionActivacion is None else self.funcionActivacion(lin_output))
output = self.funcionActivacion(lin_output)
return output
# intrinsic properties of the layers
def __str__(self):
return str("Capa: " + str(type(self))
+ "\n W[" + str(self.W) + "]: "
+ str(self.W.get_value(borrow=True).shape)
+ " " + str(type(self.W))
+ "\n bias[" + str(self.b) + "]:"
+ str(type(self.b.get_value(borrow=True).shape))
+ " " + str(type(self.b)))
# functions for retrieving values
def get_weights(self):
warn("No se deberia utilizar mas, <<getW>>")
return self.W
def get_bias(self):
warn("No se deberia utilizar mas, <<getB>>")
return self.b
@property
def getW(self):
return self.W.get_value(borrow=True)
@property
def getB(self):
return self.b.get_value(borrow=True)
def set_weights(self, w):
if isinstance(w, theano.TensorType):
self.W.set_value(w)
else:
assert False
def set_bias(self, b):
if isinstance(b, theano.TensorType):
self.b.set_value(b)
else:
assert False
class CapaClasificacion(Capa):
def __init__(self, unidadesEntrada, unidadesSalida, entrada, W=None, b=None):
# initialize with 0 the weights W as a matrix of shape (unidadesEntrada, unidadesSalida)
if W is None:
W_values = numpy.zeros((unidadesEntrada, unidadesSalida), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
del W_values
else:
if type(W).__module__ != numpy.__name__:
assert False, "Solo acepto del tipo numpy.ndarray"
else:
W = theano.shared(value=W, name='W', borrow=True)
# initialize the biases b as a vector of unidadesSalida 0s
if b is None:
b_values = numpy.zeros((unidadesSalida,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
del b_values
else:
if type(b).__module__ != numpy.__name__:
assert False, "Solo acepto del tipo numpy.ndarray"
else:
b = theano.shared(value=b, name='b', borrow=True)
self.W = W
self.b = b
# parameters of the model
#self.params = [self.W, self.b]
self.x = entrada
def activate(self):
# symbolic expression for computing the matrix of class-membership
# probabilities
# Where:
# W is a matrix where column-k represent the separation hyperplane for
# class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of
# hyperplane-k
return theano.tensor.nnet.softmax(theano.tensor.dot(self.x, self.W) + self.b)
def predict(self):
# symbolic description of how to compute prediction as class whose
# probability is maximal
return theano.tensor.argmax(self.activate(), axis=1)
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -theano.tensor.mean(theano.tensor.log(self.activate())[theano.tensor.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.predict().ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.predict().type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return theano.tensor.mean(theano.tensor.neq(self.
|
ah-anssi/SecuML
|
SecuML/core/DimensionReduction/Configuration/Projection/SdmlConfiguration.py
|
Python
|
gpl-2.0
| 1,670
| 0.001198
|
# SecuML
# Copyright (C) 2016 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Yo
|
u should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
from SecuML.core.DimensionReduction.Algorithms.Projection.Sdml import Sdml
from SecuML.core.DimensionReduct
|
ion.Configuration import DimensionReductionConfFactory
from .SemiSupervisedProjectionConfiguration import SemiSupervisedProjectionConfiguration
class SdmlConfiguration(SemiSupervisedProjectionConfiguration):
def __init__(self, families_supervision=None):
SemiSupervisedProjectionConfiguration.__init__(
self, Sdml, families_supervision=families_supervision)
@staticmethod
def fromJson(obj):
conf = SdmlConfiguration(
families_supervision=obj['families_supervision'])
conf.num_components = obj['num_components']
return conf
def toJson(self):
conf = SemiSupervisedProjectionConfiguration.toJson(self)
conf['__type__'] = 'SdmlConfiguration'
return conf
DimensionReductionConfFactory.getFactory().registerClass('SdmlConfiguration',
SdmlConfiguration)
|
MartinHjelmare/home-assistant
|
homeassistant/components/emoncms_history/__init__.py
|
Python
|
apache-2.0
| 2,839
| 0
|
"""Support for sending data to Emoncms."""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_API_KEY, CONF_WHITELIST, CONF_URL, STATE_UNKNOWN, STATE_UNAVAILABLE,
CONF_SCAN_INTERVAL)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers.event import track_point_in_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'emoncms_history'
CONF_INPUTNODE = 'inputnode'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Required(CONF_INPUTNODE): cv.positive_int,
vol.Required(CONF_WHITELIST): cv.entity_ids,
vol.Optional(CONF_SCAN_INTERVAL, default=30): cv.positive_int,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the Emoncms history component."""
conf = config[DOMAIN]
whitelist = conf.get(CONF_WHITELIST)
def send_data(url, apikey, node, payload):
"""Send payload data to Emoncms."""
try:
fullurl = '{}/input/po
|
st.json'.format(url)
data = {"apikey": apikey, "data": payload}
parameters = {"node": node}
req = requests.post(
fullurl, params=parameters, data=data, allow_redirects=True,
timeout=5)
except requests.exceptions.RequestException:
_LOGGER.error("Error saving data '%s' to '%s'", payl
|
oad, fullurl)
else:
if req.status_code != 200:
_LOGGER.error(
"Error saving data %s to %s (http status code = %d)",
payload, fullurl, req.status_code)
def update_emoncms(time):
"""Send whitelisted entities states regularly to Emoncms."""
payload_dict = {}
for entity_id in whitelist:
state = hass.states.get(entity_id)
if state is None or state.state in (
STATE_UNKNOWN, '', STATE_UNAVAILABLE):
continue
try:
payload_dict[entity_id] = state_helper.state_as_number(state)
except ValueError:
continue
if payload_dict:
payload = "{%s}" % ",".join("{}:{}".format(key, val)
for key, val in
payload_dict.items())
send_data(conf.get(CONF_URL), conf.get(CONF_API_KEY),
str(conf.get(CONF_INPUTNODE)), payload)
track_point_in_time(hass, update_emoncms, time +
timedelta(seconds=conf.get(CONF_SCAN_INTERVAL)))
update_emoncms(dt_util.utcnow())
return True
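# A minimal configuration.yaml sketch for this component (illustrative only:
# the keys come from CONFIG_SCHEMA above, the values are placeholders):
#
#   emoncms_history:
#     api_key: YOUR_EMONCMS_WRITE_API_KEY
#     url: https://emoncms.org
#     inputnode: 19
#     whitelist:
#       - sensor.living_room_temperature
#     scan_interval: 30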
|
gchq/gaffer-tools
|
python-shell/src/example.py
|
Python
|
apache-2.0
| 26,676
| 0.000487
|
#
# Copyright 2016-2019 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from gafferpy import gaffer as g
from gafferpy import gaffer_connector
def run(host, verbose=False):
return run_with_connector(create_connector(host, verbose))
def run_with_connector(gc):
print()
print('Running operations')
print('--------------------------')
print()
get_schema(gc)
get_fil
|
ter_functions(gc)
get_class_filter_functions(gc)
get_element_generators(gc)
get_object_generators(gc)
get_operations(gc)
get_serialised_fields(gc)
get_store_traits(gc)
is_operation_supported(gc)
add_elements(gc)
get_elements(gc)
get_adj_seeds(gc)
get_all_elements(gc)
get_walks(gc)
generate_elements(gc)
generate_domain_objs(gc)
gene
|
rate_domain_objects_chain(gc)
get_element_group_counts(gc)
get_sub_graph(gc)
export_to_gaffer_result_cache(gc)
get_job_details(gc)
get_all_job_details(gc)
add_named_operation(gc)
get_all_named_operations(gc)
named_operation(gc)
delete_named_operation(gc)
add_named_view_summarise(gc)
add_named_view_date_range(gc)
get_all_named_views(gc)
named_view_summarise(gc)
named_view_date_range(gc)
named_views(gc)
delete_named_views(gc)
sort_elements(gc)
max_element(gc)
min_element(gc)
to_vertices_to_entity_seeds(gc)
complex_op_chain(gc)
op_chain_in_json(gc)
def create_connector(host, verbose=False):
return gaffer_connector.GafferConnector(host, verbose)
def get_schema(gc):
# Get Schema
result = gc.execute_get(
g.GetSchema()
)
print('Schema:')
print(result)
print()
def get_filter_functions(gc):
# Get filter functions
result = gc.execute_get(
g.GetFilterFunctions()
)
print('Filter Functions:')
print(result)
print()
def get_class_filter_functions(gc):
# Get class filter functions
class_name = 'uk.gov.gchq.koryphe.impl.predicate.IsMoreThan'
result = gc.execute_get(
g.GetClassFilterFunctions(class_name=class_name)
)
print('Class Filter Functions (IsMoreThan):')
print(result)
print()
def get_element_generators(gc):
# Get Element generators
result = gc.execute_get(
g.GetElementGenerators()
)
print('Element generators:')
print(result)
print()
def get_object_generators(gc):
# Get Object generators
result = gc.execute_get(
g.GetObjectGenerators()
)
print('Object generators:')
print(result)
print()
def get_operations(gc):
# Get operations
result = gc.execute_get(
g.GetOperations()
)
print('Operations:')
print(result)
print()
def get_serialised_fields(gc):
# Get serialised fields
class_name = 'uk.gov.gchq.koryphe.impl.predicate.IsMoreThan'
result = gc.execute_get(
g.GetSerialisedFields(class_name=class_name)
)
print('Serialised Fields (IsMoreThan):')
print(result)
print()
def get_store_traits(gc):
# Get Store Traits
result = gc.execute_get(
g.GetStoreTraits()
)
print('Store Traits:')
print(result)
print()
def is_operation_supported(gc):
# Is operation supported
operation = 'uk.gov.gchq.gaffer.operation.impl.add.AddElements'
result = gc.is_operation_supported(
g.IsOperationSupported(operation=operation)
)
print(
'\nOperation supported ("uk.gov.gchq.gaffer.operation.impl.add.AddElements"):')
print(result)
print()
def add_elements(gc):
# Add Elements
gc.execute_operation(
g.AddElements(
input=[
g.Entity(
group='JunctionUse',
vertex='M1:1',
properties={
'countByVehicleType': g.freq_map({
'BUS': 10,
'CAR': 50
}),
'endDate': g.date(1034319600000),
'count': g.long(60),
'startDate': g.date(1034316000000)
}
),
g.Edge(
group='RoadHasJunction',
source='M1',
destination='M1:1',
directed=True,
properties={}
)
]
)
)
print('Elements have been added')
print()
def get_elements(gc):
# Get Elements
input = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed('M5:10'),
# Edge input can be provided as follows
g.EdgeSeed('M5:10', 'M5:11', g.DirectedType.EITHER),
g.EdgeSeed('M5:10', 'M5:11', g.DirectedType.DIRECTED),
# Or you can use True or False for the direction
g.EdgeSeed('M5:10', 'M5:11', True)
],
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[],
transient_properties=[
g.Property('description', 'java.lang.String')
],
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=['count'],
predicate=g.IsMoreThan(
value=g.long(1)
)
)
],
transform_functions=[
g.FunctionContext(
selection=['SOURCE', 'DESTINATION', 'count'],
function=g.Function(
class_name='uk.gov.gchq.gaffer.traffic.transform.DescriptionTransform'
),
projection=['description']
)
]
)
]
),
directed_type=g.DirectedType.EITHER
)
)
print('Related input')
print(input)
print()
def get_adj_seeds(gc):
# Adjacent Elements - chain 2 adjacent entities together
adj_seeds = gc.execute_operations(
[
g.GetAdjacentIds(
input=[
g.EntitySeed(
vertex='M5'
)
],
view=g.View(
edges=[
g.ElementDefinition(
'RoadHasJunction',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.GetAdjacentIds(
view=g.View(
edges=[
g.ElementDefinition(
'RoadUse',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
)
]
)
print('Adjacent entities - 2 hop')
print(adj_seeds)
print()
def get_all_elements(gc):
# Get all input, but limit the total results to 3
all_elements = gc.execute_operations(
operations=[
g.GetAllElements(),
g.Limit(result_limit=3)
]
)
print('All input (Limited to first 3)')
print(all_elements)
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/drsblobs/DsCompressedChunk.py
|
Python
|
gpl-2.0
| 1,282
| 0.00702
|
# encoding: utf-8
# module samba.dcerpc.drsblobs
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/drsblobs.so
# by generator 1.135
""" drsblobs DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class DsCompressedChunk(__talloc.Object):
# no
|
doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __ndr_pack__(self, *args, **kwargs): # real signature unknown
"""
S.ndr_pack(object) -> blob
NDR pack
"""
pass
def __ndr_print__(self, *args, **kwargs): # real signature unknown
"""
S.ndr_print(object) -> None
NDR print
"""
pass
def __ndr_unpack__(self, *args, **kwargs): # real signature unknown
|
"""
S.ndr_unpack(class, blob, allow_remaining=False) -> None
NDR unpack
"""
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
data = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
marker = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
mo/project-euler
|
python/problem15.py
|
Python
|
mit
| 8,409
| 0.015816
|
#
# In this file, there is code present for solving this pr
|
oblem using
# several different approaches. The first ones were too slow to be a
# valid solution (Project EULER says that it should be possible to
# find an algorithm that solves the problem in less than 60 secs).
#
#
# In the end, method number five ended up solving this problem in
# 0.024 secs which is really nice seeing as how the first three
# attempts could not solve the problem even if they ran overnight.
#
#
# Note: It might also be interesting to look at the corresponding
#
|
sequence (the number of unique paths through an NxN grid)
# in Sloane's integer sequence dictionary:
#
# http://www.research.att.com/~njas/sequences/?q=2%2C6%2C20%2C70%2C252%2C924%2C3432%2C12870%2C48620&language=english&go=Search
import time
def number_of_bits_set(n):
bits_set = 0
while (n):
bits_set += 1
n &= n - 1
return bits_set
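# For example, number_of_bits_set(0b1011) == 3; the "n &= n - 1" step clears
# the lowest set bit on each pass (Kernighan's bit-counting trick).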
def number_of_bits_set_2(n):
bits_set = 0
while (n):
bits_set += n & 1
n >>= 1
return bits_set
def is_valid_path_bitcount_test(n, gridSize):
if number_of_bits_set(n) == gridSize:
return True
return False
def is_valid_path_custom(n, gridSize):
smallest_int_representing_a_valid_path = 2**gridSize - 1 # int with the lowest "gridSize" bits all set
if n < smallest_int_representing_a_valid_path:
return False;
bits_set = 0
for k in xrange(0, 2 * gridSize):
bits_set += n & 1
n >>= 1
if bits_set > gridSize:
return False
return bits_set == gridSize
def is_valid_path(n, gridSize):
return is_valid_path_custom(n, gridSize)
def unit_tests():
for x in xrange(1, 1500):
assert number_of_bits_set(x) == number_of_bits_set_2(x)
for gridSize in xrange(1, 7):
for x in xrange(1, 2**gridSize):
assert is_valid_path_bitcount_test(x, gridSize) == is_valid_path_custom(x, gridSize)
def number_of_possible_routes(gridSize):
valid_path_count = 0
for x in xrange(1, 2 ** (gridSize * 2)):
if is_valid_path(x, gridSize):
valid_path_count += 1
return valid_path_count
def solve_problem_using_approach_one(maxGridSize):
unit_tests()
print "Looking for solution using approach one... for grid sizes up to", maxGridSize
for x in xrange(1, maxGridSize + 1):
print "time is now", time.localtime(), x, number_of_possible_routes(x)
#
# functions for my second attempt at solving this problem
#
def next_path(path):
for index in xrange(1, len(path) + 1):
i = len(path) - index
if path[i] < len(path):
path[i] += 1
for ii in xrange(i + 1, len(path)):
path[ii] = path[i]
return True
return False
def path_count(gridSize):
count = 0
path = [0 for x in xrange(0, gridSize)] # the first path
while True:
#print path
count += 1
if not next_path(path):
break;
return count
def solve_problem_using_approach_two(maxGridSize):
print "Looking for solution using approach two... for grid sizes up to", maxGridSize
for n in xrange(1, maxGridSize + 1):
print "time is now", time.localtime(), "gridSize ==", n, "has", path_count(n), "paths"
#
# functions for my third attempt at solving this problem
#
def sum(start, stop, func):
sum = 0
for x in xrange(start, stop + 1):
sum += func(x)
return sum
def level_sum(level, start_value, level_count):
if level == 1:
return sum(start_value, level_count, lambda a: 1)
return sum(start_value, level_count, lambda a: level_sum(level - 1, a, level_count))
def solve_problem_using_approach_three(maxGridSize):
print "Looking for solution using approach three... for grid sizes up to", maxGridSize
for x in xrange(1, maxGridSize + 1):
print "time is now", time.localtime(), x, level_sum(x, 0, x)
#
# functions for my fourth attempt at solving this problem
#
def factorial(n):
if n == 0:
return 1
return n * factorial(n - 1)
def binomial_coefficient(n, k):
return factorial(n) / (factorial(k) * factorial(n - k))
def path_count_using_binomial_sum(gridSize):
return sum(0, gridSize, lambda k: binomial_coefficient(gridSize, k)**2)
def solve_problem_using_approach_four(maxGridSize):
print "Looking for solution using approach four... for grid sizes up to", maxGridSize
for x in xrange(1, maxGridSize + 1):
print "time is now", time.localtime(), x, path_count_using_binomial_sum(x)
#
# functions for my fifth attempt at solving this problem
#
# ********
#
# Here is a detailed explanation of the thinking that lead me
# to this particular solution:
#
# While traversing the grid from top-left to down-right you
# make a series of choices where you go either DOWN or RIGHT.
# Since backtracking is not allowed, you can never move UP or LEFT.
# Therefore, the path you take through the grid can be described
# by a sequence of "n" R's and "n" D's (where R means RIGHT etc).
# Note that to reach the down-right part of an NxN grid it's necessary
# to move RIGHT exactly N times and also DOWN exactly N times.
#
# Moving straight along the top edge of the a 5x5 grid and then straight
# down would be represented by the sequence:
#
# RRRRRDDDDD
#
# Moving in a zig zag pattern across the diagonal of the grid would
# be represented by one of the sequences:
#
# RDRDRDRDRD or DRDRDRDRDR
#
# So basically each sequence of exactly N R's and also N D's will
# represent a path through an NxN grid, and at the same time each path
# through a NxN grid will be represented by exactly one such sequence.
#
# Thus, what we want to know is "In how many ways can we re-order a
# sequence of N R's and N D's". The answer goes back to basic
# combinatorics.
#
# In general X objects can be arranged in factorial(X) ways because
# first you choose the object which is supposed to go first and at that
# time you have X objects to choose from, after that you choose the
# object that should go after the first object and at this time you
# have only X-1 choices because you can't choose the already chosen
# object again. And thus you end up with X * (X - 1) * (X - 2) * ...
# ... * 1 = X!
#
# However, when we re-arrange a sequence of R's and D's then each R is
# equivalent to each other R and thus if we change the order of the
# first two R's in this sequence:
#
# RRRDDDRRDD
#
# Then we would still have the same sequence. If instead of N R's and
# N D's we had any set of 2*N unique objects then the number of ways
# they could be ordered would be factorial(2*N) but since the R's are
# all equivalent we need to disregard all the combinations where just
# one R is swapped with another R.
#
# Now if we think about only the subset of R's inside the sequence,
# they can under the same logic be re-arranged in factorial(N) ways
# because we know that the number of R's is exactly N. The same goes
# for the D's, there is factorial(N) ways to re-arrange the D's without
# ending up with a new different path.
#
# For every equivalent re-arrangement of the R's we can also put the D's
# into factorial(N) different orders without ending up with a new path.
# This means that, for example, in the sequence:
#
# RRRDDDRRDD
# ...we can actually re-arrange the R's in factorial(5) ways while
# still representing the same path. Similarly, we can re-arrange the D's
# in factorial(5) ways. If we are allowed to swap R's with other R's and
# also swap D's with other D's then we can actually re-order this
# sequence in factorial(5)*factorial(5) different ways while the
# sequence still represents the exact same path.
#
# So there is factorial(2*N) different ways to re-order the R's and D's
# but out of those orderings we know that factorial(N)*factorial(N) are
# just "dummy orders" which represent the same path through the grid.
#
# Consequently, the number of unique paths through the grid must be:
#
# factorial(2*N) / (factorial(N) * factorial(N))
#
# This is implemented below and it works like a charm!
#
def path_count_combinatorial(gridSize):
return factorial(2 * gridSize) / (factorial(gridSize) * factorial(gridSize))
def solve_problem_using_approach_five(maxGridSize):
print "Looking for solution using approach five... for grid sizes up to", maxGridSize
for x in xrange(1, maxGridSize + 1):
print "time is now", time.localtime(), x, path_count_combinatorial(x)
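# A quick worked check of the closed form above: for a 2x2 grid,
# factorial(4) / (factorial(2) * factorial(2)) = 24 / 4 = 6, matching the six
# orderings RRDD, RDRD, RDDR, DRRD, DRDR, DDRR. It should also agree with the
# slower binomial-sum version defined earlier, e.g.
#   assert path_count_combinatorial(2) == path_count_using_binomial_sum(2) == 6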
#
# main program
#
solve_proble
|
jchodera/bhmm
|
bhmm/init/__init__.py
|
Python
|
lgpl-3.0
| 859
| 0.001164
|
# This file is part of BHMM (Bayesian Hidden Markov Models).
#
# Copyright (c) 2016 Frank Noe (Freie Universitaet Berlin)
# and John D. Chodera (Memorial Sloan-Kettering Cancer Center, New York)
#
# BHMM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FI
|
TNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Les
|
ser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'noe'
|
feigaochn/leetcode
|
p686_repeated_string_match.py
|
Python
|
mit
| 1,105
| 0.001817
|
#!/usr/bin/env python
# coding: utf-8
"""
Given two strings A and B, find the minimum number of times A has to be repeated such that B is a substring of it. If no such solution, return -1.
For example, with A = "abcd" and B = "cdabcdab".
Return 3, because by repeating A three times (“abcdabcdabcd”), B is a substring of it; and B is not a substring of A repeated two times ("abcdabcd").
Note:
The length of A and B will be between 1 and 10000.
"""
|
class Solution:
def repeatedStringMatch(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
|
from math import ceil
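        # Why checking `repeats` and `repeats + 1` copies suffices: B must start
        # somewhere inside the first copy of A it overlaps, so if B fits at all
        # it spans at most ceil(len(B) / len(A)) + 1 consecutive copies.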
repeats = int(ceil(len(B) / len(A)))
container = ""
for _ in range(repeats):
container += A
if container.find(B) != -1:
return repeats
container += A
if container.find(B) != -1:
return repeats + 1
return -1
if __name__ == '__main__':
sol = Solution().repeatedStringMatch
print(sol("abcd", "cdabcdab"))
print(sol("abc", "cdabcdab"))
print(sol("abc", "cdabcdab"))
|
jeroanan/Nes2
|
Tests/OpCodeTests/TestRtiOpCode.py
|
Python
|
bsd-3-clause
| 313
| 0.003195
|
from Chip import OpCodeDefinitions
from Tests.OpCodeTests.OpCodeTestBase import OpCodeTestBase
class TestRt
|
iOpCode(OpCodeTestBase):
def test_execute_rti_implied_command_calls_and_method(self):
self.assert_opcode_execution(OpCodeDefinitions.rti_implied_command, self.target.get_rti_com
|
mand_executed)
|
muccg/rdrf
|
rdrf/rdrf/admin.py
|
Python
|
agpl-3.0
| 19,470
| 0.001079
|
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from django.contrib import admin
from django.urls import reverse
from rdrf.models.definition.models import Registry
from rdrf.models.definition.models import RegistryForm
from rdrf.models.definition.models import QuestionnaireResponse
from rdrf.models.definition.models import CDEPermittedValue
from rdrf.models.definition.models import Notification
from rdrf.models.definition.models import CDEPermittedValueGroup
from rdrf.models.definition.models import CommonDataElement
from rdrf.models.definition.models import Section
from rdrf.models.definition.models import ConsentSection
from rdrf.models.definition.models import ConsentQuestion
from rdrf.models.definition.models import DemographicFields
from rdrf.models.definition.models import CdePolicy
from rdrf.models.definition.models import EmailNotification
from rdrf.models.definition.models import EmailTemplate
from rdrf.models.definition.models import EmailNotificationHistory
from rdrf.models.definition.models import ContextFormGroup
from rdrf.models.definition.models import ContextFormGroupItem
from rdrf.models.definition.models import CDEFile
from rdrf.models.definition.models import ConsentRule
from rdrf.models.definition.models import ClinicalData
from rdrf.models.definition.models import RegistryYaml
from rdrf.models.definition.models import DropdownLookup
from rdrf.models.proms.models import Survey
from rdrf.models.proms.models import SurveyQuestion
from rdrf.models.proms.models import Precondition
from rdrf.models.proms.models import SurveyAssignment
from rdrf.models.proms.models import SurveyRequest
from rdrf.models.definition.review_models import Review
from rdrf.models.definition.review_models import ReviewItem
from rdrf.models.definition.review_models import PatientReview
from rdrf.models.definition.review_models import PatientReviewItem
from rdrf.models.definition.verification_models import Verification
from rdrf.system_role import SystemRoles
from rdrf.models.definition.models import CustomAction
from rdrf.models.task_models import CustomActionExecution
from reversion.admin import VersionAdmin
import logging
from django.http import HttpResponse
from wsgiref.util import FileWrapper
import io as StringIO
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.conf import settings
from django.contrib.auth import get_user_model
from rdrf.admin_forms import RegistryFormAdminForm
from rdrf.admin_forms import EmailTemplateAdminForm
from rdrf.admin_forms import DemographicFieldsAdminForm
from functools import reduce
logger = logging.getLogger(__name__)
if settings.SYSTEM_ROLE != SystemRoles.CIC_PROMS:
@admin.register(ClinicalData)
class BaseReversionAdmin(VersionAdmin):
pass
class SectionAdmin(admin.ModelAdmin):
list_display = ('code', 'display_name')
ordering = ['code']
search_fields = ['code', 'display_name']
def has_add_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
def has_change_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
def has_delete_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
class RegistryFormAdmin(admin.ModelAdmin):
list_display = ('registry', 'name', 'is_questionnaire', 'position')
ordering = ['registry', 'name']
form = RegistryFormAdminForm
list_filter = ['registry']
def has_add_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
def has_change_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
def has_delete_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
def export_registry_action(modeladmin, request, registry_models_selected):
from datetime import datetime
export_time = str(datetime.now())
def export_registry(registry, request):
from rdrf.services.io.defs.exporter import Exporter
exporter = Exporter(registry)
logger.info("EXPORTYAML %s %s" % (request.user,
registry.code))
try:
yaml_data, errors = exporter.export_yaml()
if errors:
logger.error("Error(s) exporting %s:" % registry.name)
for error in errors:
logger.error("Export Error: %s" % error)
messages.error(request, "Error in export of %s: %s" %
(registry.name, error))
return None
return yaml_data
except Exception as ex:
logger.error("export registry action for %s error: %s" % (registry.name, ex))
messages.error(request, "Custom Action Failed: %s" % ex)
return None
registrys = [r for r in registry_models_selected]
if len(registrys) == 1:
registry = registrys[0]
yaml_export_filename = registry.name + ".yaml"
yaml_data = export_registry(registry, request)
if yaml_data is None:
return HttpResponseRedirect("")
myfile = StringIO.StringIO()
myfile.write(yaml_data)
myfile.flush()
myfile.seek(0)
response = HttpResponse(FileWrapper(myfile), content_type='text/yaml')
yaml_export_filename = "export_%s_%s" % (export_time, yaml_export_filename)
response['Content-Disposition'] = 'attachment; filename="%s"' % yaml_export_filename
return response
else:
import zipfile
zippedfile = StringIO.StringIO()
zf = zipfile.ZipFile(zippedfile, mode='w', compression=zipfile.Z
|
IP_DEFLATED)
for registry in registrys:
yaml_data = export_registry(registry, request)
if yaml_data is None:
return
|
HttpResponseRedirect("")
zf.writestr(registry.code + '.yaml', yaml_data)
zf.close()
zippedfile.flush()
zippedfile.seek(0)
response = HttpResponse(FileWrapper(zippedfile), content_type='application/zip')
name = "export_" + export_time + "_" + \
reduce(lambda x, y: x + '_and_' + y, [r.code for r in registrys]) + ".zip"
response['Content-Disposition'] = 'attachment; filename="%s"' % name
return response
export_registry_action.short_description = "Export"
def generate_questionnaire_action(modeladmin, request, registry_models_selected):
for registry in registry_models_selected:
registry.generate_questionnaire()
generate_questionnaire_action.short_description = _("Generate Questionnaire")
class RegistryAdmin(admin.ModelAdmin):
list_display = ('name', 'code', 'version')
actions = [export_registry_action, generate_questionnaire_action]
def get_queryset(self, request):
if not request.user.is_superuser:
user = get_user_model().objects.get(username=request.user)
return Registry.objects.filter(registry__in=[reg.id for reg in user.registry.all()])
return Registry.objects.all()
def has_add_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
def has_change_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
def has_delete_permission(self, request, *args, **kwargs):
if request.user.is_superuser:
return True
return False
def get_urls(self):
original_urls = super(RegistryAdmin, self).get_urls()
return original_urls
def get_readonly_fields(self, request, obj=None):
"Registry code is readonly after creation"
return () if obj is None else ("code",)
class QuestionnaireResponseAdmin(admin.ModelAdmin):
list_display = ('
|
durandj/codeeval
|
python/3_prime_palindrome.py
|
Python
|
gpl-3.0
| 521
| 0.009597
|
import math
def is_palindro
|
me(n):
s = str(n)
return s == s[::-1]
def is_prime(n):
if n <= 1:
return False
if n % 2 == 0 and n != 2:
return False
if n == 2:
return True
root = math.sqrt(n)
i = 3
while i <= root:
if n % i == 0:
return False
i += 2
return True
i = 999
while i > 0:
if not is_palindrome(i):
i -= 1
continue
if not is_prime(i):
i -= 1
continue
print i
|
break
|
kennedyshead/home-assistant
|
homeassistant/components/plex/config_flow.py
|
Python
|
apache-2.0
| 16,121
| 0.000806
|
"""Config flow for Plex."""
import copy
import logging
from aiohttp import web_response
import plexapi.exceptions
from plexapi.gdm import GDM
from plexauth import PlexAuth
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import http
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import (
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
from .server import PlexServer
HEADER_FRONTEND_BASE = "HA-Frontend-Base"
_LOGGER = logging.getLogger(__package__)
@callback
def configured_servers(hass):
"""Return a set of the configured Plex servers."""
return {
entry.data[CONF_SERVER_IDENTIFIER]
for entry in hass.config_entries.async_entries(DOMAIN)
}
async def async_discover(hass):
"""Scan for available Plex servers."""
gdm = GDM()
await hass.async_add_executor_job(gdm.scan)
for server_data in gdm.entries:
await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: config_entries.SOURCE_INTEGRATION_DISCOVERY},
data=server_data,
)
class PlexFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Plex config flow."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PlexOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the Plex flow."""
self.current_login = {}
self.available_servers = None
self.plexauth = None
self.token = None
self.client_id = None
self._manual = False
async def async_step_user(
self, user_input=None, errors=None
): # pylint: disable=arguments-differ
"""Handle a flow initialized by the user."""
if user_input is not None:
return await self.async_step_plex_website_auth()
if self.show_advanced_options:
return await self.async_step_user_advanced(errors=errors)
return self.async_show_form(step_id="user", errors=errors)
async def async_step_user_advanced(self, user_input=None, errors=None):
"""Handle an advanced mode flow initialized by the user."""
if user_input is not None:
if user_input.get("setup_method") == MANUAL_SETUP_STRING:
self._manual = True
return await self.async_step_manual_setup()
return await self.async_step_plex_website_auth()
data_schema = vol.Schema(
{
vol.Required("setup_method", default=AUTOMATIC_SETUP_STRING): vol.In(
[AUTOMATIC_SETUP_STRING, MANUAL_SETUP_STRING]
)
}
)
return self.async_show_form(
step_id="user_advanced", data_schema=data_schema, errors=errors
)
async def async_step_manual_setup(self, user_input=None, errors=None):
"""Begin manual configuration."""
if user_input is not None and errors is None:
user_input.pop(CONF_URL, None)
host = user_input.get(CONF_HOST)
if host:
port = user_input[CONF_PORT]
prefix = "https" if user_input.get(CONF_SSL) else "http"
user_input[CONF_URL] = f"{prefix}://{host}:{port}"
elif CONF_TOKEN not in user_input:
return await self.async_step_manual_setup(
user_input=user_input, errors={"base": "host_or_token"}
)
return await self.async_step_server_validate(user_input)
previous_input = user_input or {}
data_schema = vol.Schema(
{
vol.Optional(
CONF_HOST,
description={"suggested_value": previous_input.get(CONF_HOST)},
): str,
vol.Required(
CONF_PORT, default=previous_input.get(CONF_PORT, DEFAULT_PORT)
): int,
vol.Required(
CONF_SSL, default=previous_input.get(CONF_SSL, DEFAULT_SSL)
): bool,
vol.Required(
CONF_VERIFY_SSL,
default=previous_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
vol.Optional(
CONF_TOKEN,
description={"suggested_value": previous_input.get(CONF_TOKEN)},
): str,
}
)
return self.async_show_form(
step_id="manual_setup", data_schema=data_schema, errors=errors
)
async def async_step_server_validate(self, server_config):
"""Validate a provided configuration."""
errors = {}
self.current_login = server_config
plex_server = PlexServer(self.hass, server_config)
try:
await self.hass.async_add_executor_job(plex_server.connect)
except NoServersFound:
_LOGGER.error("No servers linked to Plex account")
errors["base"] = "no_servers"
except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
_LOGGER.error("Invalid credentials provided, config not created")
errors[CONF_TOKEN] = "faulty_credentials"
except requests.exceptions.SSLError as error:
_LOGGER.error("SSL certificate error: [%s]", error)
errors["base"] = "ssl_error"
except (plexapi.exceptions.NotFound, requests.exceptions.ConnectionError):
server_identifier = (
server_config.get(CONF_URL) or plex_server.server_choice or "Unknown"
)
|
_LOGGER.error("Plex server could not be reached: %s", server_identifier)
errors[CONF_HOST] = "not_found"
except ServerNotSpecified as available_servers:
self.available_servers = available_servers.args[0]
return await self.async_step_select_server()
except Exception as error: # pylint: disable=broad-except
_LOGGER.exception("Unknown error connecting to Plex server: %s", error
|
)
return self.async_abort(reason="unknown")
if errors:
if self._manual:
return await self.async_step_manual_setup(
user_input=server_config, errors=errors
)
return await self.async_step_user(errors=errors)
server_id = plex_server.machine_identifier
url = plex_server.url_in_use
token = server_config.get(CONF_TOKEN)
entry_config = {CONF_URL: url}
if self.client_id:
entry_config[CONF_CLIENT_ID] = self.client_id
if token:
entry_config[CONF_TOKEN] = token
if url.startswith("https"):
entry_config[CONF_VERIFY_SSL] = server_config.get(
CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL
)
data = {
CONF_SERVER: plex_server.friendly_name,
CONF_SERVER_IDENTIFIER: server_id,
PLEX_SERVER_CONFIG: entry_config,
}
entry = await self.async_set_unique_id(server_id)
if self.context[CONF_SOURCE] == config_entries.SOURCE_REAUTH:
self.hass.config_e
|
etz69/irhelper
|
vol_plugins/hollowfind.py
|
Python
|
gpl-3.0
| 18,182
| 0.00935
|
# Author: Monnappa K A
# Email : monnappa22@gmail.com
# Twitter: @monnappa22
# Description: Volatility plugin to detect different types of Process Hollowing
import os
import volatility.obj as obj
import volatility.utils as utils
from volatility.plugins.taskmods import PSList
import volatility.plugins.vadinfo as vadinfo
import volatility.plugins.malware.malfind as malfind
from volatility.renderers.basic import Address,Hex
hollow_types = dict(enumerate(["Invalid EXE Memory Protection and Process Path Discrepancy",
"No VAD Entry For Process Executable",
"Process Base Address and Memory Protection Discrepancy"]))
class HollowFind(vadinfo.VADDump):
"""Detects different types of Process Hollowing"""
def __init__(self, config, *args, **kwargs):
vadinfo.VADDump.__init__(self, config, *args, **kwargs)
config.remove_option("BASE")
def update_proc_peb_info(self, psdata):
self.proc_peb_info = {}
# Builds a dictionary of process executable information from PEB
for proc in psdata:
pid = int(proc.UniqueProcessId)
self.proc_peb_info[pid] = [proc,
pid,
proc.ImageFileName,
int(proc.InheritedFromUniqueProcessId),
str(proc.CreateTime)]
if proc.Peb:
# gets process information for the process executable from PEB and updates the dictionary
mods = proc.get_load_modules()
for mod in mods:
ext = os.path.splitext(str(mod.FullDllName))[1].lower()
if (ext == ".exe"):
proc_cmd_line = proc.Peb.ProcessParameters.CommandLine
proc_image_baseaddr = proc.Peb.ImageBaseAddress
mod_baseaddr = mod.DllBase
mod_size = mod.SizeOfImage
mod_basename = mod.BaseDllName
mod_fullname = mod.FullDllName
break
self.proc_peb_info[pid].extend([str(proc_cmd_line),
Address(proc_image_baseaddr),
Address(mod_baseaddr),
Hex(mod_size),
str(mod_basename),
str(mod_fullname or "")])
else:
self.proc_peb_info[pid].extend(["No PEB", Address(0), Address(0), Hex(0), "No PEB", "No PEB"])
def update_proc_vad_info(self, proc_peb_info):
"""Builds a dictionary of process executable information from VAD"""
self.proc_vad_info = {}
for pid in proc_peb_info:
self.proc_vad_info[pid] = []
proc = proc_peb_info[pid][0]
if proc.Peb:
# gets process information for the process executable from VAD and updates the dictionary
for vad, addr_space in proc.get_vads(vad_filter = proc._mapped_file_filter):
ext = ""
vad_found = False
if obj.Object("_IMAGE_DOS_HEADER", offset = vad.Start, vm = addr_space).e_magic != 0x5A4D:
continue
if str(vad.FileObject.FileName or ''):
ext = os.path.splitext(str(vad.FileObject.FileName))[1].lower()
if (ext == ".exe") or (vad.Start == proc.Peb.ImageBaseAddress):
vad_filename = vad.FileObject.FileName
vad_baseaddr = vad.Start
vad_size = vad.End - vad.Start
vad_protection = vadinfo.PROTECT_FLAGS.get(vad.VadFlags.Protection.v())
vad_tag = vad.Tag
self.proc_vad_info[pid].extend([str(vad_filename or ''),
Address(vad_baseaddr),
Hex(vad_size),
str(vad_protection or ''),
str(vad_tag or '')])
vad_found = True
break
|
if vad_found == False:
self.proc_vad_info[pid].extend(["NA", Address(0), Hex(0), "NA", "NA"])
else:
self.proc_vad_info[pid].extend(["No VAD", Address(0), Hex(0), "No VAD", "No VAD"])
def get_proc_peb_info(self):
return self.proc_peb_info
def get_proc_vad_info(self):
return self.proc_vad_info
def detect_proc_hollow(self):
"""Detects hollowed processes and
|
returns dictionary with pid as the key and type of process hollowing as value"""
proc_peb_info = self.get_proc_peb_info()
proc_vad_info = self.get_proc_vad_info()
hol_type = None
self.hollowed_procs = {}
for pid in proc_peb_info:
(proc, pid, proc_name, ppid, create_time, proc_cmd_line, proc_image_baseaddr, mod_baseaddr,
mod_size, mod_basename, mod_fullname) = proc_peb_info[pid]
(vad_filename, vad_baseaddr, vad_size, vad_protection, vad_tag) = proc_vad_info[pid]
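            # Map VAD anomalies onto the hollow_types defined above:
            #   0 - executable region is RWX instead of the usual WRITECOPY,
            #   1 - no VAD entry backs the process executable at all,
            #   2 - protection looks normal but the VAD base differs from the
            #       PEB image base address.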
if vad_protection == "PAGE_EXECUTE_READWRITE":
hol_type = 0
self.hollowed_procs[pid] = hol_type
elif vad_protection == "NA":
hol_type = 1
self.hollowed_procs[pid] = hol_type
elif (vad_protection == "PAGE_EXECUTE_WRITECOPY") and (vad_baseaddr != proc_image_baseaddr):
hol_type = 2
self.hollowed_procs[pid] = hol_type
return self.hollowed_procs
def update_parent_proc_info(self, proc_peb_info):
"""Builds a dictionary containing parent process information for all the processes"""
self.parent_proc_info = {}
for pid in proc_peb_info:
self.parent_proc_info[pid] = []
if pid == 4:
self.parent_proc_info[pid].extend(["", 0])
else:
ppid = int(proc_peb_info[pid][3])
if ppid in proc_peb_info:
ppname = str(proc_peb_info[ppid][2])
else:
ppname = "NA"
self.parent_proc_info[pid].extend([ppname, ppid])
def get_parent_proc_info(self):
return self.parent_proc_info
def get_similar_procs(self, procid):
"""Given a process id returns a list containing information of similar processes"""
self.similar_procs = []
proc_peb_info = self.get_proc_peb_info()
parent_proc_info = self.get_parent_proc_info()
pname = proc_peb_info[procid][2]
create_time = proc_peb_info[procid][4]
ppname, ppid = parent_proc_info[procid]
self.similar_procs.append([pname, procid, ppname, ppid, create_time])
for pid in proc_peb_info:
if pid == procid:
continue
if pname == proc_peb_info[pid][2]:
proc_name = proc_peb_info[pid][2]
creation_time = proc_peb_info[pid][4]
parent_name, parent_id = parent_proc_info[pid]
self.similar_procs.append([proc_name, pid, parent_name, parent_id, creation_time])
return self.similar_procs
def calculate(self):
if self._config.PID:
filter_pid = self._config.PID
# This is so that when -p option is given it can still enumerate all processes to determine similar processes
self._config.PID = None
else:
filter_pid = None
|
RossBrunton/BMAT
|
tags/context_processors.py
|
Python
|
mit
| 338
| 0.008876
|
"""C
|
ontext processors, these get called and add things to template contexts"""
from .models import Tag
def pinned_tags(request):
""" Adds the list of tags this user has pinned """
out = {}
if request.user.is_authenticated():
out["pinned_tags"] = Tag.by_user(request.user).filter(pinned=True)
r
|
eturn out
|
openjck/distribution-viewer
|
viewer/api/views.py
|
Python
|
mpl-2.0
| 4,165
| 0
|
import datetime
from django.conf import settings
from django.contrib.auth import get_user_mod
|
el, login
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from djang
|
o.views.decorators.csrf import csrf_exempt
from oauth2client import client, crypt
from rest_framework.decorators import (api_view, authentication_classes,
permission_classes,
renderer_classes)
from rest_framework.exceptions import (AuthenticationFailed, NotFound,
ParseError, ValidationError)
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from .models import CategoryCollection, DataSet, Metric, NumericCollection
from .renderers import MetricsJSONRenderer
from .serializers import (CategoryDistributionSerializer, MetricSerializer,
NumericDistributionSerializer)
@api_view(['GET'])
@renderer_classes([JSONRenderer])
def metric(request, metric_id):
# Get requested population or default to "All".
pop = request.query_params.get('pop', 'All')
pops = pop.split(',')
# Get requested dataset or most recent prior dataset from date.
date = request.query_params.get('date',
datetime.date.today().strftime('%Y-%m-%d'))
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
except ValueError:
raise ParseError('Date provided not valid.')
dataset = DataSet.objects.filter(
date__lte=date, display=True).order_by('-date').first()
if not dataset:
raise NotFound('No data set with given date found.')
metric = get_object_or_404(Metric, id=metric_id)
# Note: We filter by `population='All'` here to get a single record. We
# collect the requested populations later in the serializer.
if metric.type == 'C':
qs = (CategoryCollection.objects.select_related('dataset', 'metric')
.get(dataset=dataset, metric=metric,
population='All'))
serializer = CategoryDistributionSerializer(qs, populations=pops)
return Response(serializer.data)
elif metric.type == 'N':
qs = (NumericCollection.objects.select_related('dataset', 'metric')
.get(dataset=dataset, metric=metric,
population='All'))
serializer = NumericDistributionSerializer(qs, populations=pops)
return Response(serializer.data)
@api_view(['GET'])
@renderer_classes([MetricsJSONRenderer])
def metrics(request):
metrics = Metric.objects.all().order_by('name')
return Response([MetricSerializer(m).data for m in metrics])
@csrf_exempt
@api_view(['POST'])
@authentication_classes([])
@permission_classes([])
def verify_google_token(request):
token = request.data.get('token')
if token is None:
raise ValidationError({'detail': 'Auth token required.'})
try:
idinfo = client.verify_id_token(token, settings.GOOGLE_AUTH_KEY)
if idinfo['iss'] not in ['accounts.google.com',
'https://accounts.google.com']:
raise crypt.AppIdentityError('Wrong issuer.')
if idinfo.get('hd') != settings.GOOGLE_AUTH_HOSTED_DOMAIN:
raise crypt.AppIdentityError('Wrong hosted domain.')
except crypt.AppIdentityError as e:
raise AuthenticationFailed(e)
defaults = {
'email': idinfo['email'],
'first_name': idinfo.get('given_name', ''),
'last_name': idinfo.get('family_name', ''),
}
user, created = get_user_model().objects.get_or_create(
username=idinfo['email'], defaults=defaults)
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
return Response({})
def login_view(request):
return TemplateResponse(
request,
template='viewer/login.html',
context={'google_clientid': settings.GOOGLE_AUTH_KEY,
'next': request.GET.get('next', '/')})
|
Azure/azure-sdk-for-python
|
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/aio/operations/_permissions_operations.py
|
Python
|
mit
| 10,365
| 0.004824
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PermissionsOperations:
"""PermissionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.authorization.v2018_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_for_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PermissionGetResult"]:
"""Gets all permissions the caller has for a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PermissionGetResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2018_01_01_preview.models.PermissionGetResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PermissionGetResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group
|
_name, 'str', max_length=90, min_length=1),
'subscriptio
|
nId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PermissionGetResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Authorization/permissions'} # type: ignore
def list_for_resource(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PermissionGetResult"]:
"""Gets all permissions the caller has for a resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource.
:type resource_type: str
:param resource_name: The name of the resource to get the permissions for.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PermissionGetResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2018_01_01_preview.models.PermissionGetResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PermissionGetResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_resource.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str', skip_quote=True),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resourc
|
guillochon/FriendlyFit
|
mosfit/modules/engines/rprocess.py
|
Python
|
mit
| 3,010
| 0.001329
|
"""Definitions for the `RProcess` class."""
from math import isnan
import numpy as np
from astrocats.catalog.source import SOURCE
from mosfit.constants import C_CGS, DAY_CGS, IPI, KM_CGS, M_SUN_CGS
from mosfit.modules.engines.engine import Engine
from scipy.interpolate import RegularGridInterpolator
# Important: Only define one ``Module`` class per file.
class RProcess(Engine):
"""r-process decay engine.
input luminosity adapted from Metzger 2016: 2017LRR....20....3M
"""
_REFERENCES = [
{SOURCE.BIBCODE: '2013ApJ...775...18B'},
{SOURCE.BIBCODE: '2017LRR....20....3M'},
{SOURCE.BIBCODE: '2017arXiv170708132V'}
]
ckm = C_CGS / KM_CGS
def __init__(self, **kwargs):
"""Initialize module."""
super(RProcess, self).__init__(**kwargs)
self._wants_dense = True
barnes_v = np.asarray([0.1, 0.2, 0.3])
barnes_M = np.asarray([1.e-3, 5.e-3, 1.e-2, 5.e-2])
barnes_a = np.asarray([[2.01, 4.52, 8.16], [0.81, 1.9, 3.2], [
0.56, 1.31, 2.19], [.27, .55, .95]])
barnes_b = np.asarray([[0.28, 0.62, 1.19], [0.19, 0.28, 0.45], [
0.17, 0.21, 0.31], [0.10, 0.13, 0.15]])
barnes_d = np.asarray([[1.12, 1.39, 1.52], [0.86, 1.21, 1.39], [
0.74, 1.13, 1.32], [0.6, 0.9, 1.13]])
self.therm_func_a = RegularGridInterpolator(
(barnes_M, barnes_v), barnes_a, bounds_error=False, fill_value=None)
self.therm_func_b = RegularGridInterpolator(
(barnes_M, barnes_v), barnes_b, bounds_error=False, fill_value=None)
self.therm_func_d = RegularGridInterpolator(
(barnes_M, barnes_v), barnes_d, bounds_error=False, fill_value=None)
def process(self, **kwargs):
"""Process module."""
self._times = kwargs[self.key('dense_times')]
self._mass = kwargs[self.key('mejecta')] * M_SUN_CGS
self._rest_texplosion = kwargs[self.key('resttexplosion')]
self._vejecta = kwargs[self.key('vejecta')]
self._a = self.therm_func_a(
[self._mass / M_SUN_CGS, self._vejecta / self.ckm])[0]
self._bx2 = 2.0 * self.therm_func_b(
[self._mass / M_SUN_CGS, self._vejecta / self.ckm])[0]
self._d = self.therm_func_d(
[self._mass / M_SUN_CGS, self._vejecta / self.ckm])[0]
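        # _a, _bx2 and _d are the thermalisation fit coefficients from the
        # therm_func_* interpolators built in __init__, evaluated at the
        # current ejecta mass and velocity.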
ts = [
np.inf
if self._rest_texplosion > x else (x - self._rest_texplosion)
for x in self._times
]
self._lscale = self._mass * 4.0e18 * 0.36
lumino
|
sities = [
self._lscale * (0.5 - IPI * np.arctan(
(t * DAY_CGS - 1.3) / 0.11)) ** 1.3 *
(np.exp(-self._a * t) + np.log1p(
self._bx2 * t ** self._d) / (self._bx2 * t ** self._d))
for t in ts
]
luminosities = [0.0 if isnan(x)
|
else x for x in luminosities]
return {self.dense_key('luminosities'): luminosities}
|
GrotheFAF/client
|
src/model/player.py
|
Python
|
gpl-3.0
| 4,660
| 0
|
from PyQt5.QtCore import QObject, pyqtSignal
class Player(QObject):
updated = pyqtSignal(object, object)
newCurrentGame = pyqtSignal(object, object, object)
"""
Represents a player the client knows about.
"""
def __init__(self,
id_,
login,
global_rating=(1500, 500),
ladder_rating=(1500, 500),
number_of_games=0,
avatar=None,
country=None,
clan=None,
league=None):
QObject.__init__(self)
"""
Initialize a Player
"""
# Required fields
self.id = int(id_)
self.login = login
self.global_rating = global_rating
self.ladder_rating = ladder_rating
self.number_of_games = number_of_games
self.avatar = avatar
self.country = country
self.clan = clan
self.league = league
# The game the player is currently playing
self._currentGame = None
def copy(self):
s = self
p = Player(s.id, s.login, s.global_rating, s.ladder_rating,
s.number_of_games, s.avatar, s.country, s.clan, s.league)
p.currentGame = self._currentGame
return p
def update(self,
id_=None,
login=None,
global_rating=None,
ladder_rating=None,
number_of_games=None,
avatar=None,
country=None,
clan=None,
league=None):
old_data = self.copy()
        # Ignore id and login (they are immutable)
# Login should be mutable, but we look up things by login right now
if global_rating is not None:
self.global_rating = global_rating
if ladder_rating is not None:
self.ladder_rating = ladder_rating
if number_of_games is not None:
self.number_of_games = number_of_games
if avatar is not None:
self.avatar = avatar
if country is not None:
self.country = country
if clan is not None:
self.clan = clan
if league is not None:
self.league = league
self.updated.emit(self, old_data)
def __hash__(self):
"""
Index by id
"""
return self.id.__hash__()
def __index__(self):
return self.id
def __eq__(self, other):
"""
Equality by id
:param other: player object to compare with
"""
if not isinstance(other, Player):
return False
return other.id == self.id
def rounded_rating_estimate(self):
"""
Get the conservative estimate of the players global trueskill rating,
rounded to nearest 100
"""
return round((self.rating_estimate()/100))*100
def rating_estimate(self):
"""
Get the conservative estimate of the players global trueskill rating
"""
return int(max(0, (self.global_rating[0] - 3 * self.global_rating[1])))
def ladder_estimate(self):
"""
Get the conservative estimate of the players ladder trueskill rating
"""
return int(max(0, (self.ladder_rating[0] - 3 * self.ladder_rating[1])))
@property
def rating_mean(self):
return self.global_rating[0]
@property
def rating_deviation(self):
return self.global_rating[1]
@property
def ladder_rating_mean(self):
return self.ladder_rating[0]
@property
def ladder_rating_deviation(self):
return self.ladder_rating[1]
def __repr__(self):
return self.__str__()
def __str__(self):
return ("Player(id={}, login={}, global_rating={}, "
"ladder_rating={})").f
|
ormat(
self.id,
self.login,
self.global_rating,
self.ladder_rating
)
@property
def currentGame(self):
return self._currentGame
@currentGame.setter
def currentGame(self, game):
self.set_current_game_defer_signal(game)()
def set_current_game_defer_signal(self, game):
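        # Update the current game immediately but return a callable that emits
        # the change signals later, so callers can batch several updates before
        # notifying listeners.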
if self.currentGame == game:
return lambda: None
old = self
|
._currentGame
self._currentGame = game
return lambda: self._emit_game_change(game, old)
def _emit_game_change(self, game, old):
self.newCurrentGame.emit(self, game, old)
if old is not None:
old.ingamePlayerRemoved.emit(old, self)
if game is not None:
game.ingamePlayerAdded.emit(game, self)
|
eamonnmag/hepdata3
|
hepdata/modules/records/api.py
|
Python
|
gpl-2.0
| 24,761
| 0.003029
|
# -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""API for HEPData-Records."""
import os
from collections import OrderedDict
from functools import wraps
import subprocess
import time
from flask import redirect, request, render_template, jsonify, current_app, Response, abort
from flask_login import current_user
from invenio_accounts.models import User
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.utils import secure_filename
from hepdata.modules.converter import convert_oldhepdata_to_yaml
from hepdata.modules.permissions.api import user_allowed_to_perform_action
from hepdata.modules.permissions.models import SubmissionParticipant
from hepdata.modules.records.subscribers.api import is_current_user_subscribed_to_record
from hepdata.modules.records.utils.common import decode_string, find_file_in_directory, allowed_file, \
remove_file_extension, truncate_string, get_record_contents
from hepdata.modules.records.utils.data_processing_utils import process_ctx
from hepdata.modules.records.utils.submission import process_submission_directory, create_data_review
from hepdata.modules.submission.api import get_latest_hepsubmission
from hepdata.modules.records.utils.users import get_coordinators_in_system, has_role
from hepdata.modules.records.utils.workflow import update_action_for_submission_participant
from hepdata.modules.records.utils.yaml_utils import split_files
from hepdata.modules.stats.views import increment, get_count
from hepdata.modules.submission.models import RecordVersionCommitMessage, DataSubmission, HEPSubmission, DataReview
from hepdata.utils.file_extractor import extract
from hepdata.utils.users import get_user_from_id
from bs4 import BeautifulSoup
import tempfile
from shutil import rmtree
RECORD_PLAIN_TEXT = {
"passed": "passed review",
"attention": "attention required",
"todo": "to be reviewed"
}
def returns_json(f):
@wraps(f)
def decorated_function(*args, **kwargs):
r = f(*args, **kwargs)
return Response(r, content_type='application/json; charset=utf-8')
return decorated_function
def format_submission(recid, record, version, version_count, hepdata_submission,
data_table=None):
"""
Performs all the processing of the record to be displayed.
:param recid:
:param record:
:param version:
:param version_count:
:param hepdata_submission:
:param data_table:
:return:
"""
ctx = {}
if hepdata_submission is not None:
ctx['site_url'] = current_app.config.get('SITE_URL', 'https://www.hepdata.net')
ctx['record'] = record
ctx["version_count"] = version_count
        if version != -1:
ctx["version"] = version
else:
# we get the latest version by default
ctx["version"] = version_count
if record is not None:
if "collaborations" in record and type(record['collaborations']) is not list:
collaborations = [x.strip() for x in record["collaborations"].split(",")]
ctx['record']['collaborations'] = collaborations
authors = record.get('authors', None)
create_breadcrumb_text(authors, ctx, record)
get_commit_message(ctx, recid)
if authors:
truncate_author_list(record)
determine_user_privileges(recid, ctx)
else:
ctx['record'] = {}
determine_user_privileges(recid, ctx)
ctx['show_upload_widget'] = True
ctx['show_review_widget'] = False
ctx['reviewer_count'] = SubmissionParticipant.query.filter_by(
publication_recid=recid, status="primary", role="reviewer").count()
ctx['reviewers_notified'] = hepdata_submission.reviewers_notified
ctx['record']['last_updated'] = hepdata_submission.last_updated
ctx['record']['hepdata_doi'] = "{0}".format(hepdata_submission.doi)
if ctx['version'] > 1:
ctx['record']['hepdata_doi'] += ".v{0}".format(ctx['version'])
ctx['recid'] = recid
ctx["status"] = hepdata_submission.overall_status
ctx['record']['data_abstract'] = decode_string(hepdata_submission.data_abstract)
extract_journal_info(record)
if hepdata_submission.overall_status != 'finished' and ctx["version_count"] > 0:
if not (ctx['show_review_widget']
or ctx['show_upload_widget']
or ctx['is_submission_coordinator_or_admin']):
# we show the latest approved version.
ctx["version"] -= 1
ctx["version_count"] -= 1
ctx['additional_resources'] = submission_has_resources(hepdata_submission)
# query for a related data submission
data_record_query = DataSubmission.query.filter_by(
publication_recid=recid,
version=ctx["version"]).order_by(DataSubmission.id.asc())
format_tables(ctx, data_record_query, data_table, recid)
ctx['access_count'] = get_count(recid)
ctx['mode'] = 'record'
ctx['coordinator'] = hepdata_submission.coordinator
ctx['coordinators'] = get_coordinators_in_system()
ctx['record'].pop('authors', None)
return ctx
def format_tables(ctx, data_record_query, data_table, recid):
"""
Finds all the tables related to a submission and formats
them for display in the UI or as JSON.
:return:
"""
first_data_id = -1
data_table_metadata, first_data_id = process_data_tables(
ctx, data_record_query, first_data_id, data_table)
assign_or_create_review_status(data_table_metadata, recid, ctx["version"])
ctx['watched'] = is_current_user_subscribed_to_record(recid)
ctx['table_to_show'] = first_data_id
if 'table' in request.args:
if request.args['table']:
ctx['table_to_show'] = request.args['table']
ctx['data_tables'] = data_table_metadata.values()
def get_commit_message(ctx, recid):
"""
Returns a commit messag
|
e for the current version if present.
:
|
param ctx:
:param recid:
"""
try:
commit_message_query = RecordVersionCommitMessage.query \
.filter_by(version=ctx["version"], recid=recid)
if commit_message_query.count() > 0:
commit_message = commit_message_query.one()
ctx["revision_message"] = {
'version': commit_message.version,
'message': commit_message.message}
except NoResultFound:
pass
def create_breadcrumb_text(authors, ctx, record):
"""Creates the breadcrumb text for a submission."""
if "first_author" in record and 'full_name' in record["first_author"] \
and record["first_author"]["full_name"] is not None:
ctx['breadcrumb_text'] = record["first_author"]["full_name"]
if authors is not None and len(record['authors']) > 1:
ctx['breadcrumb_text'] += " et al."
def submission_has_resources(hepsubmission):
"""
Returns whether the submission has resources attached.
:param hepsubmission: HEPSubmission object
:return: bool
"""
return len(hepsubmission.resources) > 0
def extract_journal_info(record):
if recor
|
kubeflow/pipelines
|
sdk/python/kfp/deprecated/dsl/artifact_utils.py
|
Python
|
apache-2.0
| 3,439
| 0
|
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils used in artifact and ontology_artifact classes."""
from typing import Any, Dict, Tuple
import enum
import jsonschema
import os
import yaml
class SchemaFieldType(enum.Enum):
"""Supported Schema field types."""
NUMBER = 'number'
INTEGER = 'integer'
STRING = 'string'
BOOL = 'bool'
OBJECT = 'object'
ARRAY = 'array'
def parse_schema(yaml_schema: str) -> Tuple[str, Dict[str, SchemaFieldType]]:
"""Parses yaml schema.
Ensures that schema is well-formed and returns dictionary of properties and
its type for type-checking.
Args:
yaml_schema: Yaml schema to be parsed.
Returns:
str: Title set in the schema.
Dict: Property name to SchemaFieldType enum.
Raises:
ValueError if title field is not set in schema or an
unsupported(i.e. not defined in SchemaFieldType)
type is specified for the field.
"""
schema = yaml.full_load(yaml_schema)
if 'title' not in schema.keys():
raise ValueError('Invalid _schema, title must be set. \
Got: {}'.format(yaml_schema))
title = schema['title']
properties = {}
if 'properties' in schema.keys():
schema_properties = schema['properties'] or {}
for property_name, property_def in schema_properties.items():
try:
properties[property_name] = SchemaFieldType(
|
property_def['type'])
except ValueError:
raise ValueError('Unsupported type:{} specified for field: {} \
in schema'.format(property_def['type'], property_name))
return title, properties
def verify_schema_instance(schema: str, instance: Dict[str, Any]):
"""Verifies instnace is well-formed against the schema.
Args:
schema: Schema to use
|
for verification.
instance: Object represented as Dict to be verified.
Raises:
RuntimeError if schema is not well-formed or instance is invalid against
the schema.
"""
if len(instance) == 0:
return
try:
jsonschema.validate(instance=instance, schema=yaml.full_load(schema))
except jsonschema.exceptions.SchemaError:
raise RuntimeError('Invalid schema schema: {} used for \
verification'.format(schema))
except jsonschema.exceptions.ValidationError:
raise RuntimeError('Invalid values set: {} in object for schema: \
{}'.format(instance, schema))
def read_schema_file(schema_file: str) -> str:
"""Reads yamls schema from type_scheams folder.
Args:
schema_file: Name of the file to read schema from.
Returns:
Read schema from the schema file.
"""
schema_file_path = os.path.join(
os.path.dirname(__file__), 'type_schemas', schema_file)
with open(schema_file_path) as schema_file:
return schema_file.read()
|
OneBitSoftware/jwtSample
|
src/Spa/env1/Lib/site-packages/gunicorn/util.py
|
Python
|
mit
| 12,319
| 0.001786
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
try:
import ctypes
except MemoryError:
# selinux execmem denial
# https://bugzilla.redhat.com/show_bug.cgi?id=488396
ctypes = None
except ImportError:
# Python on Solaris compiled with Sun Studio doesn't have ctypes
ctypes = None
import fcntl
import os
import pkg_resources
import random
import resource
import socket
import sys
import textwrap
import time
import traceback
import inspect
import errno
import warnings
from gunicorn.six import text_type, string_types
MAXFD = 1024
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
timeout_default = object()
CHUNK_SIZE = (16 * 1024)
MAX_BODY = 1024 * 132
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Server and Date aren't technically hop-by-hop
# headers, but they are in the purview of the
# origin server which the WSGI spec says we should
# act like. So we drop them and add our own.
#
# In the future, concatenation server header values
# might be better, but nothing else does it and
# dropping them is easier.
hop_headers = set("""
connection keep-alive proxy-authenticate proxy-authorization
te trailers transfer-encoding upgrade
server date
""".split())
try:
from setproctitle import setproctitle
def _setproctitle(title):
setproctitle("gunicorn: %s" % title)
except ImportError:
def _setproctitle(title):
return
try:
from importlib import import_module
except ImportError:
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in range(level, 1, -1):
try:
|
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%
|
s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
def load_class(uri, default="sync", section="gunicorn.workers"):
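    # `uri` may be an already-imported class, an entry-point spec such as
    # "egg:gunicorn#sync" or "#sync", or a dotted path like
    # "gunicorn.workers.sync.SyncWorker".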
if inspect.isclass(uri):
return uri
if uri.startswith("egg:"):
# uses entry points
entry_str = uri.split("egg:")[1]
try:
dist, name = entry_str.rsplit("#", 1)
except ValueError:
dist = entry_str
name = default
try:
return pkg_resources.load_entry_point(dist, section, name)
except:
exc = traceback.format_exc()
raise RuntimeError("class uri %r invalid or not found: \n\n[%s]" % (uri,
exc))
else:
components = uri.split('.')
if len(components) == 1:
try:
if uri.startswith("#"):
uri = uri[1:]
return pkg_resources.load_entry_point("gunicorn",
section, uri)
except:
exc = traceback.format_exc()
raise RuntimeError("class uri %r invalid or not found: \n\n[%s]" % (uri,
exc))
klass = components.pop(-1)
try:
mod = __import__('.'.join(components))
except:
exc = traceback.format_exc()
raise RuntimeError("class uri %r invalid or not found: \n\n[%s]" % (uri,
exc))
for comp in components[1:]:
mod = getattr(mod, comp)
return getattr(mod, klass)
def set_owner_process(uid, gid):
""" set user and group of workers processes """
if gid:
try:
os.setgid(gid)
except OverflowError:
if not ctypes:
raise
# versions of python < 2.6.2 don't manage unsigned int for
# groups like on osx or fedora
os.setgid(-ctypes.c_int(-gid).value)
if uid:
os.setuid(uid)
def chown(path, uid, gid):
try:
os.chown(path, uid, gid)
except OverflowError:
if not ctypes:
raise
os.chown(path, uid, -ctypes.c_int(-gid).value)
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
        # Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on a i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
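        # (With doubling from 0.001s the waits sum to 0.001 + 0.002 + ... +
        # 0.512, i.e. roughly 1.02 seconds in total before giving up.)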
timeout = 0.001
while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
else:
_unlink = os.unlink
def unlink(filename):
try:
_unlink(filename)
except OSError as error:
# The filename need not exist.
if error.errno not in (errno.ENOENT, errno.ENOTDIR):
raise
def is_ipv6(addr):
try:
socket.inet_pton(socket.AF_INET6, addr)
except socket.error: # not a valid address
return False
return True
def parse_address(netloc, default_port=8000):
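    # Accepts e.g. "unix:/tmp/gunicorn.sock", "tcp://127.0.0.1:8000",
    # "[::1]:8000" or a bare hostname, which falls back to default_port.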
if netloc.startswith("unix:"):
return netloc.split("unix:")[1]
if netloc.startswith("unix://"):
return netloc.split("unix://")[1]
if netloc.startswith("tcp://"):
netloc = netloc.split("tcp://")[1]
# get host
if '[' in netloc and ']' in netloc:
host = netloc.split(']')[0][1:].lower()
elif ':' in netloc:
host = netloc.split(':')[0].lower()
elif netloc == "":
host = "0.0.0.0"
else:
host = netloc.lower()
#get port
netloc = netloc.split(']')[-1]
if ":" in netloc:
port = netloc.split(':', 1)[1]
if not port.isdigit():
raise RuntimeError("%r is not a valid port number." % port)
port = int(port)
else:
port = default_port
return (host, port)
def get_maxfd():
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
return maxfd
def close_on_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def set_no
|
saruberoz/leetcode-oj
|
leetcode-solutions/python/add_two_numbers.py
|
Python
|
mit
| 1,303
| 0
|
# https://oj.leetcode.com/problems/add-two-numbers/
# 1555 / 1555 test cases passed.
# You are given two linked lists representing two non-negative numbers.
# The digits are stored in reverse order and each of their nodes contain
# a single digit. Add the two numbers and return it as a linked list.
#
# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 0 -> 8
# D
|
efinition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @return a ListNod
|
e
def addTwoNumbers(self, l1, l2):
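        # Walk both lists in lock-step summing digits plus carry; the trailing
        # while-loops drain whichever list is longer, and a final node is
        # appended if a carry is left over.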
carry = 0
head = ListNode(0)
curr = head
while l1 and l2:
sum = l1.val + l2.val + carry
carry = sum / 10
curr.next = ListNode(sum % 10)
l1 = l1.next
l2 = l2.next
curr = curr.next
while l1:
sum = l1.val + carry
carry = sum / 10
curr.next = ListNode(sum % 10)
l1 = l1.next
curr = curr.next
while l2:
sum = l2.val + carry
carry = sum / 10
curr.next = ListNode(sum % 10)
l2 = l2.next
curr = curr.next
if carry > 0:
curr.next = ListNode(carry)
return head.next
|
SSCPS/bip-gam-v1
|
bip.py
|
Python
|
gpl-3.0
| 1,835
| 0.005995
|
#! /usr/bin/env python
#################################################################################################
#
# Script Name: bip.py
# Script Usage: This script is the menu system and runs everything else. Do not use other
# files unless you are comfortable with the code.
#
# It has the following:
# 1.
# 2.
# 3.
# 4.
#
# You will probably want to do the following:
# 1. Make sure the info in bip_config.py is correct.
# 2. Make sure GAM (Google Apps Manager) is installed and the path is correct.
# 3. Make sure the AD scripts in tools/ are present on the DC runn
|
ing run.ps1.
#
# Script Updates:
# 201709191243 - rdegennaro@sscps.org - copied boilerplate.
#
#################################################################################################
import os # os.system for clearing screen and simple gam calls
import subprocess # subprocess.Popen is to capture gam output (needed for user info in particular)
import MySQLdb # MySQLdb is to get data from
|
relevant tables
import csv # CSV is used to read output of drive commands that supply data in CSV form
import bip_config # declare installation specific variables
# setup for MySQLdb connection
varMySQLHost = bip_config.mysqlconfig['host']
varMySQLUser = bip_config.mysqlconfig['user']
varMySQLPassword = bip_config.mysqlconfig['password']
varMySQLDB = bip_config.mysqlconfig['db']
# setup to find GAM
varCommandGam = bip_config.gamconfig['fullpath']
#################################################################################################
#
#################################################################################################
|
ActiveState/code
|
recipes/Python/511434_Paint_10/recipe-511434.py
|
Python
|
mit
| 3,174
| 0.008507
|
HOST = '127.0.0.1'
PORT = 8080
from Tkinter import *
import tkColorChooser
import socket
import thread
import spots
################################################################################
def main():
global hold, fill, draw, look
hold = []
fill = '#000000'
connect()
root = Tk()
root.title('Paint 1.0')
root.resizable(False, False)
upper = LabelFrame(root, text='Your Canvas')
lower = LabelFrame(root, text='Thei
|
r Canvas')
draw = Canvas(upper, bg='#f
|
fffff', width=400, height=300, highlightthickness=0)
look = Canvas(lower, bg='#ffffff', width=400, height=300, highlightthickness=0)
cursor = Button(upper, text='Cursor Color', command=change_cursor)
canvas = Button(upper, text='Canvas Color', command=change_canvas)
draw.bind('<Motion>', motion)
draw.bind('<ButtonPress-1>', press)
draw.bind('<ButtonRelease-1>', release)
draw.bind('<Button-3>', delete)
upper.grid(padx=5, pady=5)
lower.grid(padx=5, pady=5)
draw.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
look.grid(padx=5, pady=5)
cursor.grid(row=1, column=0, padx=5, pady=5, sticky=EW)
canvas.grid(row=1, column=1, padx=5, pady=5, sticky=EW)
root.mainloop()
################################################################################
def connect():
try:
start_client()
except:
start_server()
thread.start_new_thread(processor, ())
def start_client():
global QRI
server = socket.socket()
server.connect((HOST, PORT))
QRI = spots.qri(server)
def start_server():
global QRI
server = socket.socket()
server.bind(('', PORT))
server.listen(1)
QRI = spots.qri(server.accept()[0])
def processor():
while True:
ID, (func, args, kwargs) = QRI.query()
getattr(look, func)(*args, **kwargs)
def call(func, *args, **kwargs):
try:
QRI.call((func, args, kwargs), 0.001)
except:
pass
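# Illustrative note (not part of the original recipe): every local drawing
# operation is mirrored on the peer by name through call(), e.g. the stroke
# segments drawn in motion() below are replayed remotely with
#
#     call('create_line', [10, 10, 20, 25], fill='#000000', tag='TEMP')
#
# and silently dropped if QRI.call() raises (0.001 is presumably a short
# timeout handled inside the spots module).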
################################################################################
def change_cursor():
global fill
color = tkColorChooser.askcolor(color=fill)[1]
if color is not None:
fill = color
def change_canvas():
color = tkColorChooser.askcolor(color=draw['bg'])[1]
if color is not None:
draw['bg'] = color
draw.config(bg=color)
call('config', bg=color)
################################################################################
def motion(event):
if hold:
hold.extend([event.x, event.y])
event.widget.create_line(hold[-4:], fill=fill, tag='TEMP')
call('create_line', hold[-4:], fill=fill, tag='TEMP')
def press(event):
global hold
hold = [event.x, event.y]
def release(event):
global hold
if len(hold) > 2:
event.widget.delete('TEMP')
event.widget.create_line(hold, fill=fill, smooth=True)
call('delete', 'TEMP')
call('create_line', hold, fill=fill, smooth=True)
hold = []
def delete(event):
event.widget.delete(ALL)
call('delete', ALL)
################################################################################
if __name__ == '__main__':
main()
|
google/ml-metadata
|
ml_metadata/tools/documentation/build_docs.py
|
Python
|
apache-2.0
| 4,411
| 0.005214
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Script to generate api_docs for MLMD.
The script needs to be run under Python3.
The doc generator can be installed with:
```
$> pip3 install git+https://github.com/tensorflow/docs
```
To run it on the mlmd pip package:
```
python3 ml_metadata/tools/documentation/build_docs.py --output_dir=/tmp/mlmd
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
import ml_metadata as mlmd
from google.protobuf.reflection import GeneratedProtocolMessageType
flags.DEFINE_string('output_dir', '/tmp/mlmd_api', 'Where to output the docs')
flags.DEFINE_string(
'code_url_prefix',
'https://github.com/google/ml-metadata/tree/master/ml_metadata',
'The url prefix for links to code.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files')
flags.DEFINE_string('site_path', 'ml_metadata/api_docs/python',
'Path prefix in the _toc.yaml')
FLAGS = flags.FLAGS
def ignore_proto_method(path, parent, children):
"""Remove all the proto inherited methods.
Args:
path: A tuple of name parts forming the attribute-lookup path to this
object. For `tf.keras.layers.Dense` path is:
|
("tf","keras","layers","Dense")
parent: The parent object.
    children: A list of (name, value) pairs. The attributes of the parent.
Returns:
A filtered list of children `(name, value)` p
|
airs. With all proto methods
removed.
"""
  del path
  if not isinstance(parent, GeneratedProtocolMessageType):
    return children
  new_children = []
for (name, obj) in children:
if 'function' in str(obj.__class__):
continue
new_children.append((name, obj))
return new_children
def ignore_attrs_method(path, parent, children):
"""Remove auto generated attrs methods.
Args:
path: A tuple of name parts forming the attribute-lookup path to this
object. For `tf.keras.layers.Dense` path is:
("tf","keras","layers","Dense")
parent: The parent object.
    children: A list of (name, value) pairs. The attributes of the parent.
Returns:
A filtered list of children `(name, value)` pairs. With all attrs auto
generated methods removed (e.g., __eq__, __ge__, __gt__)
"""
del path
del parent
new_children = []
for (name, obj) in children:
if name in ['__eq__', '__ge__', '__gt__', '__le__', '__lt__', '__ne__']:
continue
new_children.append((name, obj))
return new_children
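# Illustrative sketch (not part of the original script): the attrs filter keeps
# everything except the comparison dunders, so
#
#     children = [('__eq__', object()), ('__gt__', object()), ('uri', str)]
#     ignore_attrs_method((), None, children)  ->  [('uri', str)]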
def main(args):
if args[1:]:
raise ValueError('Unrecognized command line args', args[1:])
suppress_docs_for = []
for name in ['version', 'goo'+'gle', 'metadata_store', 'pywrap']:
submodule = getattr(mlmd, name, None)
if submodule is not None:
suppress_docs_for.append(submodule)
for obj in suppress_docs_for:
doc_controls.do_not_generate_docs(obj)
doc_generator = generate_lib.DocGenerator(
root_title='ML Metadata',
py_modules=[('mlmd', mlmd)],
base_dir=os.path.dirname(mlmd.__file__),
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
private_map={},
callbacks=[
# This filters out objects not defined in the current module or its
# sub-modules.
public_api.local_definitions_filter, ignore_proto_method,
ignore_attrs_method
])
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
|
cswank/mongodoc
|
mongodoc/collection_doc.py
|
Python
|
mit
| 3,438
| 0.003781
|
import difflib, re
from pymongo.objectid import ObjectId
class CollectionDoc(object):
def __init__(self, db, docs, find_links=True):
self._db = db
self._collection_names = db.collection_names()
self._docs = docs
self._find_links = find_links
@property
def text(self):
text = ''
for doc in self._docs:
text += '{0}\n\n'.format(doc.text)
text = text.split('\n')
if self._find_links:
self._find_relationships(text)
return '\n'.join(text)
def _find_relationships(self, text):
level = 0
for doc in self._docs:
fields = self._find_link_f
|
ields(doc.doc)
level = self._make_links(doc.collection, fields, text, level)
def _make_links(self, doc_collection_name, fields, text, level):
for key, value in fields.iteritems():
collection_name = self._find_collection(key, value)
if col
|
lection_name is not None:
self._make_link(doc_collection_name, collection_name, key, value, text, level)
level += 1
return level
def _make_link(self, doc_collection_name, collection_name, key, value, text, level):
for collection_start, row in enumerate(text):
j = row.find('{0} '.format(doc_collection_name))
if -1 < j < 20:
break
for i, row in enumerate(text[collection_start:]):
j = row.find('{0}:'.format(key))
if -1 < j < 20:
break
found = False
for k, row in enumerate(text):
j = row.find('{0} '.format(collection_name))
if -1 < j < 20:
found=True
if found:
l = row.find('_id:')
if -1 < l < 20:
break
start, end = sorted([k, i + collection_start])
for i in xrange(len(text) - 1):
self._append_row(start, end, i, text, level)
def _append_row(self, start, end, i, text, level):
j = 3 * level
prefix = text[i][:j]
rest = text[i][j:]
if (i == start or i == end) and '|' not in prefix:
prefix = '+--{0}'.format(prefix.replace(' ', '-'))
elif (i == start or i == end) and '|' in prefix:
prefix = '+--{0}'.format(prefix.replace(' ', '-').replace('|', '+'))
elif start > i or i > end:
prefix = ' {0}'.format(prefix)
elif start < i or i < end:
prefix = '| {0}'.format(prefix)
text[i] = prefix + rest
def _find_collection(self, key, value):
scores = []
for collection_name in self._collection_names:
scores.append((collection_name, value, difflib.SequenceMatcher(None, collection_name, key).ratio()))
scores.sort(key=lambda x: x[2], reverse=True)
for item in scores:
collection_name = item[0]
value = item[1]
doc = self._db[collection_name].find_one({'_id': value})
if doc is None:
continue
else:
return collection_name
def _find_link_fields(self, doc):
fields = {}
for key, value in doc.iteritems():
if key == '_id':
continue
if isinstance(value, ObjectId):
fields[key] = value
return fields
|
lnybrave/zzbook
|
data/sync.py
|
Python
|
apache-2.0
| 2,231
| 0.00137
|
# !/usr/bin/python
# -*- coding=utf-8 -*-
import json
import urllib2
from books.models import Book
domain = "http://smartebook.zmapp.com:9026"
# Sync book details
def sync_book(bid, cm):
    # Books that have already finished (status == 1) are not updated
if Book.objects.filter(id=bid, status=1).count() == 0:
page = urllib2.urlopen("http://wap.cmread.com/r/p/viewdata.jsp?bid=%s&cm=%s&vt=9" % (bid, cm))
data = page.read()
try:
result = json.loads(data, encoding="utf-8")
print result
update = Book.objects.filter(id=bid).count() != 0
book = Book()
book.pk = int(bid)
book.name = result['showName']
book.brief = result['brief']
book.desc = result['desc']
book.cover_url = result['bigCoverLogo']
book.cover_url_small = result['smallCoverLogo']
book.status = result['status']
book.first_cid = result['firstChpaterCid']
book.last_cid = result['lastChapterCid']
book.chapter_size =
|
result['chapterSize']
book.score = result['score']
book.word_size = result['wordSize']
book.click_amount = result['clickValue']
book.kw = result['kw']
book.price = int(float(result['price']) * 100)
book.charge_mode = result['chargeMode']
if update:
book.save(force_update=update, update_fields=(
'name', 'brief', 'desc', 'cover_url', 'cover
|
_url_small', 'status', 'first_cid', 'last_cid',
'chapter_size', 'score', 'word_size', 'click_amount', 'kw', 'price', 'charge_mode'))
else:
book.save(force_insert=True)
return True
except Exception, e:
print e.message
return False
# Sync the bookshelf
def sync_bookshelf():
url = "%s/smart_book/get_bookshelf" % domain
page = urllib2.urlopen(url)
result = json.loads(page.read())
print result
books = result['bookshelf']
update_count = 0
for index, b in enumerate(books):
if sync_book(b['book_id'], 'zm'):
update_count += 1
return len(books), update_count
print sync_bookshelf()
|
IfcOpenShell/IfcOpenShell
|
src/ifcblender/io_import_scene_ifc/__init__.py
|
Python
|
lgpl-3.0
| 13,611
| 0.002057
|
# IfcBlender - Blender IFC Importer
# Copyright (C) 2019 Thomas Krijnen <thomas@aecgeeks.com>
#
# This file is part of IfcBlender.
#
# IfcBlender is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IfcBlender is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcBlender. If not, see <http://www.gnu.org/licenses/>.
# <pep8 compliant>
###############################################################################
# #
# Based on the Wavefront OBJ File importer by Campbell Barton #
# #
###############################################################################
bl_info = {
"name": "IfcBlender",
"description": "Import files in the " "Industry Foundation Classes (.ifc) file format",
"author": "Thomas Krijnen, IfcOpenShell",
"blender": (2, 80, 0),
"location": "File > Import",
"tracker_url": "https://sourceforge.net/p/ifcopenshell/" "_list/tickets?source=navbar",
"category": "Import-Export",
}
if "bpy" in locals():
import importlib
if "ifcopenshell" in locals():
importlib.reload(ifcopenshell)
from bpy.props import (
BoolProperty,
IntProperty,
StringProperty,
)
from bpy_extras.io_utils import ImportHelper
from collections import defaultdict
import bpy
import logging
import mathutils
import os
major, minor = bpy.app.version[0:2]
transpose_matrices = minor >= 62
bpy.types.Object.ifc_id = IntProperty(name="IFC Entity ID", description="The STEP entity instance name")
bpy.types.Object.ifc_guid = StringProperty(name="IFC Entity GUID", description="The IFC Globally Unique Identifier")
bpy.types.Object.ifc_name = StringProperty(name="IFC Entity Name", description="The optional name attribute")
bpy.types.Object.ifc_type = StringProperty(name="IFC Entity Type", description="The STEP Datatype keyword")
def _get_parent(instance):
"""This is based on ifcopenshell.app.geom"""
if instance.is_a("IfcOpeningElement"):
# We skip opening elements as they are nameless.
# We use this function to get usable collections.
return _get_parent(instance.VoidsElements[0].RelatingBuildingElement)
if instance.is_a("IfcElement"):
fills = instance.FillsVoids
if len(fills):
return fills[0].RelatingOpeningElement
containments = instance.ContainedInStructure
if len(containments):
return containments[0].RelatingStructure
if instance.is_a("IfcObjectDefinition"):
decompositions = instance.Decomposes
if len(decompositions):
return decompositions[0].RelatingObject
def import_ifc(filename, use_names, process_relations, blender_booleans):
from . import ifcopenshell
from .ifcopenshell import geom as ifcopenshell_geom
print(f"Reading {bpy.path.basename(filename)}...")
settings = ifcopenshell_geom.settings()
settings.set(settings.DISABLE_OPENING_SUBTRACTIONS, blender_booleans)
assert os.path.exists(filename), filename
ifc_file = ifcopenshell.open(filename)
iterator = ifcopenshell_geom.iterator(settings, ifc_file)
valid_file = iterator.initialize()
if not valid_file:
return False
print("Done reading file")
id_to_object = defaultdict(list)
id_to_parent = {}
id_to_matrix = {}
openings = []
old_progress = -1
print("Creating geometry...")
root_collection = bpy.data.collections.new(f"{bpy.path.basename(filename)}")
bpy.context.scene.collection.children.link(root_collection)
collections = {0: root_collection}
def get_collection(cid):
if cid == 0:
return root_collection
collection = collections.get(cid)
if collection is None:
try:
ifc_object = ifc_file.by_id(cid)
except Exception as exc:
logging.exception(exc)
ifc_object = None
if ifc_object is not None:
                # FIXME: I am really unsure if that is the correct way to get the parent object
ifc_parent_object = _get_parent(ifc_object)
parent_id = ifc_parent_object.id() if ifc_parent_object is not None else 0
parent_collection = get_collection(parent_id)
name = ifc_object.Name or f"{ifc_object.is_a()}[{cid}]"
else:
parent_collection = get_collection(0)
name = f"unresolved_{cid}"
collection = bpy.data.collections.new(name)
parent_collection.children.link(collection)
collections[cid] = collection
return collection
if process_relations:
rel_collection = bpy.data.collections.new("Relations")
collection.children.link(rel_collection)
project_meshes = dict()
while True:
ob = iterator.get()
f = ob.geometry.faces
v = ob.geometry.verts
mats = ob.geometry.materials
matids = ob.geometry.material_ids
m = ob.transformation.matrix.data
t = ob.type[0:21]
nm = ob.name if len(ob.name) and use_names else ob.guid
# MESH CREATION
# Depending on version, geometry.id will be either int or str
mesh_name = f"mesh-{ob.geometry.id}"
me = project_meshes.get(mesh_name)
if me is None:
verts = [[v[i], v[i + 1], v[i + 2]] for i in range(0, len(v), 3)]
faces = [[f[i], f[i + 1], f[i + 2]] for i in range(0, len(f), 3)]
me = bpy.data.meshes.new(mesh_name)
project_meshes[mesh_name] = me
me.from_pydata(verts, [], faces)
me.validate()
# MATERIAL CREATION
def add_material(mname, props):
if mname in bpy.data.materials:
mat = bpy.data.materials[mname]
mat.use_fake_user = True
else:
mat = bpy.data.materials.new(mname)
for k, v in props.items():
if k == "transparency":
mat.blend_method = "HASHED"
mat.use_screen_refraction = True
mat.refraction_depth = 0.1
mat.use_nodes = True
mat.node_tree.nodes["Principled BSDF"].inputs[15].default_value = v
else:
setattr(mat, k, v)
me.materials.append(mat)
needs_default = -1 in matids
if needs_default:
add_material(t, {})
for mat in mats:
props = {}
if mat.has_diffuse:
alpha = 1.0
if mat.has_transparency and mat.transparency > 0:
alpha = 1.0 - mat.transparency
props["diffuse_color"] = mat.diffuse + (alpha,)
# @todo
# if mat.has_specular:
# props['specular_color'] = mat.specular
# if mat.has_specularity:
# props['specular_intensity'] = mat.specularity
add_material(mat.name, props)
|
faces = me.polygons if hasattr(me, "polygons") else me.faces
if len(faces) == len(matids):
for face, matid in zip(faces, matids):
face.material_index = matid + (1 if needs_default else 0)
# OBJECT CREATION
bob = bpy.data.objects.new(nm, me)
mat = mathutils.Matrix(
([m[0], m[1], m[2], 0], [m[3], m[4], m[5], 0], [m[6], m[7], m[8], 0], [m[9], m[10], m[11], 1])
| |
jbloom/epitopefinder
|
scripts/epitopefinder_plotdistributioncomparison.py
|
Python
|
gpl-3.0
| 3,447
| 0.004642
|
#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for lin
|
e in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.i
|
o.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
|
asolntsev/selenium
|
py/test/runner/run_pytest.py
|
Python
|
apache-2.0
| 1,047
| 0
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed unde
|
r the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
with open("pytest.ini", "w") as ini_file:
ini_file.write("[pytest]\n")
ini_file.write("addopts = -r=a\n")
ini_file.write("rootdir = py\n")
ini_file.write("
|
python_files = test_*.py *_tests.py\n")
raise SystemExit(pytest.main())
|
CMaiku/inhibit-screensaver
|
inhibit-screensaver.py
|
Python
|
gpl-2.0
| 851
| 0.007051
|
#!/usr/bin/env python
import subprocess
import sys
from gi.repository import GLib, Gio
def main():
bus = Gio.bus_get_sync(Gio.BusType.SESSION, None)
pro
|
xy = Gio.DBusProxy.new_sync(bus, Gio.DBusProxyFlags.NONE, None,
'org.freedesktop.ScreenSaver', '/ScreenSaver',
'org.freedesktop.ScreenSaver', None)
cookie = proxy.Inhibit('(ss)', sys.argv[1],
"Wrapping this command in a sc
|
reensaver inhibitor")
print('Inhibited the screensaver')
try:
subprocess.call(sys.argv[1:])
finally:
proxy.UnInhibit('(u)', cookie)
print('UnInhibited the screensaver')
if __name__ == '__main__':
if len(sys.argv) >= 2:
main()
else:
import os.path
print("usage: {} <program-to-wrap> [arguments to pass to program]"
.format(os.path.basename(sys.argv[0])))
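# Illustrative invocation (not part of the original script), following the
# usage string above; the wrapped command and its arguments are placeholders:
#
#     ./inhibit-screensaver.py mpv some-movie.mkv
#
# The screensaver stays inhibited while the wrapped command runs and is
# released again in the finally block.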
|
bvernoux/micropython
|
tests/float/inf_nan_arith.py
|
Python
|
mit
| 587
| 0.001704
|
# Test behaviour of inf and nan in basic float operations
inf = float("inf")
nan = float("nan")
values = (-2, -1, 0, 1, 2, inf, nan)
for x in values:
for y in values:
print(x, y)
print(" + - *", x + y, x - y, x * y)
try:
|
print(" /", x / y)
except ZeroDivisionError:
print(" / ZeroDivisionError")
try:
print(" ** pow", x ** y, pow(x, y))
except ZeroDivisionError:
|
print(" ** pow ZeroDivisionError")
print(" == != < <= > >=", x == y, x != y, x < y, x <= y, x > y, x >= y)
|
scaramallion/pynetdicom3
|
pynetdicom/pdu.py
|
Python
|
mit
| 68,107
| 0.000675
|
"""DICOM Upper Layer Protocol Data Units (PDUs).
There are seven different PDUs:
- A_ASSOCIATE_RQ
- A_ASSOCIATE_AC
- A_ASSOCIATE_RJ
- P_DATA_TF
- A_RELEASE_RQ
- A_RELEASE_RP
- A_ABORT_RQ
::
from_primitive encode
+----------------+ ------> +------------+ -----> +-------------+
| DUL Primit
|
ive | | PDU | | Peer AE |
+----------------+ <------ +------------+ <----- +-------------+
to_primitive decode
"""
import codecs
import logging
from struct import Struct
from pynetdicom.pdu_items import (
ApplicationContextItem,
PresentationContextItemRQ,
PresentationContextItemAC,
UserInformationItem
|
,
PresentationDataValueItem,
PDU_ITEM_TYPES
)
from pynetdicom.utils import validate_ae_title
LOGGER = logging.getLogger('pynetdicom.pdu')
# Predefine some structs to make decoding and encoding faster
UCHAR = Struct('B')
UINT2 = Struct('>H')
UINT4 = Struct('>I')
UNPACK_UCHAR = UCHAR.unpack
UNPACK_UINT2 = UINT2.unpack
UNPACK_UINT4 = UINT4.unpack
PACK_UCHAR = UCHAR.pack
PACK_UINT2 = UINT2.pack
PACK_UINT4 = UINT4.pack
class PDU(object):
"""Base class for PDUs.
Protocol Data Units (PDUs) are the message formats exchanged between peer
entities within a layer. A PDU consists of protocol control information
and user data. PDUs are constructed by mandatory fixed fields followed by
optional variable fields that contain one or more items and/or sub-items.
References
----------
DICOM Standard, Part 8, :dcm:`Section 9.3 <part08/sect_9.3.html>`
"""
def decode(self, bytestream):
"""Decode `bytestream` and use the result to set the field values of
the PDU.
Parameters
----------
bytestream : bytes
The PDU data to be decoded.
"""
for (offset, length), attr_name, func, args in self._decoders:
# Allow us to use None as a `length`
if length:
sl = slice(offset, offset + length)
else:
sl = slice(offset, None)
setattr(
self, attr_name, func(bytestream[sl], *args)
)
@property
def _decoders(self):
"""Return an iterable of tuples that contain field decoders."""
raise NotImplementedError
def encode(self):
"""Return the encoded PDU as :class:`bytes`.
Returns
-------
bytes
The encoded PDU.
"""
bytestream = bytes()
for attr_name, func, args in self._encoders:
# If attr_name is None then the field is usually reserved
if attr_name:
bytestream += func(getattr(self, attr_name), *args)
else:
bytestream += func(*args)
return bytestream
@property
def _encoders(self):
"""Return an iterable of tuples that contain field encoders."""
raise NotImplementedError
def __eq__(self, other):
"""Return ``True`` if `self` equals `other`."""
if other is self:
return True
# pylint: disable=protected-access
if isinstance(other, self.__class__):
self_dict = {
enc[0] : getattr(self, enc[0])
for enc in self._encoders if enc[0]
}
other_dict = {
enc[0] : getattr(other, enc[0])
for enc in other._encoders if enc[0]
}
return self_dict == other_dict
return NotImplemented
@staticmethod
def _generate_items(bytestream):
"""Yield PDU item data from `bytestream`.
Parameters
----------
bytestream : bytes
The encoded PDU variable item data.
Yields
------
int, bytes
The variable item's 'Item Type' parameter as int, and the item's
entire encoded data as bytes.
Notes
-----
Can be used with the following PDU items/sub-items:
- Application Context Item
- Presentation Context Item (RQ/AC)
- Abstract Syntax Sub-item
- Transfer Syntax Sub-item
- User Information Item
- Implementation Class UID Sub-item (RQ/AC)
- Implementation Version Name Sub-item (RQ/AC)
- Asynchronous Operations Window Sub-item (RQ/AC)
- SCP/SCU Role Selection Sub-item (RQ/AC)
- SOP Class Extended Negotiation Sub-item (RQ/AC)
- SOP Class Common Extended Negotiation Sub-item (RQ/AC)
- User Identity Sub-item (RQ/AC)
**Encoding**
When encoded, PDU item and sub-item data for the above has the
following structure, taken from various tables in (offsets shown
with Python indexing). Items are always encoded using Big Endian.
+--------+-------------+-------------+
| Offset | Length | Description |
+========+=============+=============+
| 0 | 1 | Item type |
+--------+-------------+-------------+
| 1 | 1 | Reserved |
+--------+-------------+-------------+
| 2 | 2 | Item length |
+--------+-------------+-------------+
| 4 | Item length | Item data |
+--------+-------------+-------------+
References
----------
* DICOM Standard, Part 8, :dcm:`Section 9.3 <part08/sect_9.3.html>`
* DICOM Standard, Part 8,
:dcm:`Section 9.3.1<part08/sect_9.3.html#sect_9.3.1>`
"""
offset = 0
while bytestream[offset:offset + 1]:
item_type = UNPACK_UCHAR(bytestream[offset:offset + 1])[0]
item_length = UNPACK_UINT2(bytestream[offset + 2:offset + 4])[0]
item_data = bytestream[offset:offset + 4 + item_length]
assert len(item_data) == 4 + item_length
yield item_type, item_data
# Move `offset` to the start of the next item
offset += 4 + item_length
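    # Illustrative sketch (not part of the original module): a minimal, made-up
    # item encoding and what _generate_items() yields for it: 0x10 is the item
    # type byte, 0x00 the reserved byte, 0x00 0x03 the big-endian item length
    # and b'1.2' the item data.
    #
    #     data = b'\x10\x00\x00\x03' + b'1.2'
    #     list(PDU._generate_items(data))  ->  [(0x10, b'\x10\x00\x00\x031.2')]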
def __len__(self):
"""Return the total length of the encoded PDU as :class:`int`."""
return 6 + self.pdu_length
def __ne__(self, other):
"""Return ``True`` if `self` does not equal `other`."""
return not self == other
@property
def pdu_length(self):
"""Return the *PDU Length* field value as :class:`int`."""
raise NotImplementedError
@property
def pdu_type(self):
"""Return the *PDU Type* field value as :class:`int`."""
return PDU_TYPES[self.__class__]
@staticmethod
def _wrap_bytes(bytestream):
"""Return `bytestream` without changing it."""
return bytestream
@staticmethod
def _wrap_encode_items(items):
"""Return `items` encoded as bytes.
Parameters
----------
items : list of PDU items
The items to encode.
Returns
-------
bytes
The encoded items.
"""
bytestream = bytes()
for item in items:
bytestream += item.encode()
return bytestream
@staticmethod
def _wrap_encode_uid(uid):
"""Return `uid` as bytes encoded using ASCII.
Each component of Application Context, Abstract Syntax and Transfer
        Syntax UIDs should be encoded as an ISO 646:1990-Basic G0 Set Numeric
        String (characters 0-9), with each component separated by '.' (0x2e).
'ascii' is chosen because this is the codec Python uses for ISO 646
[3]_.
Parameters
----------
uid : pydicom.uid.UID
The UID to encode using ASCII.
Returns
-------
bytes
The encoded `uid`.
References
----------
* DICOM Standard, Part 8, :dcm:`Annex F <part08/chapter_F.html>`
* `Python 3 codecs module
          <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
"""
return codecs.encode(uid, 'ascii')
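    # Illustrative sketch (not part of the original module), using the DICOM
    # Application Context Name as the example UID:
    #
    #     PDU._wrap_encode_uid('1.2.840.10008.3.1.1.1')
    #         ->  b'1.2.840.10008.3.1.1.1'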
def _wrap_generate_items(self, bytestream):
"""Return a list o
|
primecloud-controller-org/pcc-cli
|
src/pcc/api/instance/edit_instance_vmware.py
|
Python
|
apache-2.0
| 1,746
| 0.004009
|
# -*- coding: utf-8 -*-
def command():
return "edit-instance-vmware"
def init_argument(parser):
parser.add_argument("--instance-no", required=True)
parser.add_argument("--instance-type", required=True)
parser.add_argument("--key-name", required=True)
parser.add_argument("--compute-resource", required=True)
parser.add_argument("--is-static-ip", required=True)
parser.add_argument("--ip-address", required=False)
parser.add_argument("--subnet-mask", required=False)
parser.add_argument("--default-gateway", required=False)
parser.add_argument("--comment", required=False)
parser.add_argument("--root-size", required=False)
def execute(requester, args):
instance_no = args.instance_no
instance_type = args.instance_type
key_name = args.key_name
compute_resource = args.compute_resource
is_static_ip = args.is_static_ip
ip_address = args.ip_address
subnet_mask = args.subnet_mask
default_gateway = args.default_gateway
comment = args.comment
root_size = args.root_size
parameters = {}
parameters["InstanceNo"] = instance_no
parameters["InstanceType"] = instance_type
parameters["KeyName"] = key_name
parameters["ComputeResource"] = compute_resource
parameters["IsStaticIp"] = is
|
_static_ip
if (ip_address != None):
parameters["IpAddress"] = ip_address
if (subnet_mask != None):
parameters["SubnetMask"] = subnet_mask
if (default_gateway != None):
parameters["DefaultGateway"] = default_gateway
if (comment != None):
parameters["Comment"] = comment
if (root_size != None):
parameters["RootSize"] = root_size
return requester.execute("/Ed
|
itInstanceVmware", parameters)
|
HomeRad/TorCleaner
|
wc/dns/opcode.py
|
Python
|
gpl-2.0
| 2,630
| 0.003802
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Opcodes."""
import wc.dns.exception
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5
_by_text = {
'QUERY' : QUERY,
'IQUERY' : IQUERY,
'STATUS' : STATUS,
'NOTIFY' : NOTIFY,
'UPDATE' : UPDATE
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
class UnknownOpcode(wc.dns.exception.DNSException):
"""Raised if an opcode is unknown."""
pass
def from_text(text):
"""Convert text into an opcode.
@param text: the textual opcode
@type text: string
@raises UnknownOpcode: the opcode is unknown
@rtype: int
"""
if text.isdigit():
value = int(text)
if value >= 0 and value <= 15:
return value
value = _by_text.get(text.upper())
if value is None:
raise UnknownOpcode
return value
def from_flags(flags):
"""Extract an opcode from DNS message flags.
@param flags: int
@rtype: int
"""
return (flags & 0x7800) >> 11
def to_flags(value):
"""Convert an op
|
code to a value suitable for ORing into DNS message
flags.
@rtype: int
"""
return (value << 11) &
|
0x7800
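# Illustrative round trip (not part of the original module): the opcode lives
# in bits 11-14 of the DNS flags word, so
#
#     to_flags(UPDATE)    ->  0x2800   (5 << 11)
#     from_flags(0x2800)  ->  5        (UPDATE)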
def to_text(value):
"""Convert an opcode to text.
    @param value: the opcode
@type value: int
@raises UnknownOpcode: the opcode is unknown
@rtype: string
"""
text = _by_value.get(value)
if text is None:
text = str(value)
return text
def is_update(flags):
"""True if the opcode in flags is UPDATE.
@param flags: DNS flags
@type flags: int
@rtype: bool
"""
if (from_flags(flags) == UPDATE):
return True
return False
|
keleshev/schema
|
setup.py
|
Python
|
mit
| 1,562
| 0
|
import codecs
import sys
from setuptools import setup
version_file = "schema.py"
with open(version_file) as f:
for line in f.read().split("\n"):
if line.startswith("__version__ ="):
version = eval(line.split("=", 1)[1])
break
else:
print("No __version__ attribute found in %r" % version_file)
sys.exit(1)
setup(
name="schema",
version=version,
author="Vladimir Keleshev",
author_email="vladimir@keleshev.com",
description="Simple data validation library",
license="MIT",
keywords="schema json validation",
url="https://github.com/keleshev/schema",
py_modules=["schema"],
long_description=codecs.open("README.rst", "r", "utf-8").read(),
long_description_content_type="text/x-rst",
install_requires=open("requirements.txt", "r").read().split("\n"),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Pr
|
ogramming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3
|
.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: PyPy",
"License :: OSI Approved :: MIT License",
],
)
|
pierrejean-coudert/winlibre_pm
|
package_manager/smart/commands/install.py
|
Python
|
gpl-2.0
| 6,529
| 0.002144
|
#
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <niemeyer@conectiva.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.transaction import Transaction, PolicyInstall, sortUpgrades
from smart.transaction import INSTALL, REINSTALL
from smart.option import OptionParser
from smart.cache import Package
from smart import *
import string
import re
import os
USAGE=_("smart install [options] package ...")
DESCRIPTION=_("""
This command will install one or more packages in the
system. If a new version of an already installed package
is available, it will be selected for installation.
""")
EXAMPLES=_("""
smart install pkgname
smart install '*kgna*'
smart install pkgname-1.0
smart install pkgname-1.0-1
smart install pkgname1 pkgname2
smart install ./somepackage.file
smart install http://some.url/some/path/somepackage.file
""")
def parse_options(argv):
parser = OptionParser(usage=USAGE,
description=DESCRIPTION,
examples=EXAMPLES)
parser.add_option("--stepped", action="store_true",
help=_("split operation in steps"))
parser.add_option("--urls", action="store_true",
help=_("dump needed urls and don't commit operation"))
parser.add_option("--download", action="store_true",
help=_("download packages and don't commit operation"
|
))
parser.add_opti
|
on("--explain", action="store_true",
help=_("include additional information about changes,"
"when possible"))
parser.add_option("-y", "--yes", action="store_true",
help=_("do not ask for confirmation"))
parser.add_option("--dump", action="store_true",
help=_("dump package names and versions to stderr but "
"don't commit operation"))
opts, args = parser.parse_args(argv)
opts.args = args
return opts
def main(ctrl, opts):
if opts.explain:
sysconf.set("explain-changesets", True, soft=True)
urls = []
for arg in opts.args[:]:
if (os.path.isfile(arg) and
'/' in arg or filter(None, hooks.call("check-package-file", arg))):
ctrl.addFileChannel(arg)
opts.args.remove(arg)
elif ":/" in arg:
urls.append(arg)
if urls:
succ, fail = ctrl.downloadURLs(urls, _("packages"),
targetdir=os.getcwd())
if fail:
raise Error, _("Failed to download packages:\n") + \
"\n".join([" %s: %s" % (url, fail[url])
for url in fail])
for url, file in succ.items():
ctrl.addFileChannel(file)
opts.args.remove(url)
ctrl.reloadChannels()
cache = ctrl.getCache()
trans = Transaction(cache, PolicyInstall)
for channel in ctrl.getFileChannels():
for loader in channel.getLoaders():
for pkg in loader.getPackages():
if pkg.installed:
raise Error, _("%s is already installed") % pkg
trans.enqueue(pkg, INSTALL)
for arg in opts.args:
ratio, results, suggestions = ctrl.search(arg)
if not results:
if suggestions:
dct = {}
for r, obj in suggestions:
if isinstance(obj, Package):
dct[obj] = True
else:
dct.update(dict.fromkeys(obj.packages, True))
raise Error, _("'%s' matches no packages. "
"Suggestions:\n%s") % \
(arg, "\n".join([" "+str(x) for x in dct]))
else:
raise Error, _("'%s' matches no packages") % arg
pkgs = []
for obj in results:
if isinstance(obj, Package):
pkgs.append(obj)
if not pkgs:
installed = False
names = {}
for obj in results:
for pkg in obj.packages:
if pkg.installed:
iface.warning(_("%s (for %s) is already installed")
% (pkg, arg))
installed = True
break
else:
pkgs.append(pkg)
names[pkg.name] = True
else:
continue
break
if installed:
continue
if len(names) > 1:
raise Error, _("There are multiple matches for '%s':\n%s") % \
(arg, "\n".join([" "+str(x) for x in pkgs]))
if len(pkgs) > 1:
sortUpgrades(pkgs)
names = {}
for pkg in pkgs:
names.setdefault(pkg.name, []).append(pkg)
for name in names:
pkg = names[name][0]
if pkg.installed:
iface.warning(_("%s is already installed") % pkg)
else:
trans.enqueue(pkg, INSTALL)
iface.showStatus(_("Computing transaction..."))
trans.run()
iface.hideStatus()
if trans:
confirm = not opts.yes
print opts.urls, opts.dump, opts.download, opts.stepped, confirm
if opts.urls:
ctrl.dumpTransactionURLs(trans)
elif opts.dump:
ctrl.dumpTransactionPackages(trans, install=True)
elif opts.download:
ctrl.downloadTransaction(trans, confirm=confirm)
elif opts.stepped:
ctrl.commitTransactionStepped(trans, confirm=confirm)
else:
ctrl.commitTransaction(trans, confirm=confirm)
# vim:ts=4:sw=4:et
|
kako-nawao/django-group-by
|
django_group_by/mixin.py
|
Python
|
mit
| 2,630
| 0.00038
|
"""
This module contains the final mixin implementation, for whatever version
of Django is present.
"""
from django.db.models import ForeignKey, ManyToManyField
try:
# Django 1.9+
from .iterable import GroupByIterableMixinBase as GroupByMixinBase
except ImportError:
# Django 1.8-
from .queryset import GroupByQuerySetMixinBase as GroupByMixinBase
class GroupByMixin(GroupByMixinBase):
"""
QuerySet mixin that adds a group_by() method, similar to values() but
which returns AggregatedGroup instances when iterated instead of
dictionaries.
"""
@classmethod
def _expand_group_by_fields(cls, model, fields):
"""
Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields
"""
# Containers for resulting fields and related model fields
res = []
related = {}
# Add own fields and populate related fields
for field_name in fields:
if '__' in field_name:
# Related model field: append to related model's fields
fk_field_name, related_field = field_name.split('__', 1)
if fk_field_name not in related:
related[fk_field_name] = [related_field]
else:
related[fk_field_name].append(related_field)
else:
# Simple field, get the field instance
model_field = model._meta.get_field(field_name)
if isinstance(model_field, (ForeignKey, ManyToManyField)):
# It's a related field, get model
related_model = model_field.related_model
# Append all its fields with the correct prefix
res.extend('{}__{}'.format(field_name, f.column)
for f in related_model._meta.fields)
else:
# It's a common field, just append it
res.append(field_name)
# Resolve all related fields
for fk_field_name, field_names in rela
|
ted.items():
# Get field
fk = model._meta.get_field(fk_field_name)
# Get all fields for that related model
related_fields = cls._expand_group_by_fields(fk.related_model,
field_names)
# Append them with the correct prefix
res.extend('{}__{}'.format(fk_field_name, f) for f in related_
|
fields)
# Return all fields
return res
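    # Illustrative sketch (not part of the original module), with hypothetical
    # models: if Book has a ForeignKey "author" to an Author model whose fields
    # are id and name, then
    #
    #     GroupByMixin._expand_group_by_fields(Book, ['title', 'author'])
    #         ->  ['title', 'author__id', 'author__name']
    #
    # while an explicit lookup such as 'author__name' is resolved through the
    # `related` dict and expands to just 'author__name'.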
|
Orochimarufan/youtube-dl
|
youtube_dl/extractor/soundcloud.py
|
Python
|
unlicense
| 29,898
| 0.001138
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
from .common import (
InfoExtractor,
SearchInfoExtractor
)
from ..compat import (
compat_HTTPError,
compat_kwargs,
compat_str,
compat_urlparse,
)
from ..utils import (
error_to_compat_str,
ExtractorError,
f
|
loat_or_none,
HEADRequest,
int_or_none,
KNOWN_EXTENSIONS,
mimetype2ext,
str_or_none,
try_get,
unified_timestamp,
update_url_query,
url_or_none,
urlhandle_detect_ext,
)
class SoundcloudEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:w|player|p)\.soundcloud\.com/player/?.*?\burl=(?P<id>.+)'
_TEST = {
# from https://www.soundi.fi/uutiset/ennakkokuuntelussa-timo-kaukolamme
|
n-station-to-station-to-station-julkaisua-juhlitaan-tanaan-g-livelabissa/
'url': 'https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Fplaylists%2F922213810&show_artwork=true&maxwidth=640&maxheight=960&dnt=1&secret_token=s-ziYey',
'only_matching': True,
}
@staticmethod
def _extract_urls(webpage):
return [m.group('url') for m in re.finditer(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1',
webpage)]
def _real_extract(self, url):
query = compat_urlparse.parse_qs(
compat_urlparse.urlparse(url).query)
api_url = query['url'][0]
secret_token = query.get('secret_token')
if secret_token:
api_url = update_url_query(api_url, {'secret_token': secret_token[0]})
return self.url_result(api_url)
class SoundcloudIE(InfoExtractor):
"""Information extractor for soundcloud.com
To access the media, the uid of the song and a stream token
must be extracted from the page source and the script must make
a request to media.soundcloud.com/crossdomain.xml. Then
the media can be grabbed by requesting from an url composed
of the stream token and uid
"""
_VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/
(?!stations/track)
(?P<uploader>[\w\d-]+)/
(?!(?:tracks|albums|sets(?:/.+?)?|reposts|likes|spotlight)/?(?:$|[?#]))
(?P<title>[\w\d-]+)/?
(?P<token>[^?]+?)?(?:[?].*)?$)
|(?:api(?:-v2)?\.soundcloud\.com/tracks/(?P<track_id>\d+)
(?:/?\?secret_token=(?P<secret_token>[^&]+))?)
)
'''
IE_NAME = 'soundcloud'
_TESTS = [
{
'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
'info_dict': {
'id': '62986583',
'ext': 'mp3',
'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
'uploader': 'E.T. ExTerrestrial Music',
'uploader_id': '1571244',
'timestamp': 1349920598,
'upload_date': '20121011',
'duration': 143.216,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
}
},
# geo-restricted
{
'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '47127627',
'ext': 'mp3',
'title': 'Goldrushed',
'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
'uploader': 'The Royal Concept',
'uploader_id': '9615865',
'timestamp': 1337635207,
'upload_date': '20120521',
'duration': 227.155,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link
{
'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'description': 'test chars: \"\'/\\ä↭',
'uploader': 'jaimeMF',
'uploader_id': '69767071',
'timestamp': 1386604920,
'upload_date': '20131209',
'duration': 9.927,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link (alt format)
{
'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'description': 'test chars: \"\'/\\ä↭',
'uploader': 'jaimeMF',
'uploader_id': '69767071',
'timestamp': 1386604920,
'upload_date': '20131209',
'duration': 9.927,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# downloadable song
{
'url': 'https://soundcloud.com/oddsamples/bus-brakes',
'md5': '7624f2351f8a3b2e7cd51522496e7631',
'info_dict': {
'id': '128590877',
'ext': 'mp3',
'title': 'Bus Brakes',
'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
'uploader': 'oddsamples',
'uploader_id': '73680509',
'timestamp': 1389232924,
'upload_date': '20140109',
'duration': 17.346,
'license': 'cc-by-sa',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link, downloadable format
{
'url': 'https://soundcloud.com/oriuplift/uponly-238-no-talking-wav/s-AyZUd',
'md5': '64a60b16e617d41d0bef032b7f55441e',
'info_dict': {
'id': '340344461',
'ext': 'wav',
'title': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]',
'description': 'md5:fa20ee0fca76a3d6df8c7e57f3715366',
'uploader': 'Ori Uplift Music',
'uploader_id': '12563093',
'timestamp': 1504206263,
'upload_date': '20170831',
'duration': 7449.096,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# no album art, use avatar pic for thumbnail
{
'url': 'https://soundcloud.com/garyvee/sideways-prod-mad-real',
'md5': '59c7872bc44e5d99b7211891664760c2',
'info_dict': {
'id': '309699954',
'ext': 'mp3',
'title': 'Sideways (Prod. Mad Real)',
'description': 'md5:d41d8cd98f0
|
ignaeche/barf-project
|
barf/examples/arm/translate_smt.py
|
Python
|
bsd-2-clause
| 1,055
| 0.000948
|
#! /usr/bin/env python
import os
import sys
from barf.barf import BARF
if __name__ == "__main__":
#
# Open file
#
try:
filename = os.path.abspath("../../samples/toy/arm/branch4")
barf = BARF(filename)
except Exception as err:
print err
print "[-] Error opening file : %s" % filename
sys.exit(1)
#
# Translate to REIL
#
    print("[+] Translating: ARM -> REIL -> SMT...")
for addr, asm_instr, reil_instrs in barf.translate():
print("0x{0:08x} : {1}".format(addr, asm_instr))
for reil_instr in reil_instrs:
print("{0:14}{1}".format("", reil_instr))
try:
            # Some instructions cannot be translated to SMT, i.e.,
# UNKN, UNDEF, JCC. In those cases, an exception is
# raised.
smt_exprs = barf.smt_translator.translate(reil_instr)
|
for smt_expr in smt_exprs:
print("{0:16}{1}".format("", s
|
mt_expr))
except:
pass
|
rodo/django-perf
|
foo/hotel/urls.py
|
Python
|
gpl-3.0
| 544
| 0.003676
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from foo.hotel.models import Hotel
from foo.hotel.views import HotelLimitListView
from foo.hotel.v
|
iews import HotelLimitNoOrderListView
urlpatterns = patterns(
|
'',
url(r'^$', HotelLimitListView.as_view(model=Hotel), name='hotel'),
url(r'^noorder$', HotelLimitNoOrderListView.as_view(model=Hotel), name='hotelnoorder'))
|
mikalstill/nova
|
nova/objects/migration_context.py
|
Python
|
apache-2.0
| 3,456
| 0.000289
|
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific l
|
anguage governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova.db import api as db
from nova import exception
from nova.objects import base
from nova.objects import fields
@base.NovaObjectRegistry.register
class MigrationContext(base.NovaPersistentObject, base.NovaObj
|
ect):
"""Data representing additional resources related to a migration.
Some resources cannot be calculated from knowing the flavor alone for the
purpose of resources tracking, but need to be persisted at the time the
claim was made, for subsequent resource tracking runs to be consistent.
MigrationContext objects are created when the claim is done and are there
to facilitate resource tracking and final provisioning of the instance on
the destination host.
"""
# Version 1.0: Initial version
# Version 1.1: Add old/new pci_devices and pci_requests
VERSION = '1.1'
fields = {
'instance_uuid': fields.UUIDField(),
'migration_id': fields.IntegerField(),
'new_numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'old_numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'new_pci_devices': fields.ObjectField('PciDeviceList',
nullable=True),
'old_pci_devices': fields.ObjectField('PciDeviceList',
nullable=True),
'new_pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
'old_pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
}
@classmethod
def obj_make_compatible(cls, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 1):
primitive.pop('old_pci_devices', None)
primitive.pop('new_pci_devices', None)
primitive.pop('old_pci_requests', None)
primitive.pop('new_pci_requests', None)
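    # Illustrative sketch (not part of the original module): downgrading a
    # serialized context to version 1.0 simply drops the fields added in 1.1.
    #
    #     primitive = {'instance_uuid': <uuid>, 'migration_id': 1,
    #                  'old_pci_devices': ..., 'new_pci_requests': ...}
    #     MigrationContext.obj_make_compatible(primitive, '1.0')
    #     # the four old/new pci_* keys are popped from primitive in place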
@classmethod
def obj_from_db_obj(cls, db_obj):
primitive = jsonutils.loads(db_obj)
return cls.obj_from_primitive(primitive)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['migration_context'])
if not db_extra:
raise exception.MigrationContextNotFound(
instance_uuid=instance_uuid)
if db_extra['migration_context'] is None:
return None
return cls.obj_from_db_obj(db_extra['migration_context'])
|
donkirkby/djsquash
|
fruit/migrations/0100_prepare_squash.py
|
Python
|
mit
| 274
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migration
|
s.Migration):
dependencies
|
= [
('fruit', '0003_apple_size'),
('fruit', '0002_cranberry_bacon'),
]
operations = [
]
|
ScreamingUdder/mantid
|
scripts/AbinsModules/GeneralAbInitioProgram.py
|
Python
|
gpl-3.0
| 10,789
| 0.00482
|
from __future__ import (absolute_import, division, print_function)
from mantid.kernel import logger
import AbinsModules
import six
from mantid.kernel import Atom
class GeneralAbInitioProgramName(type):
def __str__(self):
return self.__name__
# noinspection PyMethodMayBeStatic
@six.add_metaclass(GeneralAbInitioProgramName)
class GeneralAbInitioProgram(object):
"""
A general class which groups all methods which should be inherited or implemented by an ab initio program used
in INS analysis.
|
"""
def __init__(self, input_ab_initio_filename=None):
self._num_k = None
self._num_atoms = None
|
self._sample_form = None
self._ab_initio_program = None
self._clerk = AbinsModules.IOmodule(input_filename=input_ab_initio_filename,
group_name=AbinsModules.AbinsParameters.ab_initio_group)
def read_vibrational_or_phonon_data(self):
"""
This method is different for different ab initio programs. It has to be overridden by inheriting class.
This method reads vibrational or phonon data produced by an ab initio program.
This method should do the following:
1) Open file with vibrational or phonon data (CASTEP: foo.phonon). Name of a file should be stored in
self._input_filename. There must be no spaces in the name
of a file. Extension of a file (part of a name after '.') is arbitrary.
2) Method should read from an ab initio file information about frequencies, atomic displacements,
k-point vectors, weights of k-points and ions.
3) Method should reconstruct data for symmetry equivalent k-points
(protected method _recover_symmetry_points).
**Notice: this step is not implemented now. At the moment only Gamma point calculations are supported.**
4) Method should determine symmetry equivalent atoms
**Notice: this step is not implemented now.**
5) Method should calculate hash of a file with vibrational or phonon data (protected method _calculateHash).
6) Method should store vibrational or phonon data in an hdf file (inherited method save()). The name of an hdf file is
foo.hdf5 (CASTEP: foo.phonon -> foo.hdf5). In order to save the data to hdf file the following fields
should be set:
self._hdf_filename
self._group_name
self._attributes
self._datasets
The datasets should be a dictionary with the following entries:
"frequencies" - frequencies for all k-points grouped in one numpy.array in cm^-1
"weights" - weights of all k-points in one numpy.array
"k_vectors" - all k-points in one numpy array
**Notice: both symmetry equivalent and inequivalent points should be stored; at
the moment only Gamma point calculations are supported**
"atomic_displacements" - atomic displacements for all atoms and all k-points in one numpy array
"unit_cell" - numpy array with unit cell vectors in Angstroms
The following structured datasets should be also defined:
"atoms" - Python dictionary with the information about ions. Each entry in the
dictionary has the following format 'atom_n'. Here n means number of
atom in the unit cell.
Each entry 'atom_n' in the dictionary is a dictionary with the following
entries:
"symbol" - chemical symbol of the element (for example hydrogen -> H)
"sort" - defines symmetry equivalent atoms, e.g, atoms with the same
sort are symmetry equivalent
**Notice at the moment this parameter is not functional
in LoadCastep**
"coord" - equilibrium position of atom in Angstroms;
it has a form of numpy array with three floats
"mass" - mass of atom
The attributes should be a dictionary with the following entries:
"hash" - hash of a file with the vibrational or phonon data. It should be a string
representation of hash.
"ab_initio_program" - name of the ab initio program which was used to obtain vibrational or
phonon data (for CASTEP -> CASTEP).
"filename" - name of input ab initio file
For more details about these fields please look at the documentation of IOmodule class.
:returns: Method should return an object of type AbinsData.
"""
return None
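# Illustrative sketch (added, not part of the original class): the shape of the
# payload an implementation is expected to hand to save(), following the
# docstring above; all literal values are made up for illustration.
#   self._datasets = {
#       "frequencies": numpy.array(...),           # cm^-1, all k-points
#       "weights": numpy.array(...),               # one weight per k-point
#       "k_vectors": numpy.array(...),             # one 3-vector per k-point
#       "atomic_displacements": numpy.array(...),  # [k][atom][freq][3]
#       "unit_cell": numpy.array(...),             # 3x3 cell vectors in Angstroms
#       "atoms": {"atom_0": {"symbol": "H", "sort": 0,
#                            "coord": numpy.array([0.0, 0.0, 0.0]), "mass": 1.008}},
#   }
#   self._attributes = {"hash": "<string hash of the input file>",
#                       "ab_initio_program": "CASTEP", "filename": "foo.phonon"}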
def load_formatted_data(self):
"""
Loads data from hdf file. After data is loaded it is put into AbinsData object.
:returns: object of type AbinsData
"""
data = self._clerk.load(list_of_datasets=["frequencies", "weights", "k_vectors",
"atomic_displacements", "unit_cell", "atoms"])
datasets = data["datasets"]
self._num_k = datasets["k_vectors"].shape[0]
self._num_atoms = len(datasets["atoms"])
loaded_data = {"frequencies": datasets["frequencies"],
"weights": datasets["weights"],
"k_vectors": datasets["k_vectors"],
"atomic_displacements": datasets["atomic_displacements"],
"unit_cell": datasets["unit_cell"],
"atoms": datasets["atoms"]}
return self._rearrange_data(data=loaded_data)
# Protected methods which should be reused by classes which read ab initio phonon data
def _recover_symmetry_points(self, data=None):
"""
This method reconstructs symmetry equivalent k-points.
:param data: dictionary with the data for only symmetry inequivalent k-points. This methods
adds to this dictionary phonon data for symmetry equivalent k-points.
"""
pass
def _rearrange_data(self, data=None):
"""
This method rearranges data read from input ab initio file.
:param data: dictionary with the data to rearrange
:returns: Returns an object of type AbinsData
"""
k_points = AbinsModules.KpointsData(num_atoms=self._num_atoms, num_k=self._num_k)
# 1D [k] (one entry corresponds to weight of one k-point)
k_points.set({"weights": data["weights"],
# 2D [k][3] (one entry corresponds to one coordinate of particular k-point)
"k_vectors": data["k_vectors"],
# 2D array [k][freq] (one entry corresponds to one frequency for the k-point k)
"frequencies": data["frequencies"],
# 4D array [k][atom_n][freq][3] (one entry corresponds to
# one coordinate for atom atom_n, frequency freq and k-point k )
"atomic_displacements": data["atomic_displacements"],
"unit_cell": data["unit_cell"]
})
atoms = AbinsModules.AtomsDaTa(num_atoms=self._num_atoms)
atoms.set(data["atoms"])
result_data = AbinsModules.AbinsData()
result_data.set(k_points_data=k_points, atoms_data=atoms)
return result_data
def save_ab_initio_data(self, data=None):
"""
Saves ab initio data to an HDF5 file.
:pa
|
coati-00/nepi
|
nepi/main/tests/test_models.py
|
Python
|
gpl-2.0
| 11,699
| 0
|
from datetime import date
import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from pagetree.models import Hierarchy, Section, UserPageVisit
from pagetree.tests.factories import HierarchyFactory, ModuleFactory
from factories import SchoolGroupFactory
from nepi.main.models import AggregateQuizScore, PendingTeachers, Country
from nepi.main.tests.factories import StudentProfileFactory, \
TeacherProfileFactory, ICAPProfileFactory, \
CountryAdministratorProfileFactory, \
SchoolFactory, InstitutionAdminProfileFactory, CountryFactory
class TestCountry(TestCase):
def test_choices(self):
country1 = CountryFactory(display_name="Beta")
country2 = CountryFactory(display_name="Alpha")
choices = Country.choices()
self.assertEquals(len(choices), 2)
self.assertEquals(choices[0], (country2.name, country2.display_name))
self.assertEquals(choices[1], (country1.name, country1.display_name))
class TestGroup(TestCase):
def test_unicode(self):
grp = SchoolGroupFactory()
self.assertEqual(str(grp), grp.name)
def test_format_time(self):
start = date(2007, 1, 5)
end = date(2007, 12, 25)
grp = SchoolGroupFactory(start_date=start, end_date=end)
self.assertEquals(grp.formatted_start_date(), "01/05/2007")
self.assertEquals(grp.formatted_end_date(), "12/25/2007")
def test_is_active(self):
start = date(2007, 1, 5)
end = date(2007, 12, 25)
grp = SchoolGroupFactory(start_date=start, end_date=end)
self.assertFalse(grp.is_active())
delta = datetime.timedelta(days=-90)
grp.end_date = datetime.date.today() + delta
self.assertTrue(grp.is_active())
delta = datetime.timedelta(days=90)
grp.end_date = datetime.date.today() + delta
self.assertTrue(grp.is_active())
def test_students(self):
grp = SchoolGroupFactory()
icap = ICAPProfileFactory()
country = CountryAdministratorProfileFactory()
dean = InstitutionAdminProfileFactory()
teacher = TeacherProfileFactory()
student = StudentProfileFactory()
icap.group.add(grp)
country.group.add(grp)
dean.group.add(grp)
teacher.group.add(grp)
student.group.add(grp)
self.assertEquals(grp.students().count(), 1)
class TestUserProfile(TestCase):
def setUp(self):
self.student = StudentProfileFactory().user
self.teacher = TeacherProfileFactory().user
self.school_admin = InstitutionAdminProfileFactory().user
self.country_admin = CountryAdministratorProfileFactory().user
self.icap = ICAPProfileFactory().user
ModuleFactory("main", "/")
self.hierarchy = Hierarchy.objects.get(name='main')
def test_user_profile_unis(self):
self.assertEquals(unicode(self.student), self.student.username)
def test_display_name(self):
self.assertEquals(self.student.profile.display_name(),
self.student.username)
def test_user_profile_roles(self):
self.assertTrue(self.student.profile.is_student())
self.assertFalse(self.teacher.profile.is_student())
self.assertFalse(self.school_admin.profile.is_student())
self.assertFalse(self.country_admin.profile.is_student())
self.assertFalse(self.icap.profile.is_student())
self.assertFalse(self.student.profile.is_teacher())
self.assertTrue(self.teacher.profile.is_teacher())
self.assertFalse(self.school_admin.profile.is_teacher())
self.assertFalse(self.country_admin.profile.is_teacher())
self.assertFalse(self.icap.profile.is_teacher())
self.assertFalse(self.student.profile.is_institution_administrator())
self.assertFalse(self.teacher.profile.is_institution_administrator())
self.assertTrue(
self.school_admin.profile.is_institution_administrator())
self.assertFalse(
self.country_admin.profile.is_institution_administrator())
self.assertFalse(self.icap.profile.is_institution_administrator())
self.assertFalse(self.student.profile.is_country_administrator())
self.assertFalse(self.teacher.profile.is_country_administrator())
self.assertFalse(self.school_admin.profile.is_country_administrator())
self.assertTrue(self.country_admin.profile.is_country_administrator())
self.assertFalse(self.icap.profile.is_country_administrator())
self.assertFalse(self.student.profile.is_icap())
self.assertFalse(self.teacher.profile.is_icap())
self.assertFalse(self.school_admin.profile.is_icap())
self.assertFalse(self.country_admin.profile.is_icap())
self.assertTrue(self.icap.profile.is_icap())
self.assertEquals(self.student.profile.role(), 'student')
self.assertEquals(self.teacher.profile.role(), 'faculty')
self.assertEquals(self.country_admin.profile.role(), 'country')
self.assertEquals(self.icap.profile.role(), 'icap')
def test_last_location(self):
self.assertEquals(self.student.profile.last_location(self.hierarchy),
self.hierarchy.get_root())
section = Section.objects.get(slug='two')
UserPageVisit.objects.create(user=self.student, section=section)
self.assertEquals(self.student.profile.last_location(self.hierarchy),
section)
def test_percent_complete(self):
root = self.hierarchy.get_root()
self.assertEquals(self.student.profile.percent_complete(root), 0)
# visit section one & child one
section_one = Section.objects.get(slug='one')
child_one = Section.objects.get(slug='introduction')
UserPageVisit.objects.create(user=self.student, section=section_one)
UserPageVisit.objects.create(user=self.student, section=child_one)
self.assertEquals(self.student.profile.percent_complete(root), 50)
def test_percent_complete_session(self):
section_one = Section.objects.get(slug='one')
child_one = Section.objects.get(slug='introduction')
pct = self.student.profile.percent_complete(section_one)
self.assertEquals(pct, 0)
UserPageVisit.objects.create(user=self.student, section=section_one)
pct = self.student.profile.percent_complete(section_one)
self.assertEquals(pct, 0)
UserPageVisit.objects.create(user=self.student, section=child_one)
pct = self.student.profile.percent_complete(section_one)
self.assertEquals(pct, 100)
def test_sessions_completed(self):
section_one = Section.objects.get(slug='one')
child_one = Section.objects.get(slug='introduction')
self.assertEquals(self.student.profile.sessions_completed(
self.hierarchy), 2)
UserPageVisit.objects.create(user=self.student, section=section_one)
UserPageVisit.objects.create(user=self.student, section=child_one)
self.assertEquals(
self.student.profile.sessions_completed(self.hierarchy), 3)
def test_joined_groups(self):
group = SchoolGroupFactory()
self.assertEquals(self.student.profile.joined_groups().count(), 0)
self.student.profile.group.add(group)
self.assertEquals(self.student.profile.joined_groups().count(), 1)
group.archived = True
group.save()
self.assertEquals(self.student.profile.joined_groups().count(), 0)
def test_managed_groups(self):
teacher = TeacherProfileFactory().user
teacher_grp = SchoolGroupFactory(creator=teacher)
alt_teacher = TeacherProfileFactory().user # test noise
alt_teacher_grp = SchoolGroupFactory(creator=alt_teacher,
school=teacher_grp.school)
school = InstitutionAdminProfileFactory(
country=teacher_grp.school.country, school=teacher_grp.school).user
school_grp = SchoolGroupFactory(creator=school,
school=teacher_grp.school)
|
joferkington/oost_paper_code
|
invert_slip_fixed_azimuth.py
|
Python
|
mit
| 966
| 0.003106
|
"""
Restores the uplifted horizons while restricting slip along the fault to the
specified azimuth.
"""
import numpy as np
|
import matplotlib.pyplot as plt
from fault_kinematics.homogeneous_simple_shear import invert_slip
import data
import basic
def main():
azimuth = data.fault_strike + 90
# azimuth = 304 # Plate motion from Loveless & Meade
def func(*args, **kwargs):
return forced_direction_inversion(azimuth, *args, **kwargs)
slips, heaves, variances, planar_variances = basic.restore_horizons(func)
basic.plot_restored_locations(slips, heaves)
plt.show()
def forced_direction_inversion(azimuth, fault, xyz, alpha, **kwargs):
"""Forces the inversion to only consider slip along the given azimuth."""
azimuth = np.radians(90 - azimuth)
dx, dy = np.cos(azimuth), np.sin(azimuth)
direc = [[dx, dy], [dx, dy]]
return invert_slip(fault, xyz, alpha, direc=direc, **kwargs)
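# Worked example (added for illustration, not in the original script): for an
# azimuth of 90 degrees (due east), np.radians(90 - 90) = 0, so the direction
# vector is (dx, dy) = (cos 0, sin 0) = (1.0, 0.0); an azimuth of 0 (due north)
# gives (0.0, 1.0). The unit vector is simply repeated to build the direc
# argument in the shape the code above passes to invert_slip.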
if __name__ == '__main__':
main()
|
Ensembles/ert
|
python/python/ert/ecl/fortio.py
|
Python
|
gpl-3.0
| 7,299
| 0.004384
|
# Copyright (C) 2011 Statoil ASA, Norway.
#
# The file 'fortio.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module to support transparent binary IO of Fortran created files.
Fortran is a funny language; when writing binary blobs of data to
file the Fortran runtime will silently add a header and footer around
the data. The Fortran code:
integer array(100)
write(unit) array
it actually writes a header and a tail in addition to the actual
data. The header and tail are each a 4 byte integer whose value is the
number of bytes in the immediately following record. I.e. what is
actually found on disk after the Fortran code above is:
| 400 | array ...... | 400 |
The fortio.c file implements the fortio_type C structure which can be
used to read and write these structures transparently. The current
python module is a minimal wrapping of this datastructure; mainly to
support passing of FortIO handles to the underlying C functions. A
more extensive wrapping of the fortio implementation would be easy.
"""
import ctypes
import os
import sys
from cwrap import BaseCClass
from ert.ecl import EclPrototype
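# Illustrative sketch (added, not part of the original module): the
# head/payload/tail record layout described in the module docstring can also be
# read in pure Python with the standard struct module. The little-endian "<i"
# marker format is an assumption of this sketch; the real fortio C code handles
# endian flipping itself.
import struct
def _read_fortran_record_sketch(stream):
    """Return the raw payload of one Fortran record from an open binary stream."""
    head = stream.read(4)
    if len(head) < 4:
        return None  # end of file
    (nbytes,) = struct.unpack("<i", head)
    payload = stream.read(nbytes)
    (tail,) = struct.unpack("<i", stream.read(4))
    assert tail == nbytes, "corrupt record: head and tail markers differ"
    return payload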
class FortIO(BaseCClass):
TYPE_NAME = "fortio"
READ_MODE = 1
WRITE_MODE = 2
READ_AND_WRITE_MODE = 3
APPEND_MODE = 4
_open_reader = EclPrototype("void* fortio_open_reader(char*, bool, bool)", bind=False)
_open_writer = EclPrototype("void* fortio_open_writer(char*, bool, bool)", bind=False)
_open_readwrite = EclPrototype("void* fortio_open_readwrite(char*, bool, bool)", bind=False)
_open_append = EclPrototype("void* fortio_open_append(char*, bool, bool)", bind=False)
_guess_fortran = EclPrototype("bool fortio_looks_like_fortran_file(char*, bool)", bind=False)
_write_record = EclPrototype("void fortio_fwrite_record(fortio, char*, int)")
_get_position = EclPrototype("long fortio_ftell(fortio)")
_seek = EclPrototype("void fortio_fseek(fortio, long, int)")
_close = EclPrototype("bool fortio_fclose(fortio)")
_truncate = EclPrototype("bool fortio_ftruncate(fortio, long)")
_filename = EclPrototype("char* fortio_filename_ref(fortio)")
def __init__(self, file_name, mode=READ_MODE, fmt_file=False, endian_flip_header=True):
"""Will open a new FortIO handle to @file_name - default for reading.
The newly created FortIO handle will open the underlying FILE*
for reading, but if you pass the flag mode=FortIO.WRITE_MODE
the file will be opened for writing.
Observe that the flag @endian_flip_header will only affect the
interpretation of the block size markers in the file; endian
flipping of the actual data blocks must be handled at a higher
level.
When you are finished working with the FortIO instance you can
manually close it with the close() method, alternatively that
will happen automagically when it goes out of scope.
Small example script opening a restart file, and then writing
all the pressure keywords to another file:
import sys
from ert.ecl import FortIO, EclFile
rst_file = EclFile(sys.argv[1])
fortio = FortIO("PRESSURE", mode=FortIO.WRITE_MODE)
for kw in rst_file:
if kw.name() == "PRESSURE":
kw.write(fortio)
fortio.close()
See the documentation of openFortIO() for an alternative
method based on a context manager and the with statement.
"""
read_modes = (FortIO.READ_MODE, FortIO.APPEND_MODE, FortIO.READ_AND_WRITE_MODE)
if mode in read_modes and not os.path.exists(file_name):
raise IOError('No such file "%s".' % file_name)
if mode == FortIO.READ_MODE:
c_pointer = self._open_reader(file_name, fmt_file, endian_flip_header)
elif mode == FortIO.WRITE_MODE:
c_pointer = self._open_writer(file_name, fmt_file, endian_flip_header)
elif mode == FortIO.READ_AND_WRITE_MODE:
c_pointer = self._open_readwrite(file_name, fmt_file, endian_flip_header)
elif mode == FortIO.APPEND_MODE:
c_pointer = self._open_append(file_name, fmt_file, endian_flip_header)
else:
raise UserWarning("Unknown mode: %d" % mode)
self.__mode = mode
if not c_pointer:
raise IOError('Failed to open FortIO file "%s".' % file_name)
super(FortIO, self).__init__(c_pointer)
def close(self):
if self:
self._close()
self._invalidateCPointer()
def getPosition(self):
""" @rtype: long """
return self._get_position()
def truncate(self, size=None):
"""Will truncate the file to new size.
If the method is called without a size argument the stream
will be truncated to the current position.
"""
if size is None:
size = self.getPosition()
if not self._truncate(size):
raise IOError("Truncate of fortran filehandle:%s failed" % self.filename())
def filename(self):
return self._filename()
def seek(self, position, whence=0):
# SEEK_SET = 0
# SEEK_CUR = 1
# SEEK_END = 2
self._seek(position, whence)
@classmethod
def isFortranFile(cls, filename, endian_flip=True):
"""@rtype: bool
@type filename: str
Will use heuristics to try to guess if @filename is a binary
file written in fortran style. ASCII files will return false,
even if they are structured as ECLIPSE keywords.
"""
return cls._guess_fortran(filename, endian_flip)
def free(self):
self.close()
class FortIOContextManager(object):
def __init__(self, fortio):
self.__fortio = fortio
def __enter__(self):
return self.__fortio
def __exit__(self, exc_type, exc_val, exc_tb):
self.__fortio.close()
return exc_type is not None
def openFortIO(file_name, mode=FortIO.READ_MODE, fmt_file=False, endian_flip_header=True):
"""Will create FortIO based context manager for use with with.
The with: statement and context managers is a good alternative in
the situation where you need to ensure resource cleanup.
import sys
from ert.ecl import FortIO, EclFile
rst_file = EclFile(sys.argv[1])
with openFortIO("PRESSURE", mode=FortIO.WRITE_MODE) as fortio:
for kw in rst_file:
if kw.name() == "PRESSURE":
kw.write(fortio)
"""
return FortIOContextManager(FortIO(file_name, mode=mode, fmt_file=fmt_file,
endian_flip_header=endian_flip_header))
|
krzotr/kismon
|
kismon/windows/channel.py
|
Python
|
bsd-3-clause
| 3,856
| 0.038641
|
from gi.repository import Gtk
class ChannelWindow:
def __init__(self, sources, client_thread):
self.sources = sources
self.client_thread = client_thread
self.changes = {}
self.widgets = {}
self.gtkwin = Gtk.Window()
self.gtkwin.set_position(Gtk.WindowPosition.CENTER)
self.gtkwin.set_default_size(320, 240)
self.gtkwin.set_title("Configure Channel")
vbox = Gtk.VBox()
self.gtkwin.add(vbox)
self.sources_list = Gtk.VBox()
sources_list_scroll = Gtk.ScrolledWindow()
sources_list_scroll.add(self.sources_list)
sources_list_scroll.get_children()[0].set_shadow_type(Gtk.ShadowType.NONE)
sources_list_scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
vbox.pack_start(sources_list_scroll, True, True, 0)
for uuid in self.sources:
self.widgets[uuid] = {}
source = self.sources[uuid]
frame = Gtk.Frame()
frame.set_label(source["username"])
self.sources_list.pack_start(frame, False, False, 0)
table = Gtk.Table(n_rows=3, n_columns=3)
frame.add(table)
hop_button = Gtk.RadioButton.new_with_label_from_widget(None, 'Hop')
if source["hop"] > 0:
hop_button.clicked()
hop_button.connect("clicked", self.on_change_mode, uuid, "hop")
hop_button.set_alignment(0,0)
table.attach(hop_button, 0, 1, 0, 1)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(3)
field.set_increments(1,10)
field.set_range(1,100)
field.set_value(source["velocity"])
if source["hop"] == 0:
field.set_sensitive(False)
self.widgets[uuid]["hop"] = field
field.connect("changed", self.on_change_value, uuid, "hop")
table.attach(field, 1, 2, 0, 1, xoptions=Gtk.AttachOptions.SHRINK)
label = Gtk.Label(label="rate")
label.set_justify(Gtk.Justification.LEFT)
label.set_alignment(0.1,0.5)
table.attach(label, 2, 3, 0, 1, xoptions=Gtk.AttachOptions.FILL)
lock_button = Gtk.RadioButton.new_with_label_from_widget(hop_button, "Lock")
if source["hop"] == 0:
lock_button.clicked()
lock_button.connect("clicked", self.on_change_mode, uuid, "lock")
hop_button.set_alignment(0,0)
table.attach(lock_button, 0, 1, 1, 2)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(3)
field.set_increments(1,10)
field.set_range(1,100)
if source["hop"] == 0:
field.set_value(source["channel"])
else:
field.set_value(1)
field.set_sensitive(False)
self.widgets[uuid]["lock"] = field
field.connect("changed", self.on_change_value, uuid, "lock")
table.attach(field, 1, 2, 1, 2, xoptions=Gtk.AttachOptions.SHRINK)
label = Gtk.Label(label="channel")
label.set_justify(Gtk.Justification.FILL)
label.set_alignment(0.1,0.5)
table.attach(label, 2, 3, 1, 2, xoptions=Gtk.AttachOptions.FILL)
button_box = Gtk.HButtonBox()
vbox.pack_end(button_box, False, False, 0)
cancel_button = Gtk.Button.new_with_mnemonic('_Cancel')
cancel_button.connect("clicked", self.on_cancel)
button_box.add(cancel_button)
apply_button = Gtk.Button.new_with_mnemonic('_Apply')
apply_button.connect("clicked", self.on_apply)
button_box.add(apply_button)
self.gtkwin.show_all()
def on_change_mode(self, widget, uuid, mode):
if not widget.get_active():
return
self.changes[uuid] = mode
self.widgets[uuid][mode].set_sensitive(True)
if mode == "lock":
self.widgets[uuid]["hop"].set_sensitive(False)
else:
self.widgets[uuid]["lock"].set_sensitive(False)
def on_change_value(self, widget, uuid, mode):
self.changes[uuid] = mode
def on_apply(self, widget):
for uuid in self.changes:
mode = self.changes[uuid]
value = int(self.widgets[uuid][mode].get_value())
self.client_thread.client.set_channel(uuid, mode, value)
self.gtkwin.destroy()
def on_cancel(self, widget):
self.gtkwin.destroy()
|
zinnschlag/high-pygtk
|
highgtk/present/default/inquiry.py
|
Python
|
lgpl-3.0
| 2,487
| 0.018898
|
import gtk
import highgtk.entity
import highgtk.present.default.layout
def add (inquiry):
window = getattr (inquiry, "present_window", None)
if window is None:
inquiry.present_window = gtk.Dialog()
title = getattr (inquiry, "title", None)
if title is None:
root = highgtk.entity.get_root (inquiry)
title = "Inquiry from %s" % root.name
inquiry.present_window.add_button (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
ok_text = getattr (inquiry, "ok_text", gtk.STOCK_OK)
inquiry.present_window.add_button (ok_text, gtk.RESPONSE_OK)
inquiry.present_window.set_default_response (gtk.RESPONSE_OK)
inquiry.present_window.connect ("response", response, inquiry)
inquiry.present_window.connect ("delete_event", delete_event, inquiry)
inquiry.present_window.set_title (title)
inquiry.present_window.set_position (gtk.WIN_POS_CENTER)
inquiry.present_layout = highgtk.present.default.layout.get_layout (inquiry.data)
inquiry.present_layout.build (inquiry.present_window.get_content_area())
inquiry.present_report = gtk.Label()
inquiry.present_report.set_line_wrap (True)
inquiry.present_report.set_alignment (0.0, 0.5)
inquiry.present_window.get_content_area().pack_end (inquiry.present_report)
inquiry.present_window.show_all()
else:
window.present()
def remove (inquiry):
window = getattr (inquiry, "present_window", None)
if window is not None:
window.hide()
del inquiry.present_window
def cancel (inquiry):
method_name = getattr (inquiry, "cancel_method", None)
if method_name is not None:
method = getattr (inquiry.parent, method_name)
method (inquiry)
inquiry.remove (inquiry)
def okay (inquiry):
method_name = getattr (inquiry, "ok_method", "inquiry_okay")
error = inquiry.present_layout.get_error()
if error is not None:
inquiry.error_report.primary = error
inquiry.add (inquiry.error_report)
else:
method = getattr (inquiry.parent, method_name)
method (inquiry, inquiry.present_layout.get_data())
inquiry.remove (inquiry)
def response (widget, response_id, inquiry):
if response_id==gtk.RESPONSE_OK:
okay (inquiry)
elif response_id==gtk.RESPONSE_CANCEL:
cancel (inquiry)
return True
def delete_event (widget, event, inquiry):
cancel (inquiry)
return True
|
Taiwanese-Corpus/kaxabu-muwalak-misa-a-ahan-bizu
|
後端/kaxabu/urls.py
|
Python
|
mit
| 819
| 0
|
"""kaxabu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^', include('族語辭典.網址')),
url(r'^admin/', admin.site.urls),
]
|
flavour/eden
|
modules/templates/BRCMS/RLP/anonymize.py
|
Python
|
mit
| 7,359
| 0.001223
|
# -*- coding: utf-8 -*-
from uuid import uuid4
from gluon import current
def rlpcm_person_anonymize():
""" Rules to anonymize a case file """
auth = current.auth
s3db = current.s3db
ANONYMOUS = "-"
# Standard anonymizers
from s3db.pr import pr_address_anonymise as anonymous_address, \
pr_person_obscure_dob as obscure_dob
# Helper to produce an anonymous ID (pe_label)
anonymous_id = lambda record_id, f, v: "NN%s" % uuid4().hex[-8:].upper()
anonymous_code = lambda record_id, f, v: uuid4().hex
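# For illustration (added comment): anonymous_id(...) yields strings like
# 'NN4F0A9C2D' (the prefix 'NN' plus the last 8 hex digits of a random UUID,
# upper-cased), while anonymous_code(...) yields a full 32-character hex string.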
# Case Activity Default Closure
activity_closed = s3db.br_case_activity_default_status(closing=True)
# General rule for attachments
documents = ("doc_document", {
"key": "doc_id",
"match": "doc_id",
"fields": {"name": ("set", ANONYMOUS),
"file": "remove",
"url": "remove",
"comments": "remove",
},
"delete": True,
})
# Rule for direct offers (from the offerer perspective)
direct_offers = ("br_direct_offer", {
"key": "offer_id",
"match": "id",
"delete": True,
})
# Rules for user accounts
account = ("auth_user", {
"key": "id",
"match": "user_id",
"fields": {"id": auth.s3_anonymise_roles,
"first_name": ("set", "-"),
"last_name": "remove",
"email": anonymous_code,
"organisation_id": "remove",
"password": auth.s3_anonymise_password,
"deleted": ("set", True),
},
})
# Rules
rules = [
# Rules to remove PID from person record and case file
{"name": "default",
"title": "Names, IDs, Reference Numbers, Contact Information, Addresses",
"fields": {"first_name": ("set", ANONYMOUS),
"last_name": ("set", ANONYMOUS),
"pe_label": anonymous_id,
"date_of_birth": obscure_dob,
"comments": "remove",
},
"cascade": [("br_case", {
"key": "person_id",
"match": "id",
"fields": {"comments": "remove",
},
"cascade": [documents,
],
}),
("pr_contact", {
"key": "pe_id",
"match": "pe_id",
"fields": {"contact_description": "remove",
"value": ("set", ""),
"comments": "remove",
},
"delete": True,
}),
("pr_contact_emergency", {
"key": "pe_id",
"match": "pe_id",
"fields": {"name": ("set", ANONYMOUS),
"relationship": "remove",
"phone": "remove",
"comments": "remove",
},
"delete": True,
}),
("pr_address", {
"key": "pe_id",
"match": "pe_id",
"fields": {"location_id": anonymous_address,
"comments": "remove",
},
}),
("pr_person_details", {
"key": "person_id",
"match": "id",
"fields": {"education": "remove",
"occupation": "remove",
},
}),
("pr_image", {
"key": "pe_id",
"match": "pe_id",
"fields": {"image": "remove",
"url": "remove",
"description": "remove",
},
"delete": True,
}),
("hrm_human_resource", {
"key": "person_id",
"match": "id",
"fields": {"status": ("set", 2),
"site_id": "remove",
"comments": "remove",
},
}),
],
},
# Rules to remove PID from activities and offers
{"name": "activities",
"title": "Needs Reports and Offers of Assistance",
"cascade": [("br_case_activity", {
"key": "person_id",
"match": "id",
"fields": {"location_id": anonymous_address,
"subject": ("set", ANONYMOUS),
"need_details": "remove",
"activity_details": "remove",
"outcome": "remove",
"comments": "remove",
"status_id": ("set", activity_closed),
},
"cascade": [documents,
],
}),
("br_assistance_offer", {
"key": "pe_id",
"match": "pe_id",
"fields": {"name": ("set", ANONYMOUS),
"description": "remove",
"capacity": "remove",
"location_id": anonymous_address,
"contact_name": "remove",
"contact_phone": "remove",
"contact_email": "remove",
"availability": ("set", "RTD"),
"comments": "remove",
},
"cascade": [direct_offers,
],
}),
],
},
# Rules to unlink and remove user account
{"name": "account",
"title": "User Account",
"cascade": [("pr_person_user", {
"key": "pe_id",
"match": "pe_id",
"cascade": [account,
],
"delete": True,
}),
],
},
]
return rules
|
MahjongRepository/mahjong
|
mahjong/hand_calculating/yaku_list/honroto.py
|
Python
|
mit
| 679
| 0
|
from functools import reduce
from mahjong.constants import HONOR_INDICES, TERMINAL_INDICES
from mahjong.hand_calculating.yaku import Yaku
class Honroto(Yaku):
"""
All tiles are terminals or honours
"""
def __init__(self, yaku_id=None):
super(Honroto, self).__init__(yaku_id)
def set_attributes(self):
self.tenhou_id = 31
self.name = "Honroutou"
self.han_open = 2
self.han_closed = 2
self.is_yakuman = False
def is_condition_met(self, hand, *args):
indices = reduce(lambda z, y: z + y, hand)
result = HONOR_INDICES + TERMINAL_INDICES
return all(x in result for x in indices)
|
jbking/python-stdnet
|
stdnet/apps/columnts/npts.py
|
Python
|
bsd-3-clause
| 1,723
| 0
|
'''Experimental!
This is an experimental module for converting ColumnTS into
dynts.timeseries. It requires dynts_.
.. _dynts: https://github.com/quantmind/dynts
'''
from collections import Mapping
from . import models as columnts
import numpy as ny
from dynts import timeseries, tsname
class ColumnTS(columnts.ColumnTS):
'''Integrate stdnet timeseries with dynts_ TimeSeries'''
def front(self, *fields):
'''Return the front pair of the structure'''
ts = self.irange(0, 0, fields=fields)
if ts:
return ts.start(), ts[0]
def back(self, *fields):
'''Return the back pair of the structure'''
ts = self.irange(-1, -1, fields=fields)
if ts:
return ts.end(), ts[0]
def load_data(self, result):
loads = self.pickler.loads
vloads = self.value_pickler.loads
dt, va = result
if result[0] and va:
dates = ny.array([loads(t) for t in dt])
fields = []
vals = []
if not isinstance(va, Mapping):
va = dict(va)
for f in sorted(va):
fields.append(f)
data = va[f]
vals.append((vloads(v) for v in data))
values = ny.array(list(zip(*vals)))
name = tsname(*fields)
else:
name = None
dates = None
values = None
return timeseries(name=name, date=dates, data=values)
def _get(self, result):
ts = self.load_data(result)
return ts[0]
class ColumnTSField(columnts.ColumnTSField):
def structure_class(self):
return ColumnTS
|
Microsoft/ApplicationInsights-Python
|
tests/applicationinsights_tests/channel_tests/contracts_tests/TestData.py
|
Python
|
mit
| 1,545
| 0.006472
|
import unittest
import datetime
import uuid
import sys
import json
import sys, os, os.path
root_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', '..')
if root_directory not in sys.path:
sys.path.append(root_directory)
from applicationinsights.channel.contracts import *
from .Utils import TestJsonEncoder
class TestData(unittest.TestCase):
def test_construct(self):
item = Data()
self.assertNotEqual(item, None)
def test_base_type_property_works_as_expected(self):
expected = 'Test string'
item = Data()
item.base_type = expected
actual = item.base_type
self.assertEqual(expected, actual)
expected = 'Other string'
item.base_type = expected
actual = item.base_type
self.assertEqual(expected, actual)
def test_base_data_property_works_as_expected(self):
expected = object()
item = Data()
item.base_data = expected
actual = item.base_data
self.assertEqual(expected, actual)
expected = object()
item.base_data = expected
actual = item.base_data
self.assertEqual(expected, actual)
def test_serialize_works_as_expected(self):
item = Data()
item.base_type = 'Test string'
item.base_data = object()
actual = json.dumps(item.write(), separators=(',', ':'), cls=TestJsonEncoder)
expected = '{"baseType":"Test string","baseData":{}}'
self.assertEqual(expected, actual)
|
0x0AF/lamure
|
pypro/tests/context.py
|
Python
|
bsd-3-clause
| 143
| 0.013986
|
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import pypro
|
efectivo/network_sim
|
grid1_basic.py
|
Python
|
mit
| 755
| 0.006623
|
from units import runner
import numpy as np
def job_gen():
for N in range(4, 21, 2):
for rate_power in [1, 3/2.]:
rate = np.power(N, rate_power)
test = {
'test': {},
'net': {'topology': 'grid', 'N': N},
'pattern': {'type': 'random_one_bent', 'rate': rate, 'power': rate_power},
'cycles': 100000,
'protocols': [
{'type': 'greedy', 'scheduler': 'LIS'},
{'type': 'goed', 'dh_type':'ogh', 'p': .1, 'scheduler': 'LIS'},
{'type': 'goed', 'dh_type': 'ogh', 'p': .5, 'scheduler': 'LIS'}
]
}
yield test
runner.run_parallel(15, job_gen, 'grid1')
|
prasannav7/ggrc-core
|
src/ggrc/migrations/versions/20151112145524_35e5344803b4_add_missing_constraints_for_vendors.py
|
Python
|
apache-2.0
| 783
| 0.003831
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Add missing constraints for vendo
|
rs
Revision ID: 35e5344803b4
Revises: 27684e5f313a
Create Date: 2015-11-12 14:55:24.420680
"""
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc.models import Vendor
# revision identifiers, used by Alembic.
revision = '35e5344803b4'
down_revision = '27684e5f313a'
def upgrade():
resolve_duplicates(Vendor, "slug")
op.create_unique_constraint('uq_slug_vendors', 'vendors', ['slug'])
def downgrade():
op.drop_constraint('uq_slug_vendors', 'vendors', 'unique')
|
jejimenez/django
|
tests/csrf_tests/tests.py
|
Python
|
bsd-3-clause
| 19,350
| 0.000413
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import (
CSRF_KEY_LENGTH, CsrfViewMiddleware, get_token,
)
from django.template import RequestContext, Template
from django.template.context_processors import csrf
from django.test import SimpleTestCase, override_settings
from django.views.decorators.csrf import (
csrf_exempt, ensure_csrf_cookie, requires_csrf_token,
)
# Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests
def post_form_response():
resp = HttpResponse(content="""
<html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html>
""", mimetype="text/html")
return resp
def post_form_view(request):
"""A view that returns a POST form (without a token)"""
return post_form_response()
# Response/views used for template tag tests
def token_view(request):
"""A view that uses {% csrf_token %}"""
context = RequestContext(request, processors=[csrf])
template = Template("{% csrf_token %}")
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""
A view that doesn't use the token, but does use the csrf view processor.
"""
context = RequestContext(request, processors=[csrf])
template = Template("")
return HttpResponse(template.render(context))
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that allows us to change some things
more easily
"""
def is_secure(self):
return getattr(self, '_is_secure_override', False)
class CsrfViewMiddlewareTest(SimpleTestCase):
# The csrf token is potentially from an untrusted source, so could have
# characters that need dealing with.
_csrf_id_cookie = b"<1>\xc2\xa1"
_csrf_id = "1"
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id))
def test_process_view_token_too_long(self):
"""
Check that if the token is longer than expected, it is ignored and
a new token is created.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH)
def test_process_response_get_token_used(self):
"""
When get_token is used, check that the cookie is created and headers
patched.
"""
req = self._get_GET_no_csrf_cookie_request()
# Put tests for CSRF_COOKIE_* settings here
with self.settings(CSRF_COOKIE_NAME='myname',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get('myname', False)
self.assertNotEqual(csrf_cookie, False)
self.assertEqual(csrf_cookie['domain'], '.example.com')
self.assertEqual(csrf_cookie['secure'], True)
self.assertEqual(csrf_cookie['httponly'], True)
self.assertEqual(csrf_cookie['path'], '/test/')
self.assertIn('Cookie', resp2.get('Vary', ''))
def test_process_response_get_token_not_used(self):
"""
Check that if get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {})
resp = non_token_view_using_request_processor(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
Check that if no CSRF cookies is present, the middleware rejects the
incoming request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_no_token(self):
"""
Check that if a CSRF cookie is present but no token, the middleware
rejects the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_and_token(self):
"""
Check that if both a cookie and a token is present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
Check that if a CSRF cookie is present and no token, but the csrf_exempt
decorator has been applied to the view, the middleware lets it through
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {})
self.assertIsNone(req2)
def test_csrf_token_in_header(self):
"""
Check that we can pass in the token in a header instead of in the form
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED')
def test_csrf_token_in_header_with_customized_name(self):
"""
settings.CSRF_HEADER_NAME can be used to customize the CSRF header name
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_put_and_delete_rejected(self):
"""
Tests that HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
|
HashirZahir/FIFA-Player-Ratings
|
FIFAscrape/spiders/fifa_spider.py
|
Python
|
mit
| 3,458
| 0.014748
|
from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
from FIFAscrape.items import PlayerItem
from urlparse import urlparse, urljoin
from scrapy.http.request import Request
from scrapy.conf import settings
import random
import time
class fifaSpider(Spider):
name = "fifa"
allowed_domains = ["futhead.com"]
start_urls = [
"http://www.futhead.com/16/players/?level=all_nif&bin_platform=ps"
]
def parse(self, response):
#obtains links from page to page and passes links to parse_playerURL
sel = Selector(response) #define selector based on response object (points to urls in start_urls by default)
url_list = sel.xpath('//a[@class="display-block padding-0"]/@href') #obtain a list of href links that contain relative links of players
for i in url_list:
relative_url = self.clean_str(i.extract()) #i is a selector and hence need to extract it to obtain unicode object
print urljoin(response.url, relative_url) #urljoin is able to merge absolute and relative paths to form 1 coherent link
req = Request(urljoin(response.url, relative_url),callback=self.parse_playerURL) #pass on request with new urls to parse_playerURL
req.headers["User-Agent"] = self.random_ua()
yield req
next_url=sel.xpath('//div[@class="right-nav pull-right"]/a[@rel="next"]/@href').extract_first()
if(next_url): #checks if next page exists
clean_next_url = self.clean_str(next_url)
reqNext = Request(urljoin(response.url, clean_next_url),callback=self.parse) #calls back this function to repeat process on new list of links
yield reqNext
def parse_playerURL(self, response):
#parses player specific data into items list
site = Selector(response)
items = []
item = PlayerItem()
item['1name'] = (response.url).rsplit("/")[-2].replace("-"," ")
title = self.clean_str(site.xpath('/html/head/title/text()').extract_first())
item['OVR'] = title.partition("FIFA 16 -")[1].split("-")[0]
item['POS'] = self.clean_str(site.xpath('//div[@class="playercard-position"]/text()').extract_first())
#stats = site.xpath('//div[@class="row player-center-container"]/div/a')
stat_names = site.xpath('//span[@class="player-stat-title"]')
stat_values = site.xpath('//span[contains(@class, "player-stat-value")]')
for index in range(len(stat_names)):
attr_name = stat_names[index].xpath('.//text()').extract_first()
item[attr_name] = stat_values[index].xpath('.//text()').extract_first()
items.append(item)
return items
def clean_str(self,ustring):
#removes weird unicode chars (/u102 bla), whitespaces, tabspaces, etc to form a clean string
return str(ustring.encode('ascii', 'replace')).strip()
def random_ua(self):
#randomise user-agent from list to reduce chance of being banned
ua = random.choice(settings.get('USER_AGENT_LIST'))
if ua:
ua='Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36'
return ua
|
bsmithers/CLIgraphs
|
histogram.py
|
Python
|
agpl-3.0
| 8,515
| 0.003641
|
#!/usr/bin/env python2
from __future__ import division
import itertools
import math
import sys
import numpy
import scipy.stats
import cligraph
import utils
"""
TODO:
- Auto-detect number of bins
- Fixed width or variable width bins
- Stacked bins, overlapped bins or bins next to each other
- Change which side of bin is open (default: bins are half-open, closed on left, except final bin
which is closed both sides)
"""
class Histogram(cligraph.CLIGraph):
def __init__(self, **kwargs):
super(Histogram, self).__init__(**kwargs)
self.data = []
self.data_params = []
def check_args(self, cli_args, inputs):
super(Histogram, self).check_args(cli_args, inputs)
self.fields = utils.get_columns_from_string(cli_args.field)
self.colours = itertools.cycle(cli_args.colours.split(','))
self.markers = itertools.cycle(cli_args.markers)
self.alphas = utils.map_csv_to_cycle(cli_args.alpha, float)
self.histtypes = itertools.cycle(cli_args.hist_type.split(','))
if cli_args.legends:
self.legends = itertools.cycle(cli_args.legends)
else:
self.legends = itertools.cycle([None])
# Should we store all data and render only after reading everything?
self.store = False
if cli_args.unify_bins:
self.store = True
# Set bin defaults if none given
if not cli_args.bins and not cli_args.bin_size:
cli_args.bins = 10
return bool(self.fields) and bool(self.alphas)
def get_parser(self):
parser = super(Histogram, self).get_parser()
# Inputs
parser.add_argument('-f', '--field', help='Column to read values from. (1-based indexing). \
Unix cut format for multiple columns. Default = 1', default='1')
# Histogram setup
parser.add_argument('--normed', help='Normalise frequency?', action="store_true",
default=False)
parser.add_argument("--cumulative", help="Cumulative Frequency? Default=0",
action="store_true", default=False)
parser.add_argument("--logscale", help="Use a logarithmic y-axs", action="store_true",
default=False)
parser.add_argument("--legends", nargs="+", help="Dataset legends", default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--bins', help='Number of bins. If not given and bin-size not \
given, this will default to 10', type=int)
group.add_argument('-z', '--bin-size', help='Size of each bin', type=float)
parser.add_argument('-u', '--unify-bins', action="store_true", default=False,
help='Unify bin sizes across different input sources')
parser.add_argument('--disable-bin-offset', help="By default, bins are offset by half their\
width to help bins straddle integer values for example",
action="store_true", default=False)
# Visual
parser.add_argument('-c', '--colours', default='r,g,b,c,y,m,k')
parser.add_argument('-m', '--markers', default=' ')
parser.add_argument('-a', '--alpha', default='0.5')
parser.add_argument('-y', '--hist-type', default='bar')
return parser
def input_started_hook(self, axes, cli_args, inp, inp_index):
"""
Setup data structures
"""
if not self.store:
self.data = []
self.data_params = []
for _ in self.fields:
self.data.append([])
self.data_params.append({'min': float('inf'), 'max': float('-inf')})
def input_ended_hook(self, axes, cli_args, inp, inp_index):
"""
Draw histogram at end of input unless we have to store data (e.g. for bin calculation)
"""
if self.store:
return
self.__draw_histogram(axes, cli_args)
def process_input_by_fields(self, axes, cli_args, inp, inp_index, fields):
"""
Store value for each dataset
"""
for index, column in enumerate(self.fields):
value = float(fields[column])
if self.store:
index = inp_index * len(self.fields) + index
# Store min/max values for bin work
self.data_params[index]['min'] = min(value, self.data_params[index]['min'])
self.data_params[index]['max'] = max(value, self.data_params[index]['max'])
self.data[index].append(float(fields[column]))
def process_input(self, axes, cli_args, inputs):
"""
If we are doing bin-size auto detection and require a consistent bin size
across different inputs, we will have to read all data first before
we can process
"""
super(Histogram, self).process_input(axes, cli_args, inputs)
if self.store:
self.__draw_histogram(axes, cli_args)
def apply_lables_and_titles(self, fig, axes, cli_args):
"""
Add legend if we have them
TODO: This can probably be done more generally, just have to be careful about
plots with multiple axes.
"""
super(Histogram, self).apply_lables_and_titles(fig, axes, cli_args)
if cli_args.legends:
axes.legend()
def __draw_histogram(self, axes, cli_args):
"""
Plot histograms for all datasets in current data
"""
for index, dataset in enumerate(self.data):
bins = self.__get_bins(cli_args, index)
axes.hist(dataset, bins, facecolor=self.colours.next(), alpha=self.alphas.next(),
normed=cli_args.normed, cumulative=cli_args.cumulative,
log=cli_args.logscale, label=self.legends.next(), hatch=self.markers.next(),
histtype=self.histtypes.next())
def __get_bins(self, cli_args, index):
"""
Get the bin histogram parameter for the data at the given index. Use the supplied
number of bins if given. Otherwise, calculate based on the supplied bin width.
"""
# Short-circuit if we are given number of bins and not using equal bins
if cli_args.bins and not self.store:
return cli_args.bins
# Get the minimum and maximum values either for this dataset or for all datasets
# if we are post-processing
min_val = self.data_params[index]['min']
max_val = self.data_params[index]['max']
if self.store:
min_val = min([self.data_params[i]['min'] for i in range(0, len(self.data_params))])
max_val = max([self.data_params[i]['max'] for i in range(0, len(self.data_params))])
# For a fixed number of bins, do a linear fit. Otherwise, use a range with bin size
if cli_args.bins:
# Fit one extra value to include right edge (same as normal histogram behaviour)
return numpy.linspace(min_val, max_val, cli_args.bins + 1)
# Compute bins. Do not use range as values may be floats.
# Lowest bin should be the largest multiple of bin_size that is <= min_val
# Highest bin should be smallest multiple of bin_size that is >= max_val
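# Worked example (added for illustration): with bin_size=0.5, min_val=1.2 and
# max_val=3.1, the lowest edge is floor(1.2/0.5)*0.5 = 1.0, shifted down by
# half a bin to 0.75 by default, and edges are then appended in 0.5 steps
# until they pass max_val.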
bins = []
i = math.floor(min_val / cli_args.bin_size) * cli_args.bin_size
# By default, bins are offset by half their width from the lowest value rather
# than by their full width
if not cli_args.disable_bin_offset:
i -= cli_args.bin_size / 2
else:
i -= cli_args.bin_size
while i <= max_val:
bins.append(i)
i += cli_args.bin_size
bins.append(i) # Add final bin
# Combine offscreen bins for faster renders
if cli_args.min_x and cli_args.min_x > min_val:
first_onscreen = max([index for index, b in enumerate(bins) if b <= cli_args.min_x])
# Include the first bin so that this captures everything offscreen
if first_onscreen >= 2:
bins = [bins[0]] + bins[first_onscreen:]
if cli_args.max_x and cl
|
chitr/neutron
|
neutron/agent/linux/keepalived.py
|
Python
|
apache-2.0
| 15,590
| 0
|
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import itertools
import os
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import external_process
from neutron.common import exceptions
from neutron.common import utils as common_utils
VALID_STATES = ['MASTER', 'BACKUP']
VALID_AUTH_TYPES = ['AH', 'PASS']
HA_DEFAULT_PRIORITY = 50
PRIMARY_VIP_RANGE_SIZE = 24
# TODO(amuller): Use L3 agent constant when new constants module is introduced.
FIP_LL_SUBNET = '169.254.30.0/23'
KEEPALIVED_SERVICE_NAME = 'keepalived'
GARP_MASTER_REPEAT = 5
GARP_MASTER_REFRESH = 10
LOG = logging.getLogger(__name__)
def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE):
"""Get a free IP range, from parent_range, of the specified size.
:param parent_range: String representing an IP range. E.g: '169.254.0.0/16'
:param excluded_ranges: A list of strings to be excluded from parent_range
:param size: What should be the size of the range returned?
:return: A string representing an IP range
"""
free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges)
for cidr in free_cidrs.iter_cidrs():
if cidr.prefixlen <= size:
return '%s/%s' % (cidr.network, size)
raise ValueError(_('Network of size %(size)s, from IP range '
'%(parent_range)s excluding IP ranges '
'%(excluded_ranges)s was not found.') %
{'size': size,
'parent_range': parent_range,
'excluded_ranges': excluded_ranges})
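# Usage sketch (added for illustration, not in the original module): e.g.
#   get_free_range('169.254.0.0/16', ['169.254.169.254/32', '169.254.30.0/23'])
# removes the excluded blocks from the parent range and would typically return
# '169.254.0.0/24', the first /24-sized slice of the remaining free space.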
class InvalidInstanceStateException(exceptions.NeutronException):
message = _('Invalid instance state: %(state)s, valid states are: '
'%(valid_states)s')
def __init__(self, **kwargs):
if 'valid_states' not in kwargs:
kwargs['valid_states'] = ', '.join(VALID_STATES)
super(InvalidInstanceStateException, self).__init__(**kwargs)
class InvalidAuthenticationTypeException(exceptions.NeutronException):
message = _('Invalid authentication type: %(auth_type)s, '
'valid types are: %(valid_auth_types)s')
def __init__(self, **kwargs):
if 'valid_auth_types' not in kwargs:
kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES)
super(InvalidAuthenticationTypeException, self).__init__(**kwargs)
class VIPDuplicateAddressException(exceptions.NeutronException):
message = _('Attempted to add duplicate VIP address, '
'existing vips are: %(existing_vips)s, '
'duplicate vip is: %(duplicate_vip)s')
def __init__(self, **kwargs):
kwargs['existing_vips'] = ', '.join(str(vip) for vip in
kwargs['existing_vips'])
super(VIPDuplicateAddressException, self).__init__(**kwargs)
class KeepalivedVipAddress(object):
"""A virtual address entry of a keepalived configuration."""
def __init__(self, ip_address, interface_name, scope=None):
self.ip_address = ip_address
self.interface_name = interface_name
self.scope = scope
def __eq__(self, other):
return (isinstance(other, KeepalivedVipAddress) and
self.ip_address == other.ip_address)
def __str__(self):
return '[%s, %s, %s]' % (self.ip_address,
self.interface_name,
self.scope)
def build_config(self):
result = '%s dev %s' % (self.ip_address, self.interface_name)
if self.scope:
result += ' scope %s' % self.scope
return result
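# Illustrative example (added, not in the original file):
#   KeepalivedVipAddress('192.168.1.5/24', 'eth0', scope='link').build_config()
# returns '192.168.1.5/24 dev eth0 scope link', i.e. a single virtual address
# entry for the generated keepalived configuration.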
class KeepalivedVirtualRoute(object):
"""A virtual route entry of a keepalived configuration."""
def __init__(self, destination, nexthop, interface_name=None,
scope=None):
self.destination = destination
self.nexthop = nexthop
self.interface_name = interface_name
self.scope = scope
def build_config(self):
output = self.destination
if self.nexthop:
output += ' via %s' % self.nexthop
if self.interface_name:
output += ' dev %s' % self.interface_name
if self.scope:
output += ' scope %s' % self.scope
return output
class KeepalivedInstanceRoutes(object):
def __init__(self):
self.gateway_routes = []
self.extra_routes = []
self.extra_subnets = []
def remove_routes_on_interface(self, interface_name):
self.gateway_routes = [gw_rt for gw_rt in self.gateway_routes
if gw_rt.interface_name != interface_name]
# NOTE(amuller): extra_routes are initialized from the router's
# 'routes' attribute. These routes do not have an interface
# parameter and so cannot be removed via an interface_name lookup.
self.extra_subnets = [route for route in self.extra_subnets if
route.interface_name != interface_name]
@property
def routes(self):
return self.gateway_routes + self.extra_routes + self.extra_subnets
def __len__(self):
return len(self.routes)
def build_config(self):
return itertools.chain([' virtual_routes {'],
(' %s' % route.build_config()
for route in self.routes),
[' }'])
class KeepalivedInstance(object):
"""Instance section of a keepalived configuration."""
def __init__(self, state, interface, vrouter_id, ha_cidrs,
priority=HA_DEFAULT_PRIORITY, advert_int=None,
mcast_src_ip=None, nopreempt=False,
garp_master_repeat=GARP_MASTER_REPEAT,
garp_master_refresh=GARP_MASTER_REFRESH):
self.name = 'VR_%s' % vrouter_id
if state not in VALID_STATES:
raise InvalidInstanceStateException(state=state)
self.state = state
self.interface = interface
self.vrouter_id = vrouter_id
self.priority = priority
self.nopreempt = nopreempt
self.advert_int = advert_int
self.mcast_src_ip = mcast_src_ip
self.garp_master_repeat = garp_master_repeat
self.garp_master_refresh = garp_master_refresh
self.track_interfaces = []
self.vips = []
self.virtual_routes = KeepalivedInstanceRoutes()
self.authentication = None
metadata_cidr = '169.254.169.254/32'
self.primary_vip_range = get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=[metadata_cidr, FIP_LL_SUBNET] + ha_cidrs,
size=PRIMARY_VIP_RANGE_SIZE)
def set_authentication(self, auth_type, password):
if auth_type not in VALID_AUTH_TYPES:
raise InvalidAuthenticationTypeException(auth_type=auth_type)
self.authentication = (auth_type, password)
def add_vip(self, ip_cidr, interface_name, scope):
vip = KeepalivedVipAddress(ip_cidr, interface_name, scope)
if vip in self.vips:
raise VIPDuplicateAddressException(existing_vips=self.vips,
duplicate_vip=vip)
self.vips.append(vip)
def remove_vips_vroutes_by_interface(self, interface_name):
self.vips = [vip for vip in self.vips
if vip.interface_name != interface_name]
self.virtual_routes.remove_routes_on_interface(interface_name)
    def remove_vip_by_ip_address(self, ip_address):
        self.vips = [vip for vip in self.vips
                     if vip.ip_address != ip_address]
|
ketan-analytics/learnpython
|
IntermediatePython/Lynda_Bill_PYEssential/SQL.py
|
Python
|
gpl-2.0
| 1,160
| 0.002586
|
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
import sqlite3
def main():
print('connect')
db = sqlite3.connect('db-api.db')
cur = db.cursor()
print('create')
cur.execute("DROP TABLE IF EXISTS test")
cur.execute("""
CREATE TABLE test (
id INTEGER PRIMARY KEY, string TEXT, number INTEGER
)
""")
print('insert row')
cur.execute("""
INSERT INTO
|
test (string, number) VALUES ('one', 1)
""")
print('insert row')
cur.execute("""
INSERT INTO test (string, number) VALUES ('two', 2)
""")
print('insert row')
cur.execute("""
INSERT INTO test (string, number) VALUES ('three', 3)
|
""")
print('commit')
db.commit()
print('count')
cur.execute("SELECT COUNT(*) FROM test")
count = cur.fetchone()[0]
print(f'there are {count} rows in the table.')
print('read')
for row in cur.execute("SELECT * FROM test"):
print(row)
print('drop')
cur.execute("DROP TABLE test")
print('close')
db.close()
if __name__ == '__main__': main()
|
vpelletier/neoppod
|
neo/tests/zodb/testRecovery.py
|
Python
|
gpl-2.0
| 1,770
| 0.00226
|
#
# Copyright (C) 2009-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import ZODB
from ZODB.tes
|
ts.RecoveryStorage import RecoveryStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from ..functional import NEOCluster
from
|
. import ZODBTestCase
class RecoveryTests(ZODBTestCase, StorageTestBase, RecoveryStorage):
def setUp(self):
super(RecoveryTests, self).setUp()
dst_temp_dir = self.getTempDirectory() + '-dst'
if not os.path.exists(dst_temp_dir):
os.makedirs(dst_temp_dir)
self.neo_dst = NEOCluster(['test_neo1-dst'], partitions=1, replicas=0,
master_count=1, temp_dir=dst_temp_dir)
self.neo_dst.stop()
self.neo_dst.setupDB()
self.neo_dst.start()
self._dst = self.neo.getZODBStorage()
self._dst_db = ZODB.DB(self._dst)
def _tearDown(self, success):
super(RecoveryTests, self)._tearDown(success)
self._dst_db.close()
self._dst.cleanup()
self.neo_dst.stop()
if __name__ == "__main__":
suite = unittest.makeSuite(RecoveryTests, 'check')
unittest.main(defaultTest='suite')
|
SalesforceFoundation/CumulusCI
|
cumulusci/tasks/bulkdata/generate_and_load_data.py
|
Python
|
bsd-3-clause
| 8,766
| 0.00308
|
import os
from tempfile import TemporaryDirectory
from pathlib import Path
from sqlalchemy import MetaData, create_engine
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.tasks.bulkdata import LoadData
from cumulusci.tasks.bulkdata.utils import generate_batches
from cumulusci.core.config import TaskConfig
from cumulusci.core.utils import import_global
from cumulusci.core.exceptions import TaskOptionsError
class GenerateAndLoadData(BaseSalesforceApiTask):
""" Orchestrate creating tempfiles, generating data, loading data, cleaning up tempfiles and batching."""
task_docs = """
Orchestrate creating tempfiles, generating data, loading data, cleaning up tempfiles and batching.
CCI has features for generating data and for loading them into orgs. This task pulls them
together to give some useful additional features, such as storing the intermediate data in
a tempfile (the default behavior) and generating the data in batches instead of all at
once (controlled by the `batch_size` option).
The simplest possible usage is to specify the number of records you'd like generated, a
mapping file that defines the schema and a data generation task written in Python to actually
generate the data.
Use the `num_records` option to specify how many records to generate.
Use the `mapping` option to specify a mapping file.
Use 'data_generation_task' to specify what Python class to use to generate the data.'
Use 'batch_size' to specify how many records to generate and upload in every batch.
By default it creates the data in a temporary file and then cleans it up later. Specify database_url if you
need more control than that. Existing data tables will be emptied before being refilled.
Your database will be completely deleted!
If you use database_url and batch_size together, latter batches will overwrite
earlier batches in the database and the first batch will replace tables if they exist.
A table mapping IDs to SFIds will persist across batches and will grow monotonically.
If your generator class makes heavy use of Faker, you might be interested in this patch
which frequently speeds Faker up. Adding that code to the bottom of your generator file may
help accelerate i
|
t.
https://sfdc.co/
|
bwKxDD
"""
task_options = {
"num_records": {
"description": "How many records to generate. Precise calcuation depends on the generator.",
"required": True,
},
"num_records_tablename": {
"description": "Which table to count records in.",
"required": False,
},
"batch_size": {
"description": "How many records to create and load at a time.",
"required": False,
},
"data_generation_task": {
"description": "Fully qualified class path of a task to generate the data. Look at cumulusci.tasks.bulkdata.tests.dummy_data_factory to learn how to write them.",
"required": True,
},
"data_generation_options": {
"description": "Options to pass to the data generator.",
"required": False,
},
"vars": {
"description": "Variables that the generate or load tasks might need.",
},
"replace_database": {
"description": "Confirmation that it is okay to delete the data in database_url",
},
"working_directory": {
"description": "Store temporary files in working_directory for easier debugging."
},
**LoadData.task_options,
}
task_options["mapping"]["required"] = False
def _init_options(self, kwargs):
super()._init_options(kwargs)
mapping_file = self.options.get("mapping", None)
if mapping_file:
self.mapping_file = os.path.abspath(mapping_file)
if not os.path.exists(self.mapping_file):
raise TaskOptionsError(f"{self.mapping_file} cannot be found.")
else:
self.mapping_file = None
self.database_url = self.options.get("database_url")
num_records = self.options.get("num_records")
if not num_records:
raise TaskOptionsError(
"Please specify the number of records to generate with num_records"
)
self.num_records = int(num_records)
self.batch_size = int(self.options.get("batch_size", self.num_records))
if self.batch_size <= 0:
raise TaskOptionsError("Batch size should be greater than zero")
class_path = self.options.get("data_generation_task")
if class_path:
self.data_generation_task = import_global(class_path)
else:
raise TaskOptionsError("No data generation task specified")
self.working_directory = self.options.get("working_directory", None)
self.database_url = self.options.get("database_url")
if self.database_url:
engine, metadata = self._setup_engine(self.database_url)
tables = metadata.tables
if len(list(tables)) and not self.options.get("replace_database"):
raise TaskOptionsError(
f"Database {self.database_url} has tables "
f"({list(tables)}) "
"but `replace_database` was not specified"
)
def _run_task(self):
with TemporaryDirectory() as tempdir:
working_directory = self.options.get("working_directory")
if working_directory:
tempdir = Path(working_directory)
tempdir.mkdir(exist_ok=True)
for current_batch_size, index in generate_batches(
self.num_records, self.batch_size
):
self.logger.info(
f"Generating a data batch, batch_size={current_batch_size} "
f"index={index} total_records={self.num_records}"
)
self._generate_batch(
self.database_url,
self.working_directory or tempdir,
self.mapping_file,
current_batch_size,
index,
)
def _datagen(self, subtask_options):
task_config = TaskConfig({"options": subtask_options})
data_gen_task = self.data_generation_task(
self.project_config, task_config, org_config=self.org_config
)
data_gen_task()
def _dataload(self, subtask_options):
subtask_config = TaskConfig({"options": subtask_options})
subtask = LoadData(
project_config=self.project_config,
task_config=subtask_config,
org_config=self.org_config,
flow=self.flow,
name=self.name,
stepnum=self.stepnum,
)
subtask()
def _generate_batch(self, database_url, tempdir, mapping_file, batch_size, index):
"""Generate a batch in database_url or a tempfile if it isn't specified."""
if not database_url:
sqlite_path = Path(tempdir) / "generated_data.db"
database_url = f"sqlite:///{sqlite_path}"
self._cleanup_object_tables(*self._setup_engine(database_url))
subtask_options = {
**self.options,
"mapping": mapping_file,
"reset_oids": False,
"database_url": database_url,
"num_records": batch_size,
"current_batch_number": index,
"working_directory": tempdir,
}
# some generator tasks can generate the mapping file instead of reading it
if not subtask_options.get("mapping"):
temp_mapping = Path(tempdir) / "temp_mapping.yml"
mapping_file = self.options.get("generate_mapping_file", temp_mapping)
subtask_options["generate_mapping_file"] = mapping_file
self._datagen(subtask_options)
if not subtask_options.get("mapping"):
subtask_options["mapping"] = mapping_file
self._dataload(subtask_options)
def _setup_engine(self, datab
|
denisenkom/django
|
tests/signals/tests.py
|
Python
|
bsd-3-clause
| 5,273
| 0.000759
|
from __future__ import unicode_literals
from django.db.models import signals
from django.dispatch import receiver
from django.test import TestCase
from django.utils import six
from .models import Person, Car
# #8285: signals can be any callable
class PostDeleteHandler(object):
def __init__(self, data):
self.data = data
def __call__(self, signal, sender, instance, **kwargs):
self.data.append(
(instance, instance.id is None)
)
class MyReceiver(object):
def __init__(self, param):
self.param = param
self._run = False
def __call__(self, signal, sender, **kwargs):
self._run = True
signal.disconnect(receiver=self, sender=sender)
class SignalTests(TestCase):
def test_basic(self):
# Save up the number of connected signals so that we can check at the
# end that all the signals we register get properly unregistered (#9989)
pre_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
data = []
def pre_save_test(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("raw", False))
)
signals.pre_save.connect(pre_save_test)
def post_save_test(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("created"), kwargs.get("raw", False))
)
signals.post_save.connect(post_save_test)
def pre_delete_test(signal, sender, instance, **kwargs):
data.append(
(instance, instance.id is None)
)
signals.pre_delete.connect(pre_delete_test)
post_delete_test = PostDeleteHandler(data)
signals.post_delete.connect(post_delete_test)
# throw a decorator syntax receiver into the mix
@receiver(signals.pre_save)
def pre_save_decorator_test(signal, sender, instance, **kwargs):
data.append(instance)
@receiver(signals.pre_save, sender=Car)
def pre_save_decorator_sender_test(signal, sender, instance, **kwargs):
data.append(instance)
p1 = Person(first_name="John", last_name="Smith")
self.assertEqual(data, [])
p1.save()
self.assertEqual(data, [
(p1, False),
p1,
(p1, True, False),
])
data[:] = []
p1.first_name = "Tom"
p1.save()
self.assertEqual(data, [
(p1, False),
p1,
(p1, False, False),
])
data[:] = []
# Car signal (sender defined)
c1 = Car(make="Volkswagon", model="Passat")
c1.save()
self.assertEqual(data, [
(c1, False),
c1,
c1,
(c1, True, False),
])
data[:] = []
# Calling an internal method purely so that we can trigger a "raw" save.
p1.save_base(raw=True)
self.assertEqual(data, [
(p1, True),
p1,
(p1, False, True),
])
data[:] = []
p1.delete()
self.assertEqual(data, [
(p1, False),
(p1, False),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
self.assertEqual(data, [
(p2, False),
p2,
(p2, True, False),
])
data[:] = []
p2.id = 99998
p2.save()
self.assertEqual(data, [
(p2, False),
p2,
(p2, True, False),
])
data[:] = []
p2.delete()
self.assertEqual(data, [
(p2, False),
(p2, False)
])
self.assertQuerysetEqual(
Person.objects.all(), [
"James Jones",
],
six.text_type
)
signals.post_delete.disconnect(post_delete_test)
signals.pre_delete.disconnect(pre_delete_test)
signals.post_save.disconnect(post_save_test)
signals.pre_save.disconnect(pre_save_test)
signals.pre_save.disconnect(pre_save_decorator_test)
signals.pre_save.disconnect(pre_save_decorator_sender_test, sender=Car)
# Check that all our signals got disconnected properly.
post_si
|
gnals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
self.assertEqual(pre_signals, post_signals)
def test_disconnect_in_dispatch(self):
"""
Test that signals that disconnect when being called don't mess future
dispat
|
ching.
"""
a, b = MyReceiver(1), MyReceiver(2)
signals.post_save.connect(sender=Person, receiver=a)
signals.post_save.connect(sender=Person, receiver=b)
p = Person.objects.create(first_name='John', last_name='Smith')
self.assertTrue(a._run)
self.assertTrue(b._run)
self.assertEqual(signals.post_save.receivers, [])
|
DigitalSlideArchive/large_image
|
.circleci/make_index.py
|
Python
|
apache-2.0
| 855
| 0
|
#!/usr/bin/env python
import os
import sys
import time
path = 'gh-pages' if len(sys.argv) == 1 else sys.argv[1]
indexName = 'index.html'
template = """<html>
<head><title>large_image_wheels</
|
title></head>
<body>
<h1>large_image_wheels</h1>
<pre>
%LINKS%
</pre>
</body>
"""
link = '<a href="%s" download="%s">%s</a>%s%s%11d'
wheels = [(name, name) for name in os.listdir(path) if name.endswith('whl')]
wheels = sorted(wheels)
maxnamelen = max(len(name) for name, url in wheels)
index = template.replace('%LINKS%', '\n'.join([
link % (
url, name, name, ' ' * (maxnamelen + 3 - len(name)),
time.strftime('
|
%Y-%m-%d %H:%M:%S', time.gmtime(os.path.getmtime(
os.path.join(path, name)))),
os.path.getsize(os.path.join(path, name)),
) for name, url in wheels]))
open(os.path.join(path, indexName), 'w').write(index)
|
polarise/BioClasses
|
FrameshiftSequence.py
|
Python
|
gpl-2.0
| 4,907
| 0.057469
|
# -*- encoding: utf-8 -*-
from __future__ import division
import math
from FrameshiftSite import *
class FrameshiftSequence( object ):
def __init__( self, sequence, path ):
self.path = path
self.path_str = ",".join( map( str, [ a for a,b in path ]))
self.frameshifted_sequence, self.fragments, self.fragment_positions, \
self.signals = self.frameshift_from_path( sequence, path )
self.length = len( self.frameshifted_sequence )
self.frameshift_count = len( self.path ) - 1
self.CAI = None
self.likelihood = None
self.graded_likelihood = None
self.differential_graded_likelihood = None
self.radians = None
self.radian_sums = None
self.indexes = None
self.frameshift_sites = dict()
self.GC_content = None
self.gradient = None
self.partial_gradients = list()
#*****************************************************************************
def __repr__( self ):
return """\
Path: %s
Frameshifted sequence: %s
Fragments: %s
Signals: %s
Length: %s
No. of frameshifts: %s
|
CAI: %s
Log-likelihood: %s"""\
% ( ",".join( map( str, self.path )), \
"...".join([ self.frameshifted_sequence[:20], \
self.frameshifted_sequence[-20:] ]), ", ".join( self.fragments ),\
",".join( self.signals ), self.length, self.frameshift_count, self.CAI, self.likelihood )
#*****************************************************************************
def repr_as_row(
|
self, sep="\t" ):
return sep.join([ "...".join([ self.frameshifted_sequence[:20],
self.frameshifted_sequence[-20:] ]), str( self.length ), \
str( self.frameshift_count ), str( self.CAI ), \
str( self.CAI/math.sqrt( self.frameshift_count + 1 )), ",".join( map( str, self.path )), \
",".join( self.signals ), str( self.likelihood )])
#*****************************************************************************
def frameshift_from_path( self, sequence, path ):
"""
"""
# first get all frame of sequence
sequence_in_frames = dict()
for i in xrange( 3 ):
sequence_in_frames[i] = sequence[i:]
frameshifted_sequence = ""
fragments = list()
fragment_positions = [ 0 ]
frameshift_signals = list()
i = 0
f_i = 0
for f,j in path:
frameshifted_sequence += sequence[i+(f-f_i):j]
fragments.append( sequence[i+(f-f_i):j] )
fragment_positions.append( fragment_positions[-1] + len( sequence[i+(f-f_i):j] ))
frameshift_signals.append( sequence[j-3:j+3] )
i = j
f_i = f
# we could factor in the last trivial nucleotide...
frameshifted_sequence += sequence[-1]
fragments[-1] += sequence[-1]
return frameshifted_sequence, fragments, fragment_positions, frameshift_signals[:-1]
#*****************************************************************************
def find_frameshift_sites( self ):
def frameshift_position_score( x, L ):
"""
triangular function
P( frameshift ) is maximum in the middle and decreases to the edges
"""
if x < L/2:
return x/(L/2)
else:
return ( L - x )/(L/2)
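		# Worked check (illustrative): frameshift_position_score( L/4, L ) == 0.5, and the score peaks at 1.0 when x == L/2.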
for i in xrange( len( self.indexes ) - 1 ):
if self.indexes[i] == 0 and self.indexes[i + 1] == 0:
initial_node = self.path[i]
final_node = self.path[i+1]
signal = self.signals[i]
radians_vector = self.radians[i]
position_score = frameshift_position_score( initial_node[1], self.length )
self.frameshift_sites[initial_node] = FrameshiftSite( initial_node, \
final_node, signal, self.length, position_score, radians_vector )
#*****************************************************************************
def estimate_GC_content( self ):
self.GC_content = ( self.frameshifted_sequence.count( "C" ) + \
self.frameshifted_sequence.count( "G" ))/self.length
return self.GC_content
#*****************************************************************************
def estimate_gradient( self ):
self.gradient = self.differential_graded_likelihood[-1]/self.length
return self.gradient
#*****************************************************************************
def estimate_partial_gradients( self ):
#print >> sys.stderr, self.fragment_positions, self.length
#print >> sys.stderr, len( self.differential_graded_likelihood )
self.partial_gradients = list()
tau0 = 0
for tau1 in self.fragment_positions[1:-1]:
#print >> sys.stderr, "tau0",tau0,"tau1",tau1
lambda0 = self.differential_graded_likelihood[tau0//3]
lambda1 = self.differential_graded_likelihood[tau1//3]
m = ( lambda1 - lambda0 )/( tau1 - tau0 )
self.partial_gradients.append( m )
tau0 = tau1
tau1 = len( self.differential_graded_likelihood )
lambda0 = self.differential_graded_likelihood[tau0//3]
lambda1 = self.differential_graded_likelihood[-1]
m = ( lambda1 - lambda0 )/( tau1 - tau0 )
self.partial_gradients.append( m )
return self.partial_gradients
|
ajkannan/Classics-Research
|
tf_idf_one_class_svm.py
|
Python
|
mit
| 2,185
| 0.044394
|
from os import listdir
from os.path import isfile, join
from Utilities.Text import Text
from Utilities.TermFrequencyInverseDocumentFrequency import TermFrequencyInverseDocumentFrequency as TFIDF
from sklearn import svm
from pprint import pprint
import numpy as np
from sklearn.decomposition import PCA
def main():
path = "./Texts/"
files = [f for f in listdir(path) if isfile(join(path, f))]
tfidf = TFIDF()
for document in files:
tfidf.add_text_to_corpus(Text(path + document))
features, word_list = tfidf.calculate_features_for_corpus()
apply_pca = True
if apply_pca:
pca = PCA(n_components = features.shape[1])
x = {
"train" : pca.fit_transform(features[[0, 2, 4, 5, 6, 7], :]),
"test" : pca.transform(features[[1, 3], :])
}
else:
x = {
"train" : features[[0, 2, 4, 5, 6, 7], :],
"test" : features[[1, 3], :]
}
# Unfortunately, it does not appear to be possible to derive a perfect
# accuracy solution in the grid search specified below. However, it is
# provided here anyway for educational purposes.
grid_search = False
if grid_search:
for kernel in ["rbf", "linear", "sigmoid", "poly"]:
for nu in np.linspace(0.001,1.0,200):
for gamma in np.linspace(0.0,10.0,200):
clf = svm.OneClassSVM(nu = nu, kernel = kernel, gamma
|
= gamma)
clf.fit(x["train"])
y = {
"trai
|
n" : clf.predict(x["train"]),
"test" : clf.predict(x["test"])
}
if all(y["train"] == 1.0) and all(y["test"] == -1.0):
pprint({"nu" : nu, "gamma" : gamma, "y" : y, "kernel" : kernel})
# The following settings using term-frequency inverse-document frequency
# gives a perfect classification result for the problem of Seneca's
# authorship attribution.
nu, kernel, gamma = 0.84437688442211067, "poly", 0.0
clf = svm.OneClassSVM(nu = nu, kernel = kernel, gamma = gamma)
clf.fit(x["train"])
y = {
"train" : clf.predict(x["train"]),
"test" : clf.predict(x["test"])
}
metrics = {
"train" : clf.decision_function(x["train"]),
"test" : clf.decision_function(x["test"])
}
pprint({"nu" : nu, "gamma" : gamma, "y" : y, "kernel" : kernel, "metrics" : metrics})
if __name__ == "__main__":
main()
|
yephper/django
|
tests/prefetch_related/test_prefetch_related_objects.py
|
Python
|
bsd-3-clause
| 4,853
| 0.001648
|
from django.db.models import Prefetch, prefetch_related_objects
from django.test import TestCase
from .models import Author, Book, Reader
class PrefetchRelatedObjectsTests(TestCase):
"""
Since prefetch_related_objects() is just the inner part of
prefetch_related(), only do basic tests to ensure its API hasn't changed.
"""
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
def test_unknown(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertRaises(AttributeError):
prefetch_related_objects([book1], 'unknown_attribute')
def test_m2m_forward(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], 'authors')
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2, self.author3})
def test_m2m_reverse(self):
author1 = Author.objects.get(id=self.author1.id)
with self.assertNumQueries(1):
prefetch_related_objects([author1], 'books')
with self.assertNumQueries(0):
self.assertEqual(set(author1.books.all()), {self.book1, self.book2})
def test_foreignkey_forward(self):
authors = list(Author.objects.all())
with self.assertNumQueries(1):
prefetch_related_objects(authors, 'first_book')
with self.assertNumQueries(0):
[author.first_book for author in authors]
def test_foreignkey_reverse(self):
books = list(Book.objects.all())
with self.assertNumQueries(1):
prefetch_related_objects(books
|
, 'first_time_authors')
with self.assertNumQueries(0):
[list(book.first_time_authors.all()) for book in books]
def test_m2m_then_m2m(self):
"""
We can follow a m2m and another m2m.
"""
authors = list(Author.objects.all())
with self.ass
|
ertNumQueries(2):
prefetch_related_objects(authors, 'books__read_by')
with self.assertNumQueries(0):
self.assertEqual(
[
[[str(r) for r in b.read_by.all()] for b in a.books.all()]
for a in authors
],
[
[['Amy'], ['Belinda']], # Charlotte - Poems, Jane Eyre
[['Amy']], # Anne - Poems
[['Amy'], []], # Emily - Poems, Wuthering Heights
[['Amy', 'Belinda']], # Jane - Sense and Sense
]
)
def test_prefetch_object(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], Prefetch('authors'))
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2, self.author3})
def test_prefetch_object_to_attr(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], Prefetch('authors', to_attr='the_authors'))
with self.assertNumQueries(0):
self.assertEqual(set(book1.the_authors), {self.author1, self.author2, self.author3})
def test_prefetch_queryset(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects(
[book1],
Prefetch('authors', queryset=Author.objects.filter(id__in=[self.author1.id, self.author2.id]))
)
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2})
|
Qwaz/solved-hacking-problem
|
GoogleCTF/2018 Quals/dm_collision/not_des.py
|
Python
|
gpl-2.0
| 6,722
| 0.012348
|
#!/usr/bin/env python3
import functools
import struct
KEY_SIZE = 8
BLOCK_SIZE = 8
# yapf: disable
# Note the 1-based indexing in all the following tables.
IP = [
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9 ,1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
]
IP_INV = [
40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57,25,
]
E = [
32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1,
]
PC1_C = [
57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
]
PC1_D = [
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4,
]
PC2 = [
14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32,
]
KS_SHIFTS = [1,1,2,2,2,2,2,2,1,2,2,2,2,2,2,1]
P = [
16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25,
]
S1 = [
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
]
S2 = [
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
]
S3 = [
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
]
S4 = [
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
]
S5 = [
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
]
S6 = [
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
]
S7 = [
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
]
S8 = [
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
# yapf: enable
SBOXES = [S6, S4, S1, S5, S3, S2, S8, S7]
def Xor(b1, b2):
"""Xors two bit vectors together."""
return [x ^ y for x, y in zip(b1, b2)]
def Concat(*vectors):
"""Concats vectors."""
return functools.reduce(lambda x, y: x + y, vectors, [])
def Str2Bits(s):
"""Converts a string to a vector of bits."""
assert (isinstance(s, bytes))
def Char2Bits(num):
bits = bin(num)[2:]
bits = '0' * (8 - len(bits)) + bits
return [int(b) for b in bits]
return Concat(* [Char2Bits(c) for c in s])
def Bits2Str(v):
"""Converts a vector of bits to a string."""
def Bits2Char(byte):
return struct.pack('>B', int(''.join([str(b) for b in byte]), 2))
return b''.join([Bits2Char(v[8 * i:8 * i + 8]) for i in range(len(v) // 8)])
def Expand(v):
"""Expands 32bits into 48 bits."""
assert (len(v) == 32)
return [v[E[i] - 1] for i in range(48)]
def LeftShift(v, t=1):
"""Left shitfs (rotates) a vector of bits t times."""
return v[t:] + v[:t]
def KeyScheduler(key):
"""Yields round keys."""
assert (len(key) == 64)
    # Only 56 bits are used. A bit in each byte is reserved for parity checks.
C = [key[PC1_C[i] - 1] for i in range(28)]
D = [key[PC1_D[i] - 1] for i in range(28)]
for ri in range(16):
C = LeftShift(C, KS_SHIFTS[ri])
D = LeftShift(D, KS_SHIFTS[ri])
CD = Concat(C, D)
ki = [CD[PC2[i] - 1] for i in range(48)]
yield ki
def CipherFunction(key, inp):
"""Single confusion-diffusion step."""
assert (len(key) == 48)
assert (len(inp) == 32)
# Confusion step.
res = Xor(Expand(inp), key)
sbox_out = []
for si in range(48 // 6):
sbox_inp = res[6 * si:6 * si + 6]
sbox = SBOXES[si]
row = (int(sbox_inp[0]) << 1) + int(sbox_inp[-1])
col = int(''.join([str(b) for b in sbox_inp[1:5]]), 2)
bits = bin(sbox[row][col])[2:]
bits = '0' * (4 - len(bits)) + bits
sbox_out += [int(b) for b in bits]
# Diffusion step.
res = sbox_out
res = [res[P[i] - 1] for i in range(32)]
return res
def DESEncrypt(plaintext, key):
if isinstance(key, bytes):
key = Str2Bits(key)
assert (len(key) == 64)
if isinstance(plaintext, bytes):
plaintext = Str2Bits(plaintext)
# Initial permutation.
|
plaintext = [plaintext[IP[i] - 1] for i in range(64)]
L, R = plaintext[:32], plaintext[32:]
# Feistel network.
for ki in KeyScheduler(key):
L, R = R, Xor(L, CipherFunction(ki, R))
# Final permutation.
ciphertext = Concat(R, L)
ciphertext = [ciphertext[IP_INV[i] - 1] for i in range(64)]
return Bits
|
2Str(ciphertext)
def DESDecrypt(ciphertext, key):
if isinstance(key, bytes):
key = Str2Bits(key)
assert (len(key) == 64)
if isinstance(ciphertext, bytes):
ciphertext = Str2Bits(ciphertext)
# Initial permutation.
ciphertext = [ciphertext[IP[i] - 1] for i in range(64)]
L, R = ciphertext[:32], ciphertext[32:]
# Feistel network.
for ki in reversed(list(KeyScheduler(key))):
L, R = R, Xor(L, CipherFunction(ki, R))
# Final permutation.
plaintext = Concat(R, L)
plaintext = [plaintext[IP_INV[i] - 1] for i in range(64)]
return Bits2Str(plaintext)
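# Minimal round-trip sketch for the routines above; the key and block values are
# invented for illustration, not taken from the original challenge.
if __name__ == '__main__':
    key = b'8bytekey'      # 64-bit key; one bit per byte is nominally reserved for parity
    block = b'ABCDEFGH'    # a single 64-bit block
    ciphertext = DESEncrypt(block, key)
    assert DESDecrypt(ciphertext, key) == block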
|
joachimmetz/plaso
|
tests/parsers/chrome_cache.py
|
Python
|
apache-2.0
| 1,283
| 0.002338
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Chrome Ca
|
che files parser."""
import unittest
from plaso.parsers import chrome_cache
from tests.parsers import test_lib
class ChromeCacheParserTest(test_lib.ParserTestCase):
"""Tests for the Chrome Cache files parser."""
|
def testParse(self):
"""Tests the Parse function."""
parser = chrome_cache.ChromeCacheParser()
storage_writer = self._ParseFile(['chrome_cache', 'index'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 217)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'chrome:cache:entry',
'date_time': '2014-04-30 16:44:36.226091',
'original_url': (
'https://s.ytimg.com/yts/imgbin/player-common-vfliLfqPT.webp')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
itakouna/lymph
|
lymph/serializers/base.py
|
Python
|
apache-2.0
| 3,844
| 0.00026
|
import abc
import datetime
import decimal
import functools
import json
import uuid
import pytz
import msgpack
import six
from lymph.utils import Undefined
@six.add_metaclass(abc.ABCMeta)
class ExtensionTypeSerializer(object):
@abc.abstractmethod
def serialize(self, obj):
raise NotImplementedError
@abc.abstractmethod
def deserialize(self, obj):
raise NotImplementedError
class DatetimeSerializer(ExtensionTypeSerializer):
format = '%Y-%m-%dT%H:%M:%SZ'
def serialize(self, obj):
result = obj.strftime(self.format)
if obj.tzinfo:
return str(obj.tzinfo), result
return result
def deserialize(self, obj):
try:
|
tzinfo, obj = obj
except ValueError:
tzinfo = None
result = datetime.datetime.strptime(obj, self.format)
if not tzinfo:
return result
return pytz.timezone(tzinfo).localize(result)
class DateSerializer(ExtensionTypeSerializer):
format = '%Y-%m-%d'
def serialize(self, obj):
return obj.strftime(self.format)
def deserialize(self, obj):
return datetime.datetim
|
e.strptime(obj, self.format).date()
class TimeSerializer(ExtensionTypeSerializer):
format = '%H:%M:%SZ'
def serialize(self, obj):
return obj.strftime(self.format)
def deserialize(self, obj):
return datetime.datetime.strptime(obj, self.format).time()
class StrSerializer(ExtensionTypeSerializer):
def __init__(self, factory):
self.factory = factory
def serialize(self, obj):
return str(obj)
def deserialize(self, obj):
return self.factory(obj)
class SetSerializer(ExtensionTypeSerializer):
def serialize(self, obj):
return list(obj)
def deserialize(self, obj):
return set(obj)
class UndefinedSerializer(ExtensionTypeSerializer):
def serialize(self, obj):
return ''
def deserialize(self, obj):
return Undefined
_extension_type_serializers = {
'datetime': DatetimeSerializer(),
'date': DateSerializer(),
'time': TimeSerializer(),
'Decimal': StrSerializer(decimal.Decimal),
'UUID': StrSerializer(uuid.UUID),
'set': SetSerializer(),
'UndefinedType': UndefinedSerializer(),
}
class BaseSerializer(object):
def __init__(self, dumps=None, loads=None, load=None, dump=None):
self._dumps = dumps
self._loads = loads
self._load = load
self._dump = dump
def dump_object(self, obj):
obj_type = type(obj)
serializer = _extension_type_serializers.get(obj_type.__name__)
if serializer:
obj = {
'__type__': obj_type.__name__,
'_': serializer.serialize(obj),
}
elif hasattr(obj, '_lymph_dump_'):
obj = obj._lymph_dump_()
return obj
def load_object(self, obj):
obj_type = obj.get('__type__')
if obj_type:
serializer = _extension_type_serializers.get(obj_type)
return serializer.deserialize(obj['_'])
return obj
def dumps(self, obj):
return self._dumps(obj, default=self.dump_object)
def loads(self, s):
return self._loads(s, object_hook=self.load_object)
def dump(self, obj, f):
return self._dump(obj, f, default=self.dump_object)
def load(self, f):
return self._load(f, object_hook=self.load_object)
msgpack_serializer = BaseSerializer(
dumps=functools.partial(msgpack.dumps, use_bin_type=True),
loads=functools.partial(msgpack.loads, encoding='utf-8'),
dump=functools.partial(msgpack.dump, use_bin_type=True),
load=functools.partial(msgpack.load, encoding='utf-8'),
)
json_serializer = BaseSerializer(dumps=json.dumps, loads=json.loads, dump=json.dump, load=json.load)
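# Hedged usage sketch (payload invented for illustration): round-trips a datetime
# through the msgpack serializer defined above.
if __name__ == '__main__':
    payload = {'created_at': datetime.datetime(2016, 1, 2, 3, 4, 5)}
    blob = msgpack_serializer.dumps(payload)
    assert msgpack_serializer.loads(blob) == payload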
|
anbasile/donderskritikos
|
app/__init__.py
|
Python
|
apache-2.0
| 183
| 0.016393
|
from flask import Flask
# from flask.ext.sqlalchemy import SQLAlchemy
app
|
= Flask(__name__)
app.config.from_o
|
bject('config')
# db = SQLAlchemy(app)
from app import views #, models
|
ChucklesZeClown/learn-python
|
Exercises-learn-python-the-hard-way/ex2-comments-and-pound-characters.py
|
Python
|
apache-2.0
| 301
| 0
|
# A comment, this is so you
|
can read your program later.
# Anything af
|
ter the # is ignored by python.
print "I could have code like this." # and the comment after is ignored
# You can also use a comment to "disable" or comment out a piece of code:
# print "This won't run."
print "This will run."
|
dhzhd1/road_obj_detect
|
rfcn/core/loader.py
|
Python
|
apache-2.0
| 19,594
| 0.001735
|
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
import numpy as np
import mxnet as mx
from mxnet.executor_manager import _split_input_slice
from config.config import config
from utils.image import tensor_vstack
from rpn.rpn import get_rpn_testbatch, get_rpn_batch, assign_anchor
from rcnn import get_rcnn_testbatch, get_rcnn_batch
class TestLoader(mx.io.DataIter):
def __init__(self, roidb, config, batch_size=1, shuffle=False,
has_rpn=False):
super(TestLoader, self).__init__()
# save parameters as properties
self.cfg = config
self.roidb = roidb
self.batch_size = batch_size
self.shuffle = shuffle
self.has_rpn = has_rpn
# infer properties from roidb
self.size = len(self.roidb)
self.index = np.arange(self.size)
# decide data and label names (only for training)
if has_rpn:
self.data_name = ['data', 'im_info']
else:
self.data_name = ['data', 'rois']
self.label_name = None
# status variable for synchronization between get_data and get_label
self.cur = 0
self.data = None
self.label = []
self.im_info = None
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch()
@property
def provide_data(self):
return [[(k, v.shape) for k, v in zip(self.data_name, idata)] for idata in self.data]
@property
def provide_label(self):
return [None for _ in range(len(self.data))]
@property
def provide_data_single(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
@property
def provide_label_single(self):
return None
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur < self.size
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return self.im_info, mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
provide_da
|
ta=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size -
|
self.size
else:
return 0
def get_batch(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
if self.has_rpn:
data, label, im_info = get_rpn_testbatch(roidb, self.cfg)
else:
data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
self.data = [[mx.nd.array(idata[name]) for name in self.data_name] for idata in data]
self.im_info = im_info
def get_batch_individual(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
if self.has_rpn:
data, label, im_info = get_rpn_testbatch(roidb, self.cfg)
else:
data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
self.data = [mx.nd.array(data[name]) for name in self.data_name]
self.im_info = im_info
class ROIIter(mx.io.DataIter):
def __init__(self, roidb, config, batch_size=2, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
"""
This Iter will provide roi data to Fast R-CNN network
:param roidb: must be preprocessed
:param batch_size: must divide BATCH_SIZE(128)
:param shuffle: bool
:param ctx: list of contexts
:param work_load_list: list of work load
:param aspect_grouping: group images with similar aspects
:return: ROIIter
"""
super(ROIIter, self).__init__()
# save parameters as properties
self.roidb = roidb
self.cfg = config
self.batch_size = batch_size
self.shuffle = shuffle
self.ctx = ctx
if self.ctx is None:
self.ctx = [mx.cpu()]
self.work_load_list = work_load_list
self.aspect_grouping = aspect_grouping
# infer properties from roidb
self.size = len(roidb)
self.index = np.arange(self.size)
# decide data and label names (only for training)
self.data_name = ['data', 'rois']
self.label_name = ['label', 'bbox_target', 'bbox_weight']
# status variable for synchronization between get_data and get_label
self.cur = 0
self.batch = None
self.data = None
self.label = None
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch_individual()
@property
def provide_data(self):
return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
@property
def provide_label(self):
return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]
@property
def provide_data_single(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
@property
def provide_label_single(self):
return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]
def reset(self):
self.cur = 0
if self.shuffle:
if self.aspect_grouping:
widths = np.array([r['width'] for r in self.roidb])
heights = np.array([r['height'] for r in self.roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
extra = inds.shape[0] % self.batch_size
inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
row_perm = np.random.permutation(np.arange(inds_.shape[0]))
inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
self.index = inds
else:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def next(self):
if self.iter_next():
self.get_batch_individual()
self.cur += self.batch_size
return mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
# slice roidb
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
# decide multi device slices
work_load_list = self.work_load_list
ctx = self.ctx
if work_load_list is None:
work_load_list = [1] * len(ctx)
assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
"Invalid settings for work load. "
slices = _split_input_slice(self.batch_size, work_load_list)
# get each device
|
rec/BiblioPixel
|
test/bibliopixel/colors/closest_colors_test.py
|
Python
|
mit
| 1,015
| 0.000985
|
import collections, unittest
from bibliopixel.colors import closest_colors
closest = closest_colors.closest_colors
class ClosestColorsTest(unittest.TestCase):
def exhaustive(self, metric, start=32, skip=64, report=4):
for color in closest_colors.all_colors(start, skip=skip):
cl = closest(color, metric)
if len(cl) >= report:
yield color, cl
def test_simple(self):
self.assertEqual(closest((255, 0,
|
0)), ['red', 'red 1'])
def test_far(self):
c = closest((0, 0, 64), metric=closest_colors.taxicab)
self.assertEqual(c, ['black', 'navy', 'none', 'off'])
c = closest((64, 0, 0), metric=closest_colors.taxicab)
self.assertEqual(c, ['black',
|
'maroon', 'none', 'off'])
def test_euclidean(self):
ex = list(self.exhaustive(closest_colors.euclidean))
self.assertEqual(ex, [])
def test_taxicab(self):
ex = list(self.exhaustive(closest_colors.taxicab))
self.assertEqual(ex, [])
|
hajicj/FEL-NLP-IR_2016
|
npfl103/io/vectorization.py
|
Python
|
apache-2.0
| 3,709
| 0.00027
|
# -*- coding: utf-8 -*-
"""This module implements a class that..."""
from __future__ import print_function, unicode_literals
import collections
import pprint
from npfl103.io.document import Document
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
class DocumentVectorizer:
"""The DocumentVectorizer class transforms a Document to a sparse
vector, represented by a dictionary. Dictionary keys are the terms
in the document, values are just ``1`` for each term (we're using
binary sparse vectors to represent the document).
This class is the interface between the ``Document`` representation
of the data, which is just a model of how the data is recorded,
to a vector space.
Note that you can build your own transformations of the vector
space down the line. This is really just the conversion step. Don't
worry about implementing e.g. the TF-IDF transformation at this point.
>>> doc = Document('../test_data/LN-20020102001.vert')
>>> tf = lambda t: t.pos == 'N'
>>> vectorizer = DocumentVectorizer(zones=['TITLE'], field='lemma', token_filter=tf)
>>> v = vectorizer.transform(doc)
>>> pprint.pprint(v)
OrderedDict([('milión`1000000', 1),
('člověk', 1),
('země', 1),
('peníz', 1),
('euro', 1),
('revoluce', 1),
('dějiny', 1),
('kontinent', 1),
('půlnoc', 1)])
Making better vectorizers
-------------------------
When subclassing the Vectorizer, you will have to think about
two things. First, what are your terms going to be? And second,
how are you going to weigh them?
The answer to the first question will be things like "word forms"
or "lemmas" or "disambiguated lemmas and part of speech tags" --
the fields that you have at your disposal in the tokens. You can
even do n-grams, if you feel like it.
The answer to the second question might be "1" or "term frequency
in the document" or "pivot-normalized term frequency".
Both of these decisions are done in the
:meth:`DocumentSparseVectorizer.transform` method. The input of this
method is a :class:`Document`, the output is a dict with term keys
and term weight values. However, for the second part -- how weights
are decided -- it's better to defer transformations of term weights
later down the line.
"""
def __init__(self, zones=None, **vtext_to_stream_kwargs):
"""Initialize the vectorizer."""
self.zones = zones
self.vtext_to_stream_kwargs = vtext_to_stream_kwargs
def transform(self, document):
"""Transforms an incoming document into a dict of tokens.
Default terms: word forms
Default weights: 1 for each term that appears in the document.
"""
output = collections.OrderedDict()
for term in document.tokens(zones=self.zones,
**self.vtext_to_stream_kwargs):
output[term] = 1
return output
class BinaryVectorizer(DocumentVectorizer):
|
"""We suggest using this class in experiments, so that it's obvious
what kind of vectors is coming out of it."""
pass
class TermFrequencyVectorizer(DocumentVectorizer):
"""The vectorizer for obtaining straightforward term frequencies."""
def transform(self, document):
output = collections.OrderedDict()
for term in document.tokens(zones=self.zones,
|
**self.vtext_to_stream_kwargs):
if term not in output:
output[term] = 0
output[term] += 1
return output
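# Hedged usage sketch mirroring the doctest above: the same document and zones, but raw
# term counts instead of binary weights (variable names here are illustrative).
#     tf_vectorizer = TermFrequencyVectorizer(zones=['TITLE'], field='lemma', token_filter=tf)
#     tf_counts = tf_vectorizer.transform(doc)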
|
bzamecnik/chord-labels
|
chord_labels/parser/ChordLabelListener.py
|
Python
|
mit
| 3,465
| 0.012987
|
# Generated from ChordLabel.g4 by ANTLR 4.7
from antlr4 import *
if __name__ is not None and "." in __name__:
from .ChordLabelParser import ChordLabelParser
else:
from ChordLabelParser import ChordLabelParser
# This class defines a complete listener for a parse tree produced by ChordLabelParser.
class ChordLabelListener(ParseTreeListener):
# Enter a parse tree produced by ChordLabelParser#chord.
def enterChord(self, ctx:ChordLabelParser.ChordContext):
pass
# Exit a parse tree produced by ChordLabelParser#chord.
def exitChord(self, ctx:ChordLabelParser.ChordContext):
pass
# Enter a parse tree produced by ChordLabelParser#root.
def enterRoot(self, ctx:ChordLabelParser.RootContext):
pass
# Exit a parse tree produced by ChordLabelParser#root.
def exitRoot(self, ctx:ChordLabelParser.RootContext):
pass
# Enter a parse tree produced by ChordLabelParser#natural.
def enterNatural(self, ctx:ChordLabelParser.NaturalContext):
pass
# Exit a parse tree produced by ChordLabelParser#natural.
def exitNatural(self, ctx:ChordLabelParser.NaturalContext):
pass
# Enter a parse tree produced by ChordLabelParser#modifier.
def enterModifier(self, ctx:ChordLabelParser.ModifierContext):
pass
# Exit a parse tree produced by ChordLabelParser#modifier.
def exitModifier(self, ctx:ChordLabelParser.ModifierContext):
pass
# Enter a parse tree produced by ChordLabelParser#components.
def enterComponents(self, ctx:ChordLabelParser.ComponentsContext):
pass
# Exit a parse tree produced by ChordLabelParser#components.
def exitComponents(self, ctx:ChordLabelParser.ComponentsContext):
pass
# Enter a parse tree produced by ChordLabelParser#component.
def enterComponent(self, ctx:ChordLabelParser.ComponentContext):
pass
# Exit a parse tree produced by ChordLabelParser#component.
def exitComponent(self, ctx:ChordLabelParser.ComponentContext):
pass
# Enter a parse tree produced by ChordLabelParser#missing.
def enterMissing(self, ctx:ChordLabelParser.MissingContext):
pass
# Exit a parse tree produced by ChordLabelParser#missing.
def exitMissing(self, ctx:ChordLabelParser.MissingContext):
pass
# Enter a parse tree produced by ChordLabelParser#degree.
def ent
|
erDegree(self, ctx:ChordLabelParser.DegreeContext):
pass
# Exit a parse tree produced by ChordLabelParser#degree.
def exitDegree(self, ctx:ChordLabelParser.DegreeContext):
pass
# Enter a parse tree produced by ChordLabelParser#interval.
def enterInterval(self, ctx:ChordLabelParser.IntervalContext):
pas
|
s
# Exit a parse tree produced by ChordLabelParser#interval.
def exitInterval(self, ctx:ChordLabelParser.IntervalContext):
pass
# Enter a parse tree produced by ChordLabelParser#bass.
def enterBass(self, ctx:ChordLabelParser.BassContext):
pass
# Exit a parse tree produced by ChordLabelParser#bass.
def exitBass(self, ctx:ChordLabelParser.BassContext):
pass
# Enter a parse tree produced by ChordLabelParser#shorthand.
def enterShorthand(self, ctx:ChordLabelParser.ShorthandContext):
pass
# Exit a parse tree produced by ChordLabelParser#shorthand.
def exitShorthand(self, ctx:ChordLabelParser.ShorthandContext):
pass
|
popazerty/beyonwiz-4.1
|
tools/host_tools/FormatConverter/input.py
|
Python
|
gpl-2.0
| 432
| 0.060185
|
import sys
def inputText():
input = sys.stdin.readline()
return input.strip()
def inputChoices(list, backcmd = "b", backtext = "back"):
repeat = True
while repeat:
repeat = False
count = 0
for item in list:
print count, "-", item
count += 1
print backcmd, "-
|
", backtext
input = inputText()
if input == backcmd:
return None
action = int(input)
if action >= len(list):
repeat = True
ret
|
urn action
|
popuguy/mit-cs-6-034
|
lab1/tests.py
|
Python
|
unlicense
| 15,045
| 0.023862
|
from production import IF, AND, OR, NOT, THEN, run_conditions
import production as lab
from tester import make_test, get_tests, type_encode, type_decode
from zookeeper import ZOOKEEPER_RULES
import random
random.seed()
try:
set()
except NameError:
from sets import Set as set, ImmutableSet as frozenset
### TEST 1 ###
test_short_answer_1_getargs = "ANSWER_1"
def test_short_answer_1_testanswer(val, original_val = None):
return str(val) == '2'
# The antecedent checks data, it does not add any -- it lists the
# questions asked to see if the rule should fire.
make_test(type = 'VALUE',
getargs = test_short_answer_1_getargs,
testanswer = test_short_answer_1_testanswer,
expected_val = "2",
name = test_short_answer_1_getargs
)
### TEST 2 ###
test_short_answer_2_getargs = "ANSWER_2"
def test_short_answer_2_testanswer(val, original_val = None):
return str(val) == 'no'
# Because 'not' is coded in two separate ways. You and I can
# tell what was meant to happen, but the forward chaining doesn't
# understand English, it just sees meaningless bits, and those do
# not match, in this case.
make_test(type = 'VALUE',
getargs = test_short_answer_2_getargs,
testanswer = test_short_answer_2_testanswer,
|
expected_val = "no",
name = test_s
|
hort_answer_2_getargs
)
### TEST 3 ###
test_short_answer_3_getargs = "ANSWER_3"
def test_short_answer_3_testanswer(val, original_val = None):
return str(val) == '2'
# The answer is 2 because, as it says in the lab description, "A
# NOT clause should not introduce new variables - the matcher
# won't know what to do with them." In forward chaining, let's
# suppose there were no assertions of the form '(?x) is dead',
# then we would try to instantiate the consequent, but what would
# we fill the variable with? So we cannot forward chain. Let's
# suppose instead that we found 'Polly is dead' so we did not
# instantiate the consequent. But then in backward chaining, we
# might need 'Martha is pining for the fjords', and nothing says
# that 'Martha is dead' so it would work -- and different forward
# and backward chaining results would be a disaster. So NOT
# statements in the antecedent must not have any variables.
#
# You will also note that one pines for the fjords,
# metaphorically speaking, when one *is* dead. But that's an
# error in knowledge discovery or entry, not in programming.
make_test(type = 'VALUE',
getargs = test_short_answer_3_getargs,
testanswer = test_short_answer_3_testanswer,
expected_val = "2",
name = test_short_answer_3_getargs
)
### TEST 4 ###
test_short_answer_4_getargs = "ANSWER_4"
def test_short_answer_4_testanswer(val, original_val = None):
return str(val) == '1'
# Rule 1's preconditions, that some one thing both have feathers
# and a beak, are met by the data when that thing is Pendergast.
# The consequent changes the data, so the rule fires.
make_test(type = 'VALUE',
getargs = test_short_answer_4_getargs,
testanswer = test_short_answer_4_testanswer,
expected_val = "1",
name = test_short_answer_4_getargs
)
### TEST 5 ###
test_short_answer_5_getargs = "ANSWER_5"
def test_short_answer_5_testanswer(val, original_val = None):
return str(val) == '0'
# The preconditions for Rule 2 are met, but the consequent is
# already present, so it doesn't fire. Same for Rule 1. So no
# rule fires.
make_test(type = 'VALUE',
getargs = test_short_answer_5_getargs,
testanswer = test_short_answer_5_testanswer,
expected_val = "0",
name = test_short_answer_5_getargs
)
### TEST 6 ###
transitive_rule_1_getargs = "TEST_RESULTS_TRANS1"
def transitive_rule_1_testanswer(val, original_val = None):
return ( set(val) == set([ 'a beats b',
'b beats c', 'a beats c' ]) )
# This test checks to make sure that your transitive rule
# produces the correct set of statements given the a/b/c data.
make_test(type = 'VALUE',
getargs = transitive_rule_1_getargs,
testanswer = transitive_rule_1_testanswer,
expected_val = "[ 'a beats b', 'b beats c', 'a beats c' ]",
name = transitive_rule_1_getargs
)
### TEST 7 ###
transitive_rule_2_getargs = "TEST_RESULTS_TRANS2"
def transitive_rule_2_testanswer(val, original_val = None):
return ( set(val)
== set([ 'rock beats rock',
'rock beats scissors',
'rock beats paper',
'scissors beats rock',
'scissors beats scissors',
'scissors beats paper',
'paper beats rock',
'paper beats scissors',
'paper beats paper' ]) )
# This test checks to make sure that your transitive rule produces
# the correct set of statements given the rock-paper-scissors data.
make_test(type = 'VALUE',
getargs = transitive_rule_2_getargs,
testanswer = transitive_rule_2_testanswer,
expected_val = "[ 'rock beats rock', 'rock beats scissors', 'rock beats paper', 'scissors beats rock', 'scissors beats scissors', 'scissors beats paper', 'paper beats rock', 'paper beats scissors', 'paper beats paper' ]",
name = transitive_rule_2_getargs
)
### TEST 8 ###
family_rules_1_getargs = "TEST_RESULTS_1"
expected_family_relations = [
'brother bob alice',
'sister alice bob',
'father chuck bob',
'son bob chuck',
'daughter alice chuck',
'father chuck alice' ]
def family_rules_1_testanswer(val, original_val = None):
return ( set( [ x for x in val
if x.split()[0] in (
'father',
'son',
'daughter',
'brother',
'sister',
) ] )
== set(expected_family_relations))
# This test checks to make sure that your family rules produce
# the correct set of statements given the alice/bob/chuck data.
# Note that it ignores all statements that don't contain any of
# the words 'father', 'son', 'daughter', 'brother', or 'sister',
# so you can include extra statements if it helps you.
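# For example (hypothetical intermediate facts), your rules could first
# assert helpers such as 'parent chuck bob' or 'sibling bob alice' and then
# derive the five graded relations from those; the extra statements are
# ignored by the filter above.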
make_test(type = 'VALUE',
getargs = family_rules_1_getargs,
testanswer = family_rules_1_testanswer,
expected_val = "added family relations should include: " + str(expected_family_relations),
name = family_rules_1_getargs
)
### TEST 9 ###
family_rules_2_getargs = 'TEST_RESULTS_2'
def family_rules_2_testanswer(val, original_val = None):
return ( set( [ x for x in val
if x.split()[0] == 'cousin' ] )
== set([ 'cousin c1 c3',
'cousin c1 c4',
'cousin c2 c3',
'cousin c2 c4',
'cousin c3 c1',
'cousin c3 c2',
'cousin c4 c1',
'cousin c4 c2',
'cousin d1 d2',
'cousin d2 d1',
'cousin d3 d4',
'cousin d4 d3' ]) )
# This test checks to make sure that your family rules produce
# the correct set of statements given the a/b/c/d data.
make_test(type = 'VALUE',
getargs = family_rules_2_getargs,
testanswer = family_rules_2_testanswer,
expected_val = "Results including " + str([ 'cousin c1 c3',
'cousin c1 c4',
'cousin c2 c3',
'cousin c2 c4',
'cousin c3 c1',
'cousin c3 c2',
'cousin c4 c1',
'cousin c4 c2',
'cousin d1 d2',
                         'cousin d2 d1',
                         'cousin d3 d4',
                         'cousin d4 d3' ]),
          name = family_rules_2_getargs
          )
|
XcomConvent/xcom40k-shades
|
xcom40k/app/migrations/0026_auto_20120106_2212.py
|
Python
|
apache-2.0
| 1,819
| 0.001649
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('app', '0025_auto_20150923_0843'),
]
operations = [
migrations.CreateModel(
name='BlogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('text', models.CharField(max_length=10000)),
('pub_date', models.DateField()),
('author', models.ForeignKey(to='app.Account')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BlogEntryTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('desc', models.CharField(max_length=500)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='mission',
name='finalize_date',
field=models.DateField(default=datetime.datetime(2012, 1, 6, 22, 12, 50, 101184, tzinfo=utc)),
preserve_default=False,
),
migrations.AlterField(
model_name='neurorequest',
name='closed_date',
field=models.DateField(),
),
migrations.AddField(
model_name='blogentry',
name='tags',
field=models.ManyToManyField(to='app.BlogEntryTag'),
),
]
|
suut/psychic-happiness
|
async_core.py
|
Python
|
unlicense
| 2,381
| 0.00126
|
#!/usr/local/bin/python3.4
# -*- coding: utf-8 -*-
import threading
import time
import sys
import trace
from inspect import isgeneratorfunction
import format
class KillableThread(threading.Thread):
"""A subclass of threading.Thread, with a kill() method provided by courtsey of Connelly Barnes."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def __run(self):
"""Hacked run function, which installs the trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
            return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
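        # The trace installed in __run hands each call to localtrace, which
        # checks this flag and raises SystemExit on the next traced line, so
        # only this thread is unwound.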
class FunctionExecutor(KillableThread):
def __init__(self, _f: 'the function to execute', _callback, args, kwargs):
super().__init__()
self._f = _f
self._callback = _callback
self.args = args
self.kwargs = kwargs
def run(self):
ret = self._f(*self.args, **self.kwargs)
if ret is not None:
if repr(type(ret)) == '<class \'generator\'>':
for i in ret:
self._callback(i.format(color=format.color))
            else:  # TODO: make all functions generators instead of returning a value
print('DEPRECATED: function "', self._f.cmdname, '" is using the return statement', sep='')
self._callback(ret.format(color=format.color))
class ControlThread(threading.Thread):
def __init__(self, _f, _callback, *args, **kwargs):
super().__init__()
self.watched_thread = FunctionExecutor(_f, _callback, args, kwargs)
self._callback = _callback
def run(self):
self.watched_thread.start()
time.sleep(3)
if self.watched_thread.is_alive():
self.watched_thread.kill()
self._callback('timeout')
|
leviroth/praw
|
praw/models/reddit/wikipage.py
|
Python
|
bsd-2-clause
| 8,763
| 0
|
"""Provide the WikiPage class."""
from ...const import API_PATH
from ...util.cache import cachedproperty
from ..listing.generator import ListingGenerator
from .base import RedditBase
from .redditor import Redditor
class WikiPage(RedditBase):
"""An individual WikiPage object.
**Typical Attributes**
This table describes attributes that typically belong to objects of this
class. Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a
guarantee that these attributes will always be present, nor is this list
necessarily comprehensive.
======================= ===================================================
Attribute Description
======================= ===================================================
``content_html`` The contents of the wiki page, as HTML.
``content_md`` The contents of the wiki page, as Markdown.
``may_revise`` A ``bool`` representing whether or not the
authenticated user may edit the wiki page.
``name`` The name of the wiki page.
``revision_by`` The :class:`.Redditor` who authored this
revision of the wiki page.
``revision_date`` The time of this revision, in `Unix Time`_.
``subreddit`` The :class:`.Subreddit` this wiki page belongs to.
======================= ===================================================
.. _Unix Time: https://en.wikipedia.org/wiki/Unix_time
"""
__hash__ = RedditBase.__hash__
@staticmethod
def _revision_generator(subreddit, url, generator_kwargs):
        for revision in ListingGenerator(
subreddit._reddit, url, **generator_kwargs
):
if revision["author"] is not None:
revision["author"] = Redditor(
subreddit._reddit, _data=revision["author"]["data"]
)
revision["page"] = WikiPage(
                subreddit._reddit, subreddit, revision["page"], revision["id"]
)
yield revision
@cachedproperty
def mod(self):
"""Provide an instance of :class:`.WikiPageModeration`."""
return WikiPageModeration(self)
def __eq__(self, other):
"""Return whether the other instance equals the current."""
return (
isinstance(other, self.__class__)
and str(self).lower() == str(other).lower()
)
def __init__(self, reddit, subreddit, name, revision=None, _data=None):
"""Construct an instance of the WikiPage object.
:param revision: A specific revision ID to fetch. By default, fetches
the most recent revision.
"""
self.name = name
self._revision = revision
self.subreddit = subreddit
super(WikiPage, self).__init__(reddit, _data=_data)
def __repr__(self):
"""Return an object initialization representation of the instance."""
return "{}(subreddit={!r}, name={!r})".format(
self.__class__.__name__, self.subreddit, self.name
)
def __str__(self):
"""Return a string representation of the instance."""
return "{}/{}".format(self.subreddit, self.name)
def _fetch_info(self):
return (
"wiki_page",
{"subreddit": self.subreddit, "page": self.name},
{"v": self._revision} if self._revision else None,
)
def _fetch_data(self):
name, fields, params = self._fetch_info()
path = API_PATH[name].format(**fields)
return self._reddit.request("GET", path, params)
def _fetch(self):
data = self._fetch_data()
data = data["data"]
if data["revision_by"] is not None:
data["revision_by"] = Redditor(
self._reddit, _data=data["revision_by"]["data"]
)
self.__dict__.update(data)
self._fetched = True
def edit(self, content, reason=None, **other_settings):
"""Edit this WikiPage's contents.
:param content: The updated markdown content of the page.
:param reason: (Optional) The reason for the revision.
:param other_settings: Additional keyword arguments to pass.
"""
other_settings.update(
{"content": content, "page": self.name, "reason": reason}
)
self._reddit.post(
API_PATH["wiki_edit"].format(subreddit=self.subreddit),
data=other_settings,
)
def revision(self, revision):
"""Return a specific version of this page by revision ID.
To view revision ``[ID]`` of ``'praw_test'`` in ``'/r/test'``:
.. code:: python
page = reddit.subreddit('test').wiki['praw_test'].revision('[ID]')
"""
return WikiPage(
self.subreddit._reddit, self.subreddit, self.name, revision
)
def revisions(self, **generator_kwargs):
"""Return a generator for page revisions.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To view the wiki revisions for ``'praw_test'`` in ``'/r/test'`` try:
.. code:: python
for item in reddit.subreddit('test').wiki['praw_test'].revisions():
print(item)
To get :class:`.WikiPage` objects for each revision:
.. code:: python
for item in reddit.subreddit('test').wiki['praw_test'].revisions():
print(item['page'])
"""
url = API_PATH["wiki_page_revisions"].format(
subreddit=self.subreddit, page=self.name
)
return self._revision_generator(self.subreddit, url, generator_kwargs)
class WikiPageModeration(object):
"""Provides a set of moderation functions for a WikiPage."""
def __init__(self, wikipage):
"""Create a WikiPageModeration instance.
:param wikipage: The wikipage to moderate.
"""
self.wikipage = wikipage
def add(self, redditor):
"""Add an editor to this WikiPage.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
To add ``'spez'`` as an editor on the wikipage ``'praw_test'`` try:
.. code:: python
reddit.subreddit('test').wiki['praw_test'].mod.add('spez')
"""
data = {"page": self.wikipage.name, "username": str(redditor)}
url = API_PATH["wiki_page_editor"].format(
subreddit=self.wikipage.subreddit, method="add"
)
self.wikipage._reddit.post(url, data=data)
def remove(self, redditor):
"""Remove an editor from this WikiPage.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
To remove ``'spez'`` as an editor on the wikipage ``'praw_test'`` try:
.. code:: python
reddit.subreddit('test').wiki['praw_test'].mod.remove('spez')
"""
data = {"page": self.wikipage.name, "username": str(redditor)}
url = API_PATH["wiki_page_editor"].format(
subreddit=self.wikipage.subreddit, method="del"
)
self.wikipage._reddit.post(url, data=data)
def settings(self):
"""Return the settings for this WikiPage."""
url = API_PATH["wiki_page_settings"].format(
subreddit=self.wikipage.subreddit, page=self.wikipage.name
)
return self.wikipage._reddit.get(url)["data"]
def update(self, listed, permlevel, **other_settings):
"""Update the settings for this WikiPage.
:param listed: (boolean) Show this page on page list.
:param permlevel: (int) Who can edit this page? (0) use subreddit wiki
permissions, (1) only approved wiki contributors for this page may
edit (see :meth:`.WikiPageModeration.add`), (2) only mods may edit
and view
:param other_settings: Additional keyword arguments to pass.
:returns: The updated WikiPage settings.
To set the wikipage ``'p
|
jmeppley/py-metagenomics
|
assign_paths.py
|
Python
|
mit
| 10,699
| 0
|
#! /usr/bin/env python
"""
Takes a single hit table file and generates a table (or tables) of
pathway/gene family assignments for the query sequences (aka 'reads').
Assignments can be for gene families, gene classes, or pathways. Multiple
pathway or classification levels can be given. If they are, an assignment
will be made at each level.
This differs from assignPathsToReadsFromBlast.py in that: (1) it can
handle CAZy and SEED, (2) it will output multiple levels in one file,
(3) multiple assignments are always printed on multiple lines.
This script will work with KEGG, SEED, or CAZy. CAZy only has one level
of hierarchy, the others have 3. The CAZy hierarchy is apparent from the
hit name and needs no supporting files. KEGG and SEED require mapping files
to identify gene families and hierarchy files to report levels other than
the gene family or ortholog level. Both SEED and KEGG have three levels
of classifications that can be indicated with a 1, 2, or 3. The words
"subsystem" and "pathway" are synonyms for level 3.
If a count method is selected that can produce multiple assignments per
read, each assignment will be printed on a new line.
NOTE: in KEGG (and SEED) a single ortholog (role) may belong to multiple
pathways (subsystems). A hit to such an ortholog will result in extra
assignment values for that query sequence (1 for each pathway it belongs to).
"""
import argparse
import logging
import re
from edl import hits, kegg
from edl.util import add_IO_arguments, add_universal_arguments, \
inputIterator, parseMapFile, setup_logging
def main():
description = __doc__
parser = argparse.ArgumentParser(description)
add_IO_arguments(parser)
parser.add_argument("-l", "--level", dest="levels", default=None,
metavar="LEVEL", action="append",
help=""" Level(s) to collect counts on. Use flag
multiple times to specify multiple levels. If multiple
values given, one table produced for each with rank
name appended to file name. Levels can be an integer
(1-3) for KEGG or SEED levels, any one of 'gene',
'role', 'family',
'ko', or 'ortholog' (which are all synonyms), or
anything not synonymous with 'gene' to
get CAZy groups. Defaults to ortholog/role and
levels 1, 2, and 3 for KEGG and SEED
and gene and group for CAZy and COG.""")
parser.add_argument(
'-S',
'--squash',
dest='splitForLevels',
default=True,
action='store_false',
help="Don't split assignment rows if gene maps to multiple pathways, "
"just squash them into one row using python list syntax")
# format, ortholog heirarchy, and more
kegg.add_path_arguments(parser)
# log level and help
add_universal_arguments(parser)
arguments = parser.parse_args()
setup_logging(arguments)
# Set defaults and check for some conflicts
if arguments.levels is None and arguments.heirarchyFile is None:
# using hit names only
arguments.levels = [None]
else:
if arguments.heirarchyFile is None \
and arguments.heirarchyType != 'cazy':
logging.warn("Type: %s" % (arguments.heirarchyType))
parser.error("Cannot select levels without a heirarchy (ko) file")
if arguments.levels is None:
# set a default
            # use == (not 'is') for string comparison; elif keeps the else
            # branch below from clobbering the kegg default set here
            if arguments.heirarchyType == 'kegg':
                arguments.levels = ['ko', '1', '2', 'pathway']
            elif arguments.heirarchyType == 'seed':
                arguments.levels = ['role', '1', '2', 'subsystem']
else:
arguments.levels = ['gene', 'group']
try:
# Make sure the level list makes sense
        arguments.levels = cleanLevels(arguments.levels)
    except Exception as e:
        parser.error(str(e))
# map reads to hits
if arguments.mapFile is not None:
if arguments.mapStyle == 'auto':
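            # Sniff the first non-comment line of the map file and match it
            # against the KEGG / SEED / tab / COG patterns defined below to
            # decide which parser to use.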
with open(arguments.mapFile) as f:
firstLine = next(f)
while len(firstLine) == 0 or firstLine[0] == '#':
firstLine = next(f)
if koMapRE.search(firstLine):
arguments.mapStyle = 'kegg'
elif seedMapRE.search(firstLine):
arguments.mapStyle = 'seed'
elif tabMapRE.search(firstLine):
arguments.mapStyle = 'tab'
elif cogMapRE.search(firstLine):
arguments.mapStyle = 'cog'
else:
raise Exception(
"Cannot figure out map type from first line:\n%s" %
(firstLine))
logging.info("Map file seems to be: %s" % (arguments.mapStyle))
if arguments.mapStyle == 'kegg':
valueMap = kegg.parseLinkFile(arguments.mapFile)
elif arguments.mapStyle == 'seed':
valueMap = kegg.parseSeedMap(arguments.mapFile)
elif arguments.mapStyle == 'cog':
valueMap = kegg.parseCogMap(arguments.mapFile)
else:
if arguments.parseStyle == hits.GIS:
keyType = int
else:
keyType = None
valueMap = parseMapFile(
arguments.mapFile,
valueType=None,
valueDelim=arguments.tab_map_delim,
keyType=keyType)
if len(valueMap) > 0:
logging.info("Read %d items into map. EG: %s" %
(len(valueMap), next(iter(valueMap.items()))))
else:
logging.warn("Read 0 items into value map!")
else:
valueMap = None
# set up level mapping
levelMappers = [getLevelMapper(lvl, arguments) for lvl in arguments.levels]
# parse input files
for (inhandle, outhandle) in inputIterator(arguments):
logging.debug(
"Reading from %s and writing to %s" %
(inhandle, outhandle))
hitMapIter = hits.parseM8FileIter(
inhandle,
valueMap,
hits.FilterParams.create_from_arguments(arguments),
arguments.parseStyle,
arguments.countMethod,
ignoreEmptyHits=arguments.mappedHitsOnly)
if arguments.levels == [None]:
arguments.levels = ['Hit']
outhandle.write("Read\t%s\n" % ('\t'.join(arguments.levels)))
for read, hitIter in hitMapIter:
assignments = []
for hit in hitIter:
logging.debug("Hit: %s" % (hit))
assignment = []
for levelMapper in levelMappers:
assignment.append(levelMapper(hit))
assignments.append(assignment)
logging.debug("Read %s has %d hits" % (read, len(assignments)))
for assignment in assignments:
for assignmentList in handleMultipleMappings(
assignment, arguments):
outhandle.write(
"%s\t%s\n" %
(read, "\t".join(assignmentList)))
def cleanLevels(levelList):
# don't allow duplicates
newList = list(set(levelList))
newList.sort(key=lambda l: levelList.index(l))
# return levels
return newList
def getCazyGroup(gene):
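    # Pull the leading letters off a CAZy family name (e.g. "GH13" -> "GH");
    # if the pattern does not match, fall back to the raw gene string.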
m = cazyRE.search(gene)
if m:
cazygroup = m.group(1)
logging.debug("Mapping %s to %s" % (gene, cazygroup))
else:
logging.debug(
"Could not parse group from %s with r'%s'" %
(gene, cazyRE.pattern))
cazygroup = gene
return cazygroup
# levels equivalent to returning just the ko/gene
koSyns = [None, 'ko', 'gene', 'ortholog', 'family', 'role']
level3Syns = ['subsystem', 'pathway', 'group']
koMapRE = re.compile(r'\sko:K\d{5}')
seedMapRE = re.compile(r'^Mapped roles:')
cogMapRE = re.compile(r'^\d+\t[KC]OG\d+\t')
tabMapRE = re.compile(r'^[^\t]+\t[^\t+]')
cazyRE = re.compile(r'([a-zA-Z]+)\d+')
def getLevelMapper(level, arguments):
if level in koSyn
|
mudzi42/pynet_class
|
class2/my_snmp.py
|
Python
|
apache-2.0
| 747
| 0.004016
|
#!/usr/bin/env python
"""
4c. Create a script that connects to both routers (pynet-rtr1 and pynet-rtr2)
and prints out both the MIB2 sysName and sysDescr.
"""
from snmp_helper import snmp_get_oid, snmp_extract
COMMUNITY_STRING = 'galileo'
SYS_DESCR = '1.3.6.1.2.1.1.1.0'
SYS_NAME = '1.3.6.1.2.1.1.5.0'
def main():
pynet_rtr1 = ('184.105.247.70', COMMUNITY_STRING, 161)
    pynet_rtr2 = ('184.105.247.71', COMMUNITY_STRING, 161)
for a_device in (pynet_rtr1, pynet_rtr2):
print "\n"
for the_oid in (SYS_NAME, SYS_DESCR):
snmp_data = snmp_get_oid(a_device, oid=the_oid)
output = snmp_extract(snmp_data)
print output
print "\n"
if __name__ == "__main__":
main()
|
zuck/prometeo-erp
|
core/menus/signals.py
|
Python
|
lgpl-3.0
| 1,904
| 0.004202
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This file is part of the prometeo project.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.5'
from django.db import models
from django.utils.translation import ugettext_noop as _
import django.dispatch
from models import *
## UTILS ##
def manage_bookmarks(cls):
models.signals.post_save.connect(create_bookmarks, cls)
models.signals.post_delete.connect(delete_bookmarks, cls)
## HANDLERS ##
def create_bookmarks(sender, instance, *args, **kwargs):
"""Creates a new bookmarks list for the given object.
"""
if hasattr(instance, "bookmarks") and not instance.bookmarks:
        bookmarks, is_new = Menu.objects.get_or_create(slug="%s_%d_bookmarks" % (sender.__name__.lower(), instance.pk), description=_("Bookmarks"))
if not is_new:
for l in bookmarks.links.all():
l.delete()
instance.bookmarks = bookmarks
instance.save()
def delete_bookmarks(sender, instance, *args, **kwargs):
"""Deletes the bookmarks list of the given object.
"""
bookmarks = instance.bookmarks
if bookmarks:
bookmarks.delete()
instance.bookmarks = None
|
ee64/data-and-algorithms
|
upload.py
|
Python
|
mit
| 3,932
| 0.003913
|
import requests
import os
print('\n*** Data and Algorithms GitHub auto-upload script ***\n')
username = input('Enter your GitHub username, e.g. Robert Ying: ')
email = input('Enter the email you registered with on GitHub: ')
print('\nConfiguring Git...')
os.system('git config --global user.name "' + username + '"')
os.system('git config --global user.email ' + email)
print('\nThe information you entered:')
os.system('git config user.name')
os.system('git config user.email')
if input('\nIf the information is correct, enter y to continue; press Enter to re-enter the username and email: ') != 'y':
    username = input('Enter your GitHub username, e.g. Robert Ying: ')
    email = input('Enter the email you registered with on GitHub: ')
    print('\nThe information you entered:')
    os.system('git config user.name')
    os.system('git config user.email')
url = input(
    '\nEnter the url of your forked repository, e.g. https://github.com/robertying/data-and-algorithms.git (note that it ends with ".git"): ')
name = input('\nEnter your real name: ')
payload = {"username": input('Enter your OJ username: '), "password": input('Enter your OJ password: ')}
print()
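# Log in to the OJ, fetch a single submission to learn the total count, then
# re-query with page_size=count so every submission comes back in one page.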
response = requests.post(
'http://lambda.ee.tsinghua.edu.cn/api/auth/login/', data=payload)
answer = requests.get('http://lambda.ee.tsinghua.edu.cn/api/my/submits/', headers={
'Authorization': 'TOKEN ' + response.json()['auth_token']}, params={'page': 1, 'page_size': 1})
count = answer.json()['count']
answer = requests.get('http://lambda.ee.tsinghua.edu.cn/api/my/submits/', headers={
'Authorization': 'TOKEN ' + response.json()['auth_token']}, params={'page': 1, 'page_size': count})
results = answer.json()['results']
if not os.path.exists('data-and-algorithms'):
os.system('git clone ' + url)
else:
os.system('cd data-and-algorithms & git pull')
os.system('cd data-and-algorithms & git remote add upstream https://github.com/ee64/data-and-algorithms.git & git fetch upstream & git checkout master & git merge upstream/master')
problem_dir = os.listdir('./data-and-algorithms')
already_walked_results = []
for result in results:
if result['problem_title'] not in already_walked_results and result['problem_title'] in problem_dir:
if result['score'] == 100:
if not os.path.exists('./data-and-algorithms/' + result['problem_title'] + '/' + name):
os.mkdir('./data-and-algorithms/' +
result['problem_title'] + '/' + name)
with open('./data-and-algorithms/' + result['problem_title'] + '/' + name + '/README.md', 'w', encoding='utf-8') as md:
md.write('# ' + result['problem_title'] + '\n\n')
md.write(
                        '| # | Time | Memory |\n')
md.write(
'|:----------:|:------------------------------:|:------------------------------:|\n')
                run_results = result['run_results']  # use this row's results, not always the first submission's
for i in range(len(run_results)):
md.write(
'|' + str(i + 1) + '|' + str(run_results[i][1]) + ' ms|' + str(run_results[i][2]) + ' KB|\n')
with open('./data-and-algorithms/' + result['problem_title'] + '/' + name + '/source.cpp', 'w', encoding='utf-8') as source:
source.write(result['code'] + '\n')
os.system('cd data-and-algorithms & git add .')
            os.system('cd data-and-algorithms & git commit -m "Initial commit to ' +
                      result['problem_title'] + ' by ' + name + '"')
already_walked_results.append(result['problem_title'])
continue
os.system('cd data-and-algorithms & git push origin master')
input('\nUpload complete! If nothing seems to have changed, please try again. Press Enter to exit: ')
|
carlisson/legend
|
legend.py
|
Python
|
gpl-3.0
| 2,150
| 0.001395
|
#!/usr/bin/env python2
import wx
import time
TRAY_TOOLTIP = 'Personal Legend'
TRAY_ICONS = {
'main': 'icon-main.png',
'c1': 'icon-1card.png',
'c2': 'icon-2cards.png',
'c3': 'icon-3cards.png',
'timer': 'icon-zero.png'
}
TRAY_TIMER = 30 * 60
def create_menu_item(menu, label, func):
item = wx.MenuItem(menu, -1, label)
menu.Bind(wx.EVT_MENU, func, id=item.GetId())
menu.AppendItem(item)
return item
class TaskBarIcon(wx.TaskBarIcon):
def __init__(self):
super(TaskBarIcon, self).__init__()
self.set_status('main')
self.timer = 0
self.Bind(wx.EVT_TASKBAR_LEFT_DOWN, self.on_left_down)
def CreatePopupMenu(self):
menu = wx.Menu()
        create_menu_item(menu, 'Timer', self.stop_watch)
        menu.AppendSeparator()
create_menu_item(menu, 'Exit', self.on_exit)
return menu
def set_icon(self, path):
icon = wx.IconFromBitmap(wx.Bitmap(path))
self.SetIcon(icon, TRAY_TOOLTIP)
def set_status(self, st):
self.set_icon(TRAY_ICONS[st])
self.status = st
def on_left_down(self, event):
if self.status == 'main':
self.set_status('c1')
elif self.status == 'c1':
self.set_status('c2')
elif self.status == 'c2':
self.set_status('c3')
elif self.status == 'c3':
self.set_status('main')
def on_hello(self, event):
print('Hello, world!')
def on_exit(self, event):
wx.CallAfter(self.Destroy)
def stop_watch(self, event):
now = time.time()
future = now + TRAY_TIMER
self.timer = 0
self.set_icon('icon-zero.png')
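        # Busy-wait until the countdown ends: i climbs from 0 to 10 as the
        # timer elapses (the +0.8 nudges each step slightly earlier), and the
        # tray icon is swapped to the next icon-t<i>.png only when the step
        # number increases.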
while now < future:
i = 10 - int(10*(future - now)/TRAY_TIMER + 0.8)
if i > self.timer:
self.timer = self.timer + 1
self.set_icon('icon-t' + str(i) + '.png')
time.sleep(0.1)
wx.Yield()
now = time.time()
pass
def main():
#app = wx.PySimpleApp()
app = wx.App(False)
TaskBarIcon()
app.MainLoop()
if __name__ == '__main__':
main()
|
nkgilley/home-assistant
|
tests/components/auth/test_login_flow.py
|
Python
|
apache-2.0
| 3,442
| 0.000291
|
"""Tests for the login flow."""
from . import async_setup_auth
from tests.async_mock import patch
from tests.common import CLIENT_ID, CLIENT_REDIRECT_URI
async def test_fetch_auth_providers(hass, aiohttp_client):
"""Test fetching auth providers."""
client = await async_setup_auth(hass, aiohttp_client)
resp = await client.get("/auth/providers")
assert resp.status == 200
assert await resp.json() == [
{"name": "Example", "type": "insecure_example", "id": None}
]
async def test_fetch_auth_providers_onboarding(hass, aiohttp_client):
"""Test fetching auth providers."""
client = await async_setup_auth(hass, aiohttp_client)
with patch(
"homeassistant.components.onboarding.async_is_user_onboarded",
return_value=False,
):
resp = await client.get("/auth/providers")
assert resp.status == 400
assert await resp.json() == {
"message": "Onboarding not finished",
"code": "onboarding_required",
}
async def test_cannot_get_flows_in_progress(hass, aiohttp_client):
"""Test we cannot get flows in progress."""
    client = await async_setup_auth(hass, aiohttp_client, [])
resp = await client.get("/auth/login_flow")
assert resp.status == 405
async def test_invalid_username_password(hass, aiohttp_client):
"""Test we cannot get flows in progress."""
client = await async_setup_auth(hass, aiohttp_client)
resp = await client.post(
"/auth/login_flow",
json={
"client_id": CLIENT_ID,
"handler": ["insecure_example", None],
"redirect_uri": CLIENT_REDIRECT_URI,
},
)
assert resp.status == 200
step = await resp.json()
# Incorrect username
resp = await client.post(
f"/auth/login_flow/{step['flow_id']}",
json={
"client_id": CLIENT_ID,
"username": "wrong-user",
"password": "test-pass",
},
)
assert resp.status == 200
step = await resp.json()
assert step["step_id"] == "init"
assert step["errors"]["base"] == "invalid_auth"
# Incorrect password
resp = await client.post(
f"/auth/login_flow/{step['flow_id']}",
json={
"client_id": CLIENT_ID,
"username": "test-user",
"password": "wrong-pass",
},
)
assert resp.status == 200
step = await resp.json()
assert step["step_id"] == "init"
assert step["errors"]["base"] == "invalid_auth"
async def test_login_exist_user(hass, aiohttp_client):
"""Test logging in with exist user."""
client = await async_setup_auth(hass, aiohttp_client, setup_api=True)
cred = await hass.auth.auth_providers[0].async_get_or_create_credentials(
{"username": "test-user"}
)
await hass.auth.async_get_or_create_user(cred)
resp = await client.post(
"/auth/login_flow",
json={
"client_id": CLIENT_ID,
"handler": ["insecure_example", None],
"redirect_uri": CLIENT_REDIRECT_URI,
},
)
assert resp.status == 200
step = await resp.json()
resp = await client.post(
f"/auth/login_flow/{step['flow_id']}",
json={"client_id": CLIENT_ID, "username": "test-user", "password": "test-pass"},
)
assert resp.status == 200
step = await resp.json()
assert step["type"] == "create_entry"
assert len(step["result"]) > 1
|
erickpeirson/jhb-explorer
|
explorer/management/commands/loadauthorresources.py
|
Python
|
gpl-2.0
| 1,837
| 0.002722
|
from django.core.management.base import BaseCommand
from explorer.models import Author, AuthorExternalResource, ExternalResource
import os
import csv
class Command(BaseCommand):
help = """Load data about external authority records for authors."""
def add_arguments(self, parser):
parser.add_argument('filename', nargs=1, type=str,
help="Name of CSV file in [app]/fixtures/.")
def handle(self, *args, **options):
sources = {
'viaf': ExternalResource.VIAF,
'isiscb': ExternalResource.ISISCB,
}
locations = {
'viaf': 'http://viaf.org/viaf/%s/',
'isiscb': 'http://data.isiscb.org/isis/%s/',
}
filename = options.get('filename')[0]
data_path = os.path.join('explorer', 'fixtures', filename)
with open(data_path, 'rU') as f:
reader = csv.reader(f)
data = [line for line in reader][1:]
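        # Each remaining row holds: author pk, name, external identifier,
        # source ('viaf' or 'isiscb'), and an optional confidence score.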
for pk, name, identifier, source, confidence in data:
if not source or not identifier: # No data for this author.
continue
location = locations[source.lower()] % identifier
defaults = {
'resource_type': sources[source.lower()],
'resource_location': location,
}
resource,_ = ExternalResource.objects.get_or_create(resource_location=location, defaults=defaults)
if confidence:
confidence = float(confidence)
else:
                confidence = 1.0
            AuthorExternalResource(
                author_id=pk,
                resource=resource,
confidence=confidence
).save()
            self.stdout.write(self.style.SUCCESS(' | '.join([pk, name, identifier, source, str(confidence)])))
|