repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
DemocracyClub/EveryElection
|
every_election/apps/election_snooper/migrations/0003_snoopedelection_extra.py
|
Python
|
bsd-3-clause
| 447
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-14 17:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("election_snooper", "0002_auto_20170314_1754")]
operations = [
migrations.AddField(
m
|
odel_name="snoopedelection",
name="extra",
|
field=models.TextField(blank=True),
)
]
|
Lancher/tornado
|
maint/test/appengine/common/runtests.py
|
Python
|
apache-2.0
| 1,918
| 0
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import contextlib
import errno
import os
import random
import signal
import socket
import subprocess
import sys
import time
import urllib2
try:
xrange
except NameError:
xrange = range
if __name__ == "__main__":
tornado_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../..'))
# dev_appserver doesn't seem to set SO_REUSEADDR
port = random.randrange(10000, 11000)
# does dev_appserver.py ever live anywhere but /usr/local/bin?
proc = subprocess.Popen([sys.executable,
"/usr/local/bin/dev_appserver.py",
os.path.dirname(os.path.abspath(__file__)),
"--port=%d" % port,
"--skip_sdk_update_check",
],
cwd=tornado_root)
try:
for i in xrange(50):
with contextlib.closing(socket.socket()) as sock:
err = sock.connect_ex(('localhost', port))
if err == 0:
break
elif err != errno.ECONNREFUSED:
raise Exception("Got unexpected socket
|
error %d" % err)
time.sleep(0.1)
else:
raise Exception("Server didn't start listening")
resp = urllib2.urlopen("http://localhost:%d/" % port)
print(resp.read())
finally:
# dev_appserver sometimes ignores SIGTERM (especially on 2.5),
# so try a few times to kill it.
for sig in [signal.SIGTERM, signal.SIGTERM, signal.SIGKILL]:
os.kill(proc.pid, sig)
res = os.
|
waitpid(proc.pid, os.WNOHANG)
if res != (0, 0):
break
time.sleep(0.1)
else:
os.waitpid(proc.pid, 0)
|
osDanielLee/SelfThinkingRobot
|
AchieveData/CollectData.py
|
Python
|
bsd-3-clause
| 8,610
| 0.038329
|
class CollectData():
"""每小时数据收集类
利用微博高级搜索功能,按关键字搜集一定时间范围内的微博。
大体思路:构造URL,爬取网页,然后解析网页中的微博ID。后续利用微博API进行数据入库。本程序只负责收集微博的ID。
登陆新浪微博,进入高级搜索,输入关键字”空气污染“,选择”实时“,时间为”2013-07-02-2:2013-07-09-2“,地区为”北京“,之后发送请求会发现地址栏变为如下:
http://s.weibo.com/wb/%25E7%25A9%25BA%25E6%25B0%2594%25E6%25B1%25A1%25E6%259F%2593&xsort=time®ion=custom:11:1000×cope=custom:2013-07-02-2:2013-07-09-2&Refer=g
固定地址部分:http://s.weibo.com/wb/
关键字二次UTF-8编码:%25E7%25A9%25BA%25E6%25B0%2594%25E6%25B1%25A1%25E6%259F%2593
排序为“实时”:xsort=time
搜索地区:region=custom:11:1000
搜索时间范围:timescope=custom:2013-07-02-2:2013-07-09-2
可忽略项:Refer=g
显示类似微博:nodup=1 注:这个选项可多收集微博,建议加上。默认不加此参数,省略了部分相似微博。
某次请求的页数:page=1
另外,高级搜索最多返回50页微博,那么时间间隔设置最小为宜。所以该类设置为搜集一定时间段内最多50页微博。
"""
def __init__(self, keyword, startTime, region, savedir, interval='50', flag=True, begin_url_per = "http://s.weibo.com/weibo/"):
self.begin_url_per = begin_url_per #设置固定地址部分,默认为"http://s.weibo.com/weibo/",或者"http://s.weibo.com/wb/"
self.setKeyword(keyword) #设置关键字
self.setStartTimescope(startTime) #设置搜索的开始时间
self.setRegion(region) #设置搜索区域
self.setSave_dir(savedir) #设置结果的存储目录
self.setInterval(interval) #设置邻近网页请求之间的基础时间间隔(注意:过于频繁会被认为是机器人)
self.setFlag(flag) #设置
self.logger = logging.getLogger('main.CollectData') #初始化日志
##设置关键字
##关键字需解码
def setKeyword(self, keyword):
self.keyword = keyword.decode('GBK').encode("utf-8")
print 'twice encode:',self.getKeyWord()
##设置起始范围,间隔为1小时
##格式为:yyyy-mm-dd-HH
def setStartTimescope(self, startTime):
if not (startTime == '-'):
self.timescope = startTime + ":" + startTime
else:
self.timescope = '-'
##设置搜索地区
def setRegion(self, region):
self.region = region
##设置结果的存储目录
def setSave_dir(self, save_dir):
self.save_dir = save_dir
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
##设置邻近网页请求之间的基础时间间隔
def setInterval(self, interval):
self.interval = int(interval)
##设置是否被认为机器人的标志。若为False,需要进入页面,手动输入验证码
def setFlag(self, flag):
self.flag = flag
##构建URL
def getURL(self):
return self.begin_url_per+self.getKeyWord()+"®ion=custom:"+self.region+"&xsort=time×cope=custom:"+self.timescope+"&nodup=1&page="
##关键字需要进行两次urlencode
def getKeyWord(self):
once = urllib.urlencode({"kw":self.keyword})[3:]
return urllib.urlencode({"kw":once})[3:]
##爬取一次请求中的所有网页,最多返回50页
def download(self, url, maxTryNum=4):
content = open(self.save_dir + os.sep + "weibo_ids.txt", "ab") #向结果文件中写微博ID
hasMore = True #某次请求可能少于50页,设置标记,判断是否还有下一页
isCaught = False #某次请求被认为是机器人,设置标记,判断是否被抓住。抓住后,需要复制log中的文件,进入页面,输入验证码
mid_filter = set([]) #过滤重复的微博ID
i = 1 #记录本次请求所返回的页数
while hasMore and i < 51 and (not isCaught): #最多返回50页,对每页进行解析,并写入结果文件
source_url = url + str(i) #构建某页的URL
data = '' #存储该页的网页数据
goon = True #网络中断标记
##网络不好的情况,试着尝试请求三次
for tryNum in range(maxTryNum):
try:
html = urllib2.urlopen(source_url, timeout=12)
data = html.read()
break
except:
if tryNum < (maxTryNum-1):
time.sleep(10)
else:
print 'Internet Connect Error!'
self.logger.error('Internet Connect Error!')
self.logger.info('filePath: ' + savedir)
self.logger.info('url: ' + source_url)
self.logger.info('fileNum: ' + str(fileNum))
self.logger.info('page: ' + str(i))
self.flag = False
goon = False
break
if goon:
lines = data.splitlines()
isCaught = True
for line in lines:
## 判断是否有微博内容,出现这一行,则说明没有被认为是机器人
if line.startswith('<script>STK && STK.pageletM && STK.pageletM.view({"pid":"pl_weibo_direct"'):
isCaught = False
n = line.find('html":"')
if n > 0:
j = line[n + 7: -12].encode("utf-8").decode('unicode_escape').encode("utf-8").replace("\\", "")
## 没有更多结
|
果页面
if (j.find('<div class="search_noresult">') > 0):
hasMore = False
## 有结果的页面
else:
page = etree.HTML(j)
dls = page.xpath(u"//dl") #使用xpath解析
for dl in dls:
|
mid = str(dl.attrib.get('mid'))
if(mid != 'None' and mid not in mid_filter):
mid_filter.add(mid)
content.write(mid)
content.write('\n')
break
lines = None
## 处理被认为是机器人的情况
if isCaught:
print 'Be Caught!'
self.logger.error('Be Caught Error!')
self.logger.info('filePath: ' + savedir)
self.logger.info('url: ' + source_url)
self.logger.info('fileNum: ' + str(fileNum))
self.logger.info('page:' + str(i))
data = None
self.flag = False
break
## 没有更多结果,结束该次请求,跳到下一个请求
if not hasMore:
print 'No More Results!'
if i == 1:
time.sleep(random.randint(55,75))
else:
time.sleep(15)
data = None
break
i += 1
## 设置两个邻近URL请求之间的随机休眠时间,你懂的。目前没有模拟登陆
sleeptime_one = random.randint(self.interval-30,self.interval-10)
sleeptime_two = random.randint(self.interval+10,self.interval+30)
if i%2 == 0:
sleeptime = sleeptime_two
else:
sleeptime = sleeptime_one
print 'sleeping ' + str(sleeptime) + ' seconds...'
time.sleep(sleeptime)
else:
break
content.close()
content = None
##改变搜索的时间范围,有利于获取最多的数据
def getTimescope(self, perTimescope, hours):
if not (perTimescope=='-'):
times_list = perTimescope.split(':')
start_datetime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(times_list[-1],"%Y-%m-%d-%H")))
start_new_datetime = start_datetime + datetime.timedelta(seconds = 3600)
end_new_datetime = start_new_datetime + datetime.timedelta(seconds = 3600*(hours-1))
start_str = start_new_datetime.strftime("%Y-%m-%d-%H")
end_str = end_new_datetime.strftime("%Y-%m-%d-%H")
return start_str + ":" + end_str
else:
return '-'
def main():
logger = logging.getLogger('main')
logFile = './collect.log'
logger.setLevel(logging.DEBUG)
filehandler = logging.FileHandler(logFile)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: %(message)s')
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
while True:
## 接受键盘输入
keyword = raw_input('Enter the keyword(type \'quit\' to exit ):')
if keyword == 'quit':
sys.exit()
startTime = raw_input('Enter the start time(Format:YYYY-mm-dd-HH):')
region = raw_input('Enter the region([BJ]11:1000,[SH]31:1000,[GZ]44:1,[CD]51:1):')
savedir = raw_input('Enter the save directory(Like C://data//):')
interval = raw_input('Enter the time interval( >30 and deafult:50):')
##实例化收集类,收集指定关键字和起始时间的微博
cd = CollectData(keyword, startTime, region, savedir, interval)
while cd.flag:
print cd.timescope
logger.info(cd.timescope)
url = cd.getURL()
cd.download(url)
cd.timescope = cd.getTimescope(cd.timescope,1) #改变搜索的时间,到下一个小时
else:
cd = None
print '-----------------------------------------------------'
print '-----------------------------------------------------'
else:
logger.removeHandler(filehandler)
logger = None
if __name__ == '__main__':
main()
|
gioman/QGIS
|
tests/src/python/test_qgsserver_wfst.py
|
Python
|
gpl-2.0
| 12,220
| 0.001227
|
# -*- coding: utf-8 -*-
"""
Tests for WFS-T provider using QGIS Server through qgis_wrapped_server.py.
This is an integration test for QGIS Desktop WFS-T provider and QGIS Server
WFS-T that check if QGIS can talk to and uderstand itself.
The test uses testdata/wfs_transactional/wfs_transactional.qgs and three
initially empty shapefiles layrs with points, lines and polygons.
All WFS-T calls are executed through the QGIS WFS data provider.
The three layers are
1. populated with WFS-T
2. checked for geometry and attributes
3. modified with WFS-T
4. checked for geometry and attributes
5. emptied with WFS-T calls to delete
From build dir, run: ctest -R PyQgsServerWFST -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '05/15/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import re
import subprocess
from shutil import copytree, rmtree
import tempfile
from utilities import unitTestDataPath, waitServer
from qgis.core import (
QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsPoint,
QgsRectangle,
QgsFeatureRequest,
QgsExpression,
)
from qgis.testing import (
start_app,
unittest,
)
try:
QGIS_SERVER_WFST_PORT = os.environ['QGIS_SERVER_WFST_PORT']
except:
QGIS_SERVER_WFST_PORT = '0' # Auto
qgis_app = start_app()
class TestWFST(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls.port = QGIS_SERVER_WFST_PORT
# Create tmp folder
cls.temp_path = tempfile.mkdtemp()
cls.testdata_path = cls.temp_path + '/' + 'wfs_transactional' + '/'
copytree(unitTestDataPath('wfs_transactional') + '/',
cls.temp_path + '/' + 'wfs_transactional')
cls.project_path = cls.temp_path + '/' + 'wfs_transactional' + '/' + \
'wfs_transactional.qgs'
assert os.path.exists(cls.project_path), "Project not found: %s" % \
cls.project_path
# Clean env just to be sure
env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
for ev in env_vars:
try:
del os.environ[ev]
except KeyError:
pass
# Clear all test layers
for ln in ['test_point', 'test_polygon', 'test_linestring']:
cls._clearLayer(ln)
os.environ['QGIS_SERVER_PORT'] = str(cls.port)
server_path = os.path.dirname(os.path.realpath(__file__)) + \
'/qgis_wrapped_server.py'
cls.server = subprocess.Popen([sys.executable, server_path],
env=os.environ, stdout=subprocess.PIPE)
line = cls.server.stdout.readline()
cls.port = int(re.findall(b':(\d+)', line)[0])
assert cls.port != 0
# Wait for the server process to start
assert waitServer('http://127.0.0.1:%s' % cls.port), "Server is not responding!"
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
cls.server.terminate()
cls.server.wait()
del cls.server
# Clear all test layers
for ln in ['test_point', 'test_polygon', 'test_linestring']:
cls._clearLayer(ln)
rmtree(cls.temp_path)
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
@classmethod
def _clearLayer(cls, layer_name):
"""
Delete all features from a vector layer
"""
layer = cls._getLayer(layer_name)
layer.startEditing()
layer.deleteFeatures([f.id() for f in layer.getFeatures()])
layer.commitChanges()
assert layer.featureCount() == 0
@classmethod
def _getLayer(cls, layer_name):
"""
OGR Layer factory
"""
path = cls.testdata_path + layer_name + '.shp'
layer = QgsVectorLayer(path, layer_name, "ogr")
assert layer.isValid()
return layer
@classmethod
def _getWFSLayer(cls, type_name, layer_name=None):
"""
WFS layer factory
"""
if layer_name is None:
layer_name = 'wfs_' + type_name
parms = {
'srsname': 'EPSG:4326',
'typename': type_name,
'url': 'http://127.0.0.1:%s/?map=%s' % (cls.port,
cls.project_path),
'version': 'auto',
'table': '',
#'sql': '',
}
uri = ' '.join([("%s='%s'" % (k, v)) for k, v in list(parms.items())])
wfs_layer = QgsVectorLayer(uri, layer_name, 'WFS')
assert wfs_layer.isValid()
return wfs_layer
@classmethod
def _getFeatureByAttribute(cls, layer, attr_name, attr_value):
"""
Find the feature and return it, raise exception if not found
"""
request = QgsFeatureRequest(QgsExpression("%s=%s" % (attr_name,
attr_value)))
try:
return next(layer.dataProvider().getFeatures(request))
except StopIteration:
raise Exception("Wrong attributes in WFS layer %s" %
layer.name())
def _checkAddFeatures(self, wfs_layer, layer, features):
"""
Che
|
ck features were added
"""
wfs_layer.dataProvider().addFeatures(features)
layer = self._getLayer(layer.name())
self.assertTrue(layer.isValid())
self.assertEqual(layer.featureCount(), len(features))
self.assertEqual(wfs_layer.dataProvider().featureCount(), len(features))
def _checkUpdateFeatures(self, wfs_layer, old_features, new_features):
|
"""
Check features can be updated
"""
for i in range(len(old_features)):
f = self._getFeatureByAttribute(wfs_layer, 'id', old_features[i]['id'])
self.assertTrue(wfs_layer.dataProvider().changeGeometryValues({f.id(): new_features[i].geometry()}))
self.assertTrue(wfs_layer.dataProvider().changeAttributeValues({f.id(): {0: new_features[i]['id']}}))
self.assertTrue(wfs_layer.dataProvider().changeAttributeValues({f.id(): {1: new_features[i]['name']}}))
def _checkMatchFeatures(self, wfs_layer, features):
"""
Check feature attributes and geometry match
"""
for f in features:
wf = self._getFeatureByAttribute(wfs_layer, 'id', f['id'])
self.assertEqual(wf.geometry().exportToWkt(),
f.geometry().exportToWkt())
self.assertEqual(f['name'], wf['name'])
def _checkDeleteFeatures(self, layer, features):
"""
Delete features
"""
ids = []
for f in features:
wf = self._getFeatureByAttribute(layer, 'id', f['id'])
ids.append(wf.id())
self.assertTrue(layer.dataProvider().deleteFeatures(ids))
def _testLayer(self, wfs_layer, layer, old_features, new_features):
"""
Perform all test steps on the layer.
"""
self.assertEqual(wfs_layer.featureCount(), 0)
self._checkAddFeatures(wfs_layer, layer, old_features)
self._checkMatchFeatures(wfs_layer, old_features)
self.assertEqual(wfs_layer.dataProvider().featureCount(),
len(old_features))
self._checkUpdateFeatures(wfs_layer, old_features, new_features)
self._checkMatchFeatures(wfs_layer, new_features)
self._checkDeleteFeatures(wfs_layer, new_features)
self.assertEqual(wfs_layer.dataProvider().featureCount(), 0)
def testWFSPoints(self):
"""
Adds some points, then check and clear all
"""
layer_name = 'test_point'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(lay
|
reisub-de/dmpr-simulator
|
dmprsim/analyze/profile_core.py
|
Python
|
mit
| 432
| 0
|
import cProfile
from pathlib import Path
def main(args, results_dir: Path, scenario_dir: Path):
try:
scenario_dir.mkdir(parents=True)
except FileExistsError:
pass
cProfile.runctx(
'from dmprsim.scenarios.python_pro
|
file import main;'
'main(args, results_dir, scenario_dir)',
glo
|
bals=globals(),
locals=locals(),
filename=str(results_dir / 'profile.pstats'),
)
|
Distrotech/scons
|
test/Entry.py
|
Python
|
mit
| 1,860
| 0.003226
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is
|
hereby granted, free of charge, to any person obtaining
#
|
a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that the Entry() global function and environment method work
correctly, and that the former does not try to expand construction
variables.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(FOO = 'fff', BAR = 'bbb')
print Entry('ddd')
print Entry('$FOO')
print Entry('${BAR}_$BAR')
print env.Entry('eee')
print env.Entry('$FOO')
print env.Entry('${BAR}_$BAR')
""")
test.run(stdout = test.wrap_stdout(read_str = """\
ddd
$FOO
${BAR}_$BAR
eee
fff
bbb_bbb
""", build_str = """\
scons: `.' is up to date.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
GuessWhoSamFoo/pandas
|
pandas/core/indexes/range.py
|
Python
|
bsd-3-clause
| 24,595
| 0
|
from datetime import timedelta
import operator
from sys import getsizeof
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas.compat as compat
from pandas.compat import get_range_parameters, lrange, range
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes import concat as _concat
from pandas.core.d
|
types.common import (
is_int64_dtype, is_integer, is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.generic import (
ABCDataFrame, ABCSeries, ABCTimedeltaIndex)
from pandas.core import ops
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_share
|
d_docs
from pandas.core.indexes.numeric import Int64Index
class RangeIndex(Int64Index):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
name : object, optional
Name to be stored in the index
copy : bool, default False
Unused, accepted for homogeneity with other index types.
Attributes
----------
None
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = 'rangeindex'
_engine_type = libindex.Int64Engine
# --------------------------------------------------------------------
# Constructors
def __new__(cls, start=None, stop=None, step=None,
dtype=None, copy=False, name=None, fastpath=None):
if fastpath is not None:
warnings.warn("The 'fastpath' keyword is deprecated, and will be "
"removed in a future version.",
FutureWarning, stacklevel=2)
if fastpath:
return cls._simple_new(start, stop, step, name=name)
cls._validate_dtype(dtype)
# RangeIndex
if isinstance(start, RangeIndex):
if name is None:
name = start.name
return cls._simple_new(name=name,
**dict(start._get_data_as_items()))
# validate the arguments
def ensure_int(value, field):
msg = ("RangeIndex(...) must be called with integers,"
" {value} was passed for {field}")
if not is_scalar(value):
raise TypeError(msg.format(value=type(value).__name__,
field=field))
try:
new_value = int(value)
assert(new_value == value)
except (TypeError, ValueError, AssertionError):
raise TypeError(msg.format(value=type(value).__name__,
field=field))
return new_value
if com._all_none(start, stop, step):
msg = "RangeIndex(...) must be called with integers"
raise TypeError(msg)
elif start is None:
start = 0
else:
start = ensure_int(start, 'start')
if stop is None:
stop = start
start = 0
else:
stop = ensure_int(stop, 'stop')
if step is None:
step = 1
elif step == 0:
raise ValueError("Step must not be zero")
else:
step = ensure_int(step, 'step')
return cls._simple_new(start, stop, step, name)
@classmethod
def from_range(cls, data, name=None, dtype=None, **kwargs):
""" Create RangeIndex from a range (py3), or xrange (py2) object. """
if not isinstance(data, range):
raise TypeError(
'{0}(...) must be called with object coercible to a '
'range, {1} was passed'.format(cls.__name__, repr(data)))
start, stop, step = get_range_parameters(data)
return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
@classmethod
def _simple_new(cls, start, stop=None, step=None, name=None,
dtype=None, **kwargs):
result = object.__new__(cls)
# handle passed None, non-integers
if start is None and stop is None:
# empty
start, stop, step = 0, 0, 1
if start is None or not is_integer(start):
try:
return RangeIndex(start, stop, step, name=name, **kwargs)
except TypeError:
return Index(start, stop, step, name=name, **kwargs)
result._start = start
result._stop = stop or 0
result._step = step or 1
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
# --------------------------------------------------------------------
@staticmethod
def _validate_dtype(dtype):
""" require dtype to be None or int64 """
if not (dtype is None or is_int64_dtype(dtype)):
raise TypeError('Invalid to pass a non-int64 dtype to RangeIndex')
@cache_readonly
def _constructor(self):
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self):
return np.arange(self._start, self._stop, self._step, dtype=np.int64)
@cache_readonly
def _int64index(self):
return Int64Index._simple_new(self._data, name=self.name)
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
return [('start', self._start),
('stop', self._stop),
('step', self._step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (self.__class__, d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
# --------------------------------------------------------------------
@cache_readonly
def nbytes(self):
"""
Return the number of bytes in the underlying data
On implementations where this is undetermined (PyPy)
assume 24 bytes for each value
"""
return sum(getsizeof(getattr(self, v), 24) for v in
['_start', '_stop', '_step'])
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self):
return np.dtype(np.int64)
@property
def is_unique(self):
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self):
return self._step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self):
|
ethercrow/ai-challenger
|
game-rps/paper.py
|
Python
|
mit
| 95
| 0.010526
|
#!/usr/bin/e
|
nv python
import sys
for _ in ran
|
ge(101):
print "P\n."
sys.stdout.flush()
|
BhallaLab/benchmarks
|
moose_nrn_equivalence_testing/comparision_with_simple_HH_model/xplot.py
|
Python
|
gpl-2.0
| 2,472
| 0.023463
|
#!/usr/bin/env python
"""xplot.py:
This program uses matplotlib to plot xplot like data.
Last modified: Thu Jul 23, 2015 04:54PM
"""
__author__ = "Dila
|
war Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@iitb.ac.in"
__status__ = "Development"
import sys
|
import pylab
data = {}
def buildData( file ):
global data
with open(file, "r") as f:
xvec = []
yvec = []
for line in f:
if line[0] == ';' or line[0] == '#':
continue
line = line.strip()
if "," in line:
line = line.split(",")
else:
line = line.split()
try:
xvec.append(float(line[0]))
yvec.append(line[1:])
except:
pass
assert len(xvec) == len(yvec)
data[file] = (xvec, yvec)
def zipIt(ys):
""" Zip an n-dims vector.
There are as many sublists as there are elements in each element of list.
"""
result = [[ ] for x in ys[0] ]
for y in ys:
for i, e in enumerate(y):
result[i].append(e)
return result
def plotData( args ):
outFile = args.output
global data
for file in data:
xvec, yx = data[file]
try:
yvecs = zipIt(yx)
except Exception as e:
print("[FATAL] Failed to zip the given elements")
sys.exit(0)
for yvec in yvecs:
pylab.plot(xvec, yvec)
if args.title:
pylab.title(str(args.title))
if not outFile:
pylab.show()
else:
print("[INFO] Saving plots to: {}".format( outFile ))
pylab.savefig(outFile)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--file"
, nargs = "+"
, help = "xplot file to plot using matplotlib"
)
parser.add_argument("-o", "--output"
, default = None
, help = "Output file to store plot"
)
parser.add_argument("-t", "--title"
, default = ""
, help = "Title of the plot"
)
args = parser.parse_args()
[ buildData(file) for file in args.file ]
plotData( args )
|
mhmurray/cloaca
|
setup.py
|
Python
|
mit
| 696
| 0.018678
|
from setuptools import setup
import minify.command
setup(name='cloaca',
version='0.1.0',
url='https://github.com/mhmurray/cloaca',
author='Michael Murray',
author_email='mich
|
aelhamburgmurray@gmail.com',
license='MIT',
packages=['cloaca'],
zip_safe=False,
include_package_data=True,
scripts=[
'cloaca/cloacaapp.py'
],
install_requires=[
'tornado>=4.3.0',
'tornadis>=0.7.0',
'bcrypt>=2.0.0',
'futures>=3.0.5',
'minify',
],
cmdclass={
|
'minify_css' : minify.command.minify_css,
},
)
|
wolverineav/neutron
|
neutron/agent/metadata/config.py
|
Python
|
apache-2.0
| 5,135
| 0
|
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron._i18n import _
from neutron.common import utils
SHARED_OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location for Metadata Proxy UNIX domain socket.')),
cfg.StrOpt('metadata_proxy_user',
default='',
help=_("User (uid or name) running metadata proxy after "
"its initialization (if empty: agent effective "
"user).")),
cfg.StrOpt('metadata_proxy_group',
default='',
help=_("Group (gid or name) running metadata proxy after "
"its initialization (if empty: agent effective "
"group)."))
]
DRIVER_OPTS = [
cfg.BoolOpt('metadata_proxy_watch_log',
help=_("Enable/Disable log watch by metadata proxy. It "
"should be disabled when metadata_proxy_user/group "
"is not allowed to read/write its log file and "
"copytruncate logrotate option must be used if "
"logrotate is enabled on metadata proxy log "
"files. Option default value is deduced from "
"metadata_proxy_user: watch log is enabled if "
"metadata_proxy_user is agent effective user "
"id/name.")),
]
METADATA_PROXY_HANDLER_OPTS = [
cfg.StrOpt('auth_ca_cert',
help=_("Certificate Authority public key (CA cert) "
"file for ssl")),
cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
help=_("IP address used by Nova metadata server.")),
cfg.PortOpt('nova_metadata_port',
default=8775,
help=_("TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_proxy_shared_secret',
default='',
help=_('When proxying metadata requests, Neutron signs the '
'Instance-ID hea
|
der with a shared secret to prevent '
'spoof
|
ing. You may select any string for a secret, '
'but it must match here and in the configuration used '
'by the Nova Metadata Server. NOTE: Nova uses the same '
'config key, but in [neutron] section.'),
secret=True),
cfg.StrOpt('nova_metadata_protocol',
default='http',
choices=['http', 'https'],
help=_("Protocol to access nova metadata, http or https")),
cfg.BoolOpt('nova_metadata_insecure', default=False,
help=_("Allow to perform insecure SSL (https) requests to "
"nova metadata")),
cfg.StrOpt('nova_client_cert',
default='',
help=_("Client certificate for nova metadata api server.")),
cfg.StrOpt('nova_client_priv_key',
default='',
help=_("Private key of client certificate."))
]
DEDUCE_MODE = 'deduce'
USER_MODE = 'user'
GROUP_MODE = 'group'
ALL_MODE = 'all'
SOCKET_MODES = (DEDUCE_MODE, USER_MODE, GROUP_MODE, ALL_MODE)
UNIX_DOMAIN_METADATA_PROXY_OPTS = [
cfg.StrOpt('metadata_proxy_socket_mode',
default=DEDUCE_MODE,
choices=SOCKET_MODES,
help=_("Metadata Proxy UNIX domain socket mode, 4 values "
"allowed: "
"'deduce': deduce mode from metadata_proxy_user/group "
"values, "
"'user': set metadata proxy socket mode to 0o644, to "
"use when metadata_proxy_user is agent effective user "
"or root, "
"'group': set metadata proxy socket mode to 0o664, to "
"use when metadata_proxy_group is agent effective "
"group or root, "
"'all': set metadata proxy socket mode to 0o666, to use "
"otherwise.")),
cfg.IntOpt('metadata_workers',
default=utils.cpu_count() // 2,
help=_('Number of separate worker processes for metadata '
'server (defaults to half of the number of CPUs)')),
cfg.IntOpt('metadata_backlog',
default=4096,
help=_('Number of backlog requests to configure the '
'metadata server socket with'))
]
|
google-research/google-research
|
representation_batch_rl/batch_rl/train_eval_online.py
|
Python
|
apache-2.0
| 9,735
| 0.006471
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
# pylint: disable=line-too-long
r"""Run training loop.
"""
# pylint: enable=line-too-long
import os
import random
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.specs.tensor_spec import TensorSpec
import tqdm
from representation_batch_rl.batch_
|
rl import asac
from representation_batch_rl.batch_rl import awr
from representation_batch_rl.batch_rl import ddpg
from representation_batch_rl.batch_rl import evaluation
from representation_batch_rl.batch_rl import pcl
from representation_batch_rl.batch_rl import sac
from representation_batch_rl.batch_rl import sac_v1
from representation_batch_rl.batch_rl.image_utils import image_aug
from represent
|
ation_batch_rl.twin_sac import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('env_name', 'pixels-dm-cartpole-swingup',
'Environment for training/evaluation.')
flags.DEFINE_integer('seed', 42, 'Fixed random seed for training.')
flags.DEFINE_float('actor_lr', 3e-4, 'Actor learning rate.')
flags.DEFINE_float('alpha_lr', 3e-4, 'Temperature learning rate.')
flags.DEFINE_float('critic_lr', 3e-4, 'Critic learning rate.')
flags.DEFINE_integer('deployment_batch_size', 1, 'Batch size.')
flags.DEFINE_integer('sample_batch_size', 256, 'Batch size.')
flags.DEFINE_float('discount', 0.99, 'Discount used for returns.')
flags.DEFINE_float('tau', 0.005,
'Soft update coefficient for the target network.')
flags.DEFINE_integer('max_timesteps', 200_000, 'Max timesteps to train.')
flags.DEFINE_integer('max_length_replay_buffer', 100_000,
'Max replay buffer size (image observations use 100k).')
flags.DEFINE_integer('num_random_actions', 10_000,
'Fill replay buffer with N random actions.')
flags.DEFINE_integer('start_training_timesteps', 10_000,
'Start training when replay buffer contains N timesteps.')
flags.DEFINE_string('save_dir', '/tmp/save/', 'Directory to save results to.')
flags.DEFINE_integer('log_interval', 1_000, 'Log every N timesteps.')
flags.DEFINE_integer('eval_interval', 10_000, 'Evaluate every N timesteps.')
flags.DEFINE_integer('action_repeat', 8,
'(optional) action repeat used when instantiating env.')
flags.DEFINE_integer('frame_stack', 0,
'(optional) frame stack used when instantiating env.')
flags.DEFINE_enum('algo_name', 'sac', [
'ddpg',
'crossnorm_ddpg',
'sac',
'pc_sac',
'pcl',
'crossnorm_sac',
'crr',
'awr',
'sac_v1',
'asac',
], 'Algorithm.')
flags.DEFINE_boolean('eager', False, 'Execute functions eagerly.')
def main(_):
if FLAGS.eager:
tf.config.experimental_run_functions_eagerly(FLAGS.eager)
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
action_repeat = FLAGS.action_repeat
_, _, domain_name, _ = FLAGS.env_name.split('-')
if domain_name in ['cartpole']:
FLAGS.set_default('action_repeat', 8)
elif domain_name in ['reacher', 'cheetah', 'ball_in_cup', 'hopper']:
FLAGS.set_default('action_repeat', 4)
elif domain_name in ['finger', 'walker']:
FLAGS.set_default('action_repeat', 2)
FLAGS.set_default('max_timesteps', FLAGS.max_timesteps // FLAGS.action_repeat)
env = utils.load_env(
FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack)
eval_env = utils.load_env(
FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack)
is_image_obs = (isinstance(env.observation_spec(), TensorSpec) and
len(env.observation_spec().shape) == 3)
spec = (
env.observation_spec(),
env.action_spec(),
env.reward_spec(),
env.reward_spec(), # discount spec
env.observation_spec() # next observation spec
)
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
spec, batch_size=1, max_length=FLAGS.max_length_replay_buffer)
@tf.function
def add_to_replay(state, action, reward, discount, next_states):
replay_buffer.add_batch((state, action, reward, discount, next_states))
hparam_str = utils.make_hparam_string(
FLAGS.xm_parameters, seed=FLAGS.seed, env_name=FLAGS.env_name,
algo_name=FLAGS.algo_name)
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.save_dir, 'tb', hparam_str))
results_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.save_dir, 'results', hparam_str))
if 'ddpg' in FLAGS.algo_name:
model = ddpg.DDPG(
env.observation_spec(),
env.action_spec(),
cross_norm='crossnorm' in FLAGS.algo_name)
elif 'crr' in FLAGS.algo_name:
model = awr.AWR(
env.observation_spec(),
env.action_spec(), f='bin_max')
elif 'awr' in FLAGS.algo_name:
model = awr.AWR(
env.observation_spec(),
env.action_spec(), f='exp_mean')
elif 'sac_v1' in FLAGS.algo_name:
model = sac_v1.SAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
elif 'asac' in FLAGS.algo_name:
model = asac.ASAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
elif 'sac' in FLAGS.algo_name:
model = sac.SAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0],
cross_norm='crossnorm' in FLAGS.algo_name,
pcl_actor_update='pc' in FLAGS.algo_name)
elif 'pcl' in FLAGS.algo_name:
model = pcl.PCL(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
initial_collect_policy = random_tf_policy.RandomTFPolicy(
env.time_step_spec(), env.action_spec())
dataset = replay_buffer.as_dataset(
num_parallel_calls=tf.data.AUTOTUNE,
sample_batch_size=FLAGS.sample_batch_size)
if is_image_obs:
# Augment images as in DRQ.
dataset = dataset.map(image_aug,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=False).prefetch(3)
else:
dataset = dataset.prefetch(3)
def repack(*data):
return data[0]
dataset = dataset.map(repack)
replay_buffer_iter = iter(dataset)
previous_time = time.time()
timestep = env.reset()
episode_return = 0
episode_timesteps = 0
step_mult = 1 if action_repeat < 1 else action_repeat
for i in tqdm.tqdm(range(FLAGS.max_timesteps)):
if i % FLAGS.deployment_batch_size == 0:
for _ in range(FLAGS.deployment_batch_size):
if timestep.is_last():
if episode_timesteps > 0:
current_time = time.time()
with summary_writer.as_default():
tf.summary.scalar(
'train/returns',
episode_return,
step=(i + 1) * step_mult)
tf.summary.scalar(
'train/FPS',
episode_timesteps / (current_time - previous_time),
step=(i + 1) * step_mult)
timestep = env.reset()
episode_return = 0
episode_timesteps = 0
previous_time = time.time()
if (replay_buffer.num_frames() < FLAGS.num_random_actions or
replay_buffer.num_frames() < FLAGS.deployment_batch_size):
# Use policy only after the first deployment.
policy_step = initial_collect_policy.action(timestep)
action = policy_step
|
Sjc1000/PyRC
|
Commands/join.py
|
Python
|
gpl-2.0
| 505
| 0.00396
|
#!/usr/bin/env python
def run(c, *channels):
server = c['MainWindow'].ui
|
_plugins['ServerList'].active_server
connection = c['MainWindow'].ui_plugins['ServerList'].servers[server]['connection']
if isinstance(channels, str):
|
channels = [channels]
for channel in channels:
if channel.startswith('#') is False:
channel = '#' + channel
channel = channel.replace('\n', '').replace('\r', '')
connection.send('JOIN ' + channel.strip())
return None
|
Gustry/QuickOSM
|
QuickOSM/test/test_saved_query.py
|
Python
|
gpl-2.0
| 17,258
| 0.002318
|
"""Tests for the preset and the history of queries."""
import json
import os
from qgis.core import QgsCoordinateReferenceSystem, QgsRectangle
from qgis.testing import unittest
from QuickOSM.core.utilities.json_encoder import as_enum
from QuickOSM.core.utilities.query_saved import QueryManagement
from QuickOSM.core.utilities.tools import query_preset
from QuickOSM.definitions.format import Format
from QuickOSM.definitions.gui import Panels
from QuickOSM.ui.dialog import Dialog
from QuickOSM.ui.edit_preset import EditPreset
__copyright__ = 'Copyright 2021, 3Liz'
__license__ = 'GPL version 3'
__email__ = 'info@3liz.org'
class TestBookmarkQuery(unittest.TestCase):
"""Tests for the preset and the history of queries."""
def setUp(self):
"""Set up the tests"""
self.maxDiff = None
self.preset_folder = query_preset()
self.dialog = Dialog()
index = self.dialog.table_keys_values_qq.cellWidget(0, 1).findText('amenity')
self.dialog.table_keys_values_qq.cellWidget(0, 1).setCurrentIndex(index)
index = self.dialog.table_keys_values_qq.cellWidget(0, 2).findText('bench')
self.dialog.table_keys_values_qq.cellWidget(0, 2).setCurrentIndex(index)
self.dialog.places_edits[Panels.QuickQuery].setText('foo')
self.dialog.button_save_query.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
def set_up_preset_data_text(self) -> dict:
"""Load the data save in the json file linked to the preset."""
preset_file = os.path.join(
self.preset_folder, self.name_preset, self.name_preset + '.json')
with open(preset_file, encoding='utf8') as json_file:
data_preset = json.load(json_file)
return data_preset
def set_up_preset_data(self) -> dict:
"""Load the data save in the json file linked to the preset."""
preset_folder = query_preset()
preset_file = os.path.join(
preset_folder, self.name_preset, self.name_preset + '.json')
with open(preset_file, encoding='utf8') as json_file:
data_preset = json.load(json_file, object_hook=as_enum)
return data_preset
def tearDown(self):
"""End of the tests"""
self.dialog.external_panels[Panels.MapPreset].remove_preset(self.preset, self.name_preset)
def test_save_in_preset(self):
"""Test if the file is save in preset."""
nb_preset = self.dialog.list_personal_preset_mp.count()
self.assertEqual(nb_preset, 1)
self.assertEqual(self.name_preset, 'amenity_bench_foo')
def test_preset_format(self):
"""Test if the file in preset is as expected."""
data_preset = self.set_up_preset_data_text()
expected_json = {
"query":
[
"[out:xml] [timeout:25];\n {{geocodeArea:foo}} -> .area_0;\n(\n"
" node[\"amenity\"=\"bench\"](area.area_0);\n "
"way[\"amenity\"=\"bench\"](area.area_0);\n "
"relation[\"amenity\"=\"bench\"](area.area_0);\n);\n"
"(._;>;);\nout body;"
],
"description":
["All OSM objects with the key 'amenity'='bench' in foo are going to be downloaded."],
"advanced": False,
"file_name": "amenity_bench_foo",
"query_layer_name": ["amenity_bench_foo"],
"query_name": ["Query1"],
"type_multi_request": [[]],
"keys": [["amenity"]],
"values": [["bench"]],
"area": ["foo"],
"bbox": [""],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
]
],
"white_list_column":
[{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None}],
"output_directory": [""],
"output_format": [{"__enum__": "Format.GeoPackage"}]
}
self.assertDictEqual(expected_json, data_preset)
def test_view_bookmark(self):
"""Test if we can display a preset."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
self.assertEqual(data_preset['file_name'], edit_dialog.preset_name.text())
self.assertEqual(
data_preset['description'], edit_dialog.description.toPlainText().split('\\n')
)
self.assertEqual(data_preset['query_layer_name'][0], edit_dialog.layer_name.text())
self.assertEqual(data_preset['query'][0], edit_dialog.query.toPlainText())
self.assertEqual(data_preset['area'][0], edit_dialog.area.text())
self.assertFalse(edit_dialog.bbox.outputExtent().xMinimum())
self.assertFalse(edit_dialog.bbox.outputExtent().yMinimum())
self.assertTrue(edit_dialog.checkbox_points.isChecked())
self.assertTrue(edit_dialog.checkbox_lines.isChecked())
self.assertTrue(edit_dialog.checkbox_multilinestrings.isChecked())
self.assertTrue(edit_dialog.checkbox_multipolygons.isChecked())
self.assertFalse(edit_dialog.white_points.text())
self.assertFalse(edit_dialog.white_lines.text())
self.assertFalse(edit_dialog.white_multilinestrings.text())
self.assertFalse(edit_dialog.white_multipolygons.text())
self.assertEqual(edit_dialog.combo_output_format.currentData(), Format.GeoPackage)
self.assertEqual(
data_preset['output_directory'][0], edit_dialog.output_directory.filePath()
)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 1)
edit_dialog.preset_name.setText('Test a new name')
edit_dialog.button_cancel.click()
self.dialog.external_panels[Panels.MapPreset].update_personal_preset_view()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
self.assertNotEqual(self.name_preset, 'Test_a_new_name')
def test_edit_rename_bookmark(self):
"""Test if we can edit and rename a preset."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
edit_dialog.preset_name.setText('Test a new name')
edit_dialog.button_validate.click()
self.dialog.external_panels[Panels.MapPreset].update_personal_preset_view()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
self.assertEqual(self.name_preset, 'Test_a_new_name')
def test_edited_bookmark_file(self):
"""Test if we can edit a preset and check the edited json file."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
edit_dialog.description.setPlainText('Be or not to be...\\nShakespear')
edit_dialog.layer_name.setText('Misery')
edit_dialog.query.setPlainText('I would like two pencils please.')
edit_dialog.checkbox_points.setChecked(True)
edit_dialog.checkbox_lines.setChecked(True)
edit_dialog.checkbox_multilinestrings.setChecked(False)
edit_dialog.checkbox_multipolygons.setChecked(False)
edit_dialog.whi
|
te_points.setText('name')
index = edit_dialog.combo_output_format.findData(Format.Kml)
edit_dialog.combo_output_format.setCurrentIndex(index)
edit_dialog.button_validat
|
e.click()
self.preset =
|
Jumpscale/jumpscale_portal8
|
lib/portal/docpreprocessor/DocHandler.py
|
Python
|
apache-2.0
| 2,305
| 0.004338
|
from JumpScale import j
# import re
import os
# import jinja2
from watchdog.events import FileSystemEventHandler
# The default Observer on Linux (InotifyObserver) hangs in the call to `observer.schedule` because the observer uses `threading.Lock`, which is
# monkeypatched by `gevent`. To work around this, I use `PollingObserver`. It's more CPU consuming than `InotifyObserver`, but still better than
# reloading the doc processor
#
#from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver as Observer
class DocHandler(FileSystemEventHandler):
def __init__(self, doc_processor):
|
self.doc_processor = doc_processor
def on_created(self, event):
print(('Document {} added'.format(event.src_path)))
pat
|
h = os.path.dirname(event.src_path)
pathItem = event.src_path
docs = []
if pathItem:
lastDefaultPath = ""
if pathItem.endswith('.wiki'):
lastDefaultPath = os.path.join(self.doc_processor.space_path, '.space', 'default.wiki')
elif pathItem.endswith('.md'):
lastDefaultPath = os.path.join(self.doc_processor.space_path, '.space', 'default.md')
elif pathItem.endswith('.py'):
self.reloadMacro(event)
self.doc_processor.add_doc(pathItem, path, docs=docs, lastDefaultPath=lastDefaultPath)
self.doc_processor.docs[-1].loadFromDisk()
self.doc_processor.docs[-1].preprocess()
def on_modified(self, event):
if event.src_path and not event.is_directory and event.src_path.endswith(".py"):
self.reloadMacro(event)
def reloadMacro(self, event):
for macroexecute in (self.doc_processor.macroexecutorPreprocessor,
self.doc_processor.macroexecutorWiki, self.doc_processor.macroexecutorPage):
for groupname, taskletenginegroup in list(macroexecute.taskletsgroup.items()):
for group, taskletengine in list(taskletenginegroup.taskletEngines.items()):
for tasklet in taskletengine.tasklets:
if tasklet.path == event.src_path:
taskletengine.reloadTasklet(tasklet)
return
on_moved = on_created
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SingleCrystalDiffuseReduction.py
|
Python
|
gpl-3.0
| 21,263
| 0.007149
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from mantid.api import (DataProcessorAlgorithm, mtd, AlgorithmFactory,
FileProperty, FileAction,
MultipleFileProperty, WorkspaceProperty,
PropertyMode, Progress,
MatrixWorkspaceProperty,
ITableWorkspaceProperty)
from mantid.simpleapi import (LoadIsawUB, MaskDetectors, ConvertUnits,
CropWorkspace, LoadInstrument,
SetGoniometer, ConvertToMD, MDNorm,
MinusMD, Load, DeleteWorkspace,
RenameWorkspaces,
CreateSingleValuedWorkspace, LoadNexus,
MultiplyMD, LoadIsawDetCal, LoadMask,
CopyInstrumentParameters,
ApplyCalibration, CopySample,
RecalculateTrajectoriesExtents,
CropWorkspaceForMDNorm)
from mantid.kernel import VisibleWhenProperty, PropertyCriterion, FloatArrayLengthValidator, FloatArrayProperty, Direction, Property
from mantid import logger
import numpy as np
class SingleCrystalDiffuseReduction(DataProcessorAlgorithm):
temp_workspace_list = ['__run', '__md', '__data', '__norm',
'__bkg', '__bkg_md', '__bkg_data', '__bkg_norm',
'__normalizedData', '__normalizedBackground',
'PreprocessedDetectorsWS']
def category(self):
return "Diffraction\\Reduction"
def seeAlso(self):
return [ "ConvertToMD","MDNormSCDPreprocessIncoherent","MDNorm" ]
def name(self):
return "SingleCrystalDiffuseReduction"
def summary(self):
return "Single Crystal Diffuse Scattering Reduction, normalisation, symmetry and background substraction"
def PyInit(self):
# files to reduce
self.declareProperty(MultipleFileProperty(name="Filename",
extensions=["_event.nxs", ".nxs.h5", ".nxs"]),
"Files to combine in reduction")
# background
self.declareProperty(FileProperty(name="Background",defaultValue="",action=FileAction.OptionalLoad,
extensions=["_event.nxs", ".nxs.h5", ".nxs"]),
"Background run")
self.declareProperty("BackgroundScale", 1.0,
doc="The background will be scaled by this number before being subtracted.")
# Filter by TOF
self.copyProperties('LoadEventNexus', ['FilterByTofMin', 'FilterByTofMax'])
# Vanadium SA and flux
self.declareProperty("ReuseSAFlux", True, "If True then if a previous SolidAngle and Flux has been loaded "
"it will be reused otherwise it will be loaded.")
self.declareProperty(FileProperty(name="SolidAngle",defaultValue="",action=FileAction.Load,
extensions=[".nxs"]),
doc="An input workspace containing momentum integrated vanadium (a measure "
"of the solid angle). See :ref:`MDNormSCDPreprocessIncoherent <algm-MDNormSCDPreprocessIncoherent>` "
"for details")
self.declareProperty(FileProperty(name="Flux",defaultValue="",action=FileAction.Load,
extensions=[".nxs"]),
"An input workspace containing momentum dependent flux. See :ref:`MDnormSCD <algm-MDnormSCD>` for details")
self.declareProperty('MomentumMin', Property.EMPTY_DBL,
doc="Minimum value in momentum. The max of this value and the flux momentum minimum will be used.")
self.declareProperty('MomentumMax', Property.EMPTY_DBL,
doc="Maximum value in momentum. The min of this value and the flux momentum maximum will be used.")
# UBMatrix
self.declareProperty(MultipleFileProperty(name="UBMatrix",
extensions=[".mat", ".ub", ".txt"]),
doc="Path to an ISAW-style UB matrix text file. See :ref:`LoadIsawUB <algm-LoadIsawUB>`")
# Goniometer
self.declareProperty('SetGoniometer', False, "Set which Goniometer to use. See :ref:`SetGoniometer <algm-SetGo
|
niometer>`")
condition = VisibleWhenProperty("SetGoniometer", PropertyCriterion.IsNotDefault)
self.copyProperties('SetGoniometer', ['Goniometers', 'Axis0', 'Axis1', 'Axis2'])
self.setPropertySettings("Goniometers", condition)
self.setPropertySettings('Axis0', condition)
self.setPropertySettings('Axis1', condition)
self.setPropertySettings('Axis2', condition)
self.declareProperty(FloatArra
|
yProperty('OmegaOffset', [], direction=Direction.Input),
doc="Offset to apply to the omega rotation of the Goniometer. Need to provide one value for every run.")
# Corrections
self.declareProperty(FileProperty(name="LoadInstrument",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".xml"]),
"Load a different instrument IDF onto the data from a file. See :ref:`LoadInstrument <algm-LoadInstrument>`")
self.declareProperty(ITableWorkspaceProperty("ApplyCalibration", '',
optional=PropertyMode.Optional,
direction=Direction.Input),
doc='Calibration will be applied using this TableWorkspace using '
':ref:`ApplyCalibration <algm-ApplyCalibration>`.')
self.declareProperty(FileProperty(name="DetCal",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".detcal"]),
"Load an ISAW DetCal calibration onto the data from a file. "
"See :ref:`LoadIsawDetCal <algm-LoadIsawDetCal>`")
self.declareProperty(MatrixWorkspaceProperty("CopyInstrumentParameters", '',
optional=PropertyMode.Optional,
direction=Direction.Input),
doc='The input workpsace from which :ref:`CopyInstrumentParameters <algm-CopyInstrumentParameters>` '
'will copy parameters to data')
self.declareProperty(FileProperty(name="MaskFile",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".xml",".msk"]),
"Masking file for masking. Supported file format is XML and ISIS ASCII. See :ref:`LoadMask <algm-LoadMask>`")
self.copyProperties('MDNorm', ['SymmetryOperations'])
self.declareProperty(FloatArrayProperty('QDimension0', [1, 0, 0], FloatArrayLengthValidator(3), direction=Direction.Input),
"The first Q projection axis")
self.declareProperty(FloatArrayProperty('QDimension1', [0, 1, 0], FloatArrayLengthValidator(3), direction=Direction.Input),
"The second Q projection axis")
self.declareProperty(FloatArrayProperty('QDimension2', [0, 0, 1], FloatArrayLengthValidator(3), direction=Direction.Input),
"The third Q projection axis")
self.copyProperties('MDNorm', ['Dimension0Binning', 'Dimension1Binning', 'Dimension2Binning'])
self.declareProperty('KeepTemporaryWorkspaces', False,
"If True the normaliz
|
gaoxiaofeng/troubleShooting
|
src/troubleshooting/framework/output/writehtml.py
|
Python
|
apache-2.0
| 9,570
| 0.009613
|
# -*- coding: utf-8 -*-
from troubleshooting.framework.modules.manager import ManagerFactory
from troubleshooting.framework.variable.variable import *
from troubleshooting.framework.libraries.baseList import list2stringAndFormat
from troubleshooting.framework.libraries.system import createDir
from troubleshooting.framework.modules.configuration import ConfigManagerInstance
import time
import os,sys
from htmltemplate import *
import re
class html(object):
def __init__(self):
super(html,self).__init__()
self.caseResult = ManagerFactory().getManager(LAYER.Case).case_record
self.currenttime = time.strftime("%Y-%m-%d %X %Z",time.localtime())
def write(self):
data = ""
data += HTML_BEFORE
data += HTML_HEAD
data +="""
<body bgcolor = "#E9EAEE">
<h1 align="center">TroubleShooting Framework Report</h1>
<p><i>%s</i></p>
<table width="100%%" border="2" class="bordered">
<thead>
<tr ><th width="15%%">CaseName</th><th width="5%%" >Status</th><th width="80%%">Attribute</th></tr>
</thead>
<tbody>
"""%(self.currenttime,)
recovery_id = 1
for i,caseName in enumerate(self.caseResult):
i += 1
caseStatus = self.caseResult[caseName]["STATUS"]
DESCRIPTION = self.caseResult[caseName]["DESCRIPTION"]
REFERENCE = self.caseResult[caseName]["REFERENCE"]
REFERENCEHtml = '<a href="%s">reference document</>'%REFERENCE if REFERENCE else '<font color="#d0d0d0">NA</font>'
TAGS = self.caseResult[caseName]["TAGS"]
TESTPOINT = self.caseResult[caseName]["TESTPOINT"]
parent_pass = """
<tr bgcolor="#53C579" class="parent" id="row_0%s"><td colspan="1">%s</td><td>PASS</td><td colspan="1"></td></tr>"""%(i,caseName,)
parent_fail = """
<tr bgcolor="#FF3030" class="parent" id="row_0%s"><td colspan="1">%s</td><td>FAIL</td><td colspan="1"></td></tr>"""%(i,caseName,)
parent_warn = """
<tr bgcolor="#FF7F00" class="parent" id="row_0%s"><td colspan="1">%s</td><td>WARN</td><td colspan="1"></td></tr>"""%(i,caseName,)
if caseStatus:
data += parent_pass
else:
_level = self.caseResult[caseName]["LEVEL"]
if _level is LEVEL.CRITICAL:
data += parent_fail
else:
data += parent_warn
data += """
<tr class="child_row_0%s" style="display:none"><td>Description</td><td></td><td>%s</td></tr>
<tr class="child_row_0%s" style="display:none"><td>Reference</td><td></td><td>%s</td></tr>
<tr class="child_row_0%s" style="display:none"><td>Tags</td><td></td><td>%s</td></tr>
"""%(i,DESCRIPTION,i,REFERENCEHtml,i,TAGS)
data += """
<tr class="child_row_0%s" style="display:none">
<td colspan="3" >
<table border="1" width="100%%
|
" style="margin:0px">
"""%i
data += """
<tr>
<th width="5%%">
<b>TestPoint</b>
</th>
<th wid
|
th="5%%">
<b>Status</b>
</th>
<th width="5%%">
<b>Level</b>
</th>
<th width="15%%" name="nolog">
<b>Impact</b>
</th>
<th width="35%%" name="nolog">
<b>Root Cause</b>
</th>
<th width="15%%" name="nolog">
<b>Fix Method</b>
</th>
<th width="20%%" name="nolog">
<b>Auto Fix Method</b>
</th>
<th style="display:none;" width="85%%" name="log">
<b>LOG</b>
</th>
</tr>
"""
for testpoint in TESTPOINT:
testpointStatus = TESTPOINT[testpoint]["STATUS"]
testpointStatusHtml = '<font color="green"><b><i>%s</i></b></font>' % STATUS.PASS.value.lower() if testpointStatus else '<font color="red"><b><i>%s</i></b></font>' % STATUS.FAIL.value.lower()
testpointImpact = TESTPOINT[testpoint]["IMPACT"]
testpointImpact = list2stringAndFormat(testpointImpact)
if not testpointImpact:
testpointImpact = '<font color="#d0d0d0">NA</font>'
testpointImpactHtml = testpointImpact.replace("\n","</br>")
testpointLevel = TESTPOINT[testpoint]["LEVEL"]
testpointLevelHtml = testpointLevel.value
testpointDescribe = TESTPOINT[testpoint]["DESCRIBE"]
testpointRCA = TESTPOINT[testpoint]["RCA"]
testpointRCA = list2stringAndFormat(testpointRCA)
if not testpointRCA:
testpointRCA = '<font color="#d0d0d0">NA</font>'
testpointRCAHtml = testpointRCA.replace("\n","</br>")
testpointFIXSTEP = TESTPOINT[testpoint]["FIXSTEP"]
testpointFIXSTEP = list2stringAndFormat(testpointFIXSTEP)
if not testpointFIXSTEP:
testpointFIXSTEP = '<font color="#d0d0d0">NA</font>'
testpointFIXSTEPHtml = testpointFIXSTEP.replace("\n","</br>")
testpointAutoFixStep = TESTPOINT[testpoint]["AUTOFIXSTEP"]
if not testpointAutoFixStep:
testpointAutoFixStep = '<font color="#d0d0d0">NA</font>'
else:
if ConfigManagerInstance.config["Host"]:
reportHash = ConfigManagerInstance.config["__ReportHash__"]
reportName = ConfigManagerInstance.config["__ReportName__"]
host = ConfigManagerInstance.config["Host"]
port = ConfigManagerInstance.config["Port"]
user = ConfigManagerInstance.config["User"]
password = ConfigManagerInstance.config["Password"]
cwd =ConfigManagerInstance.config["__ProjectCWD__"]
recovery = {"ProjectDir":cwd,"Host":host,"Port":port,"User":user,"Password":password,"Recovery":",".join(testpointAutoFixStep)}
testpointAutoFixStep = """
<iframe scrolling="no" src="/www/iframe/growl-genie.html?recovery=%s&reportHash=%s&reportName=%s"></iframe>
"""%(recovery,reportHash,reportName)
testpointAutoFixStepHtml = testpointAutoFixStep
testpointLog = TESTPOINT[testpoint]["LOG"]
testpointLogHtml = testpointLog
pattern = re.compile(r"\<.+\>")
match = pattern.finditer(testpointLog)
if match:
for m in match:
className = m.group()
testpointLogHtml = testpointLogHtml.replace(className,'<font color="#FFB90F">%s</font>'%className)
testpointLogHtml = testpointLogHtml.replace("\n", "</br>")
testpointTimeout = TESTPOINT[testpoint]["TIMEOUT"]
testpointCost = TESTPOINT[testpoint]["COST"]
testpointHtml = '<i title="Timeout: %s\nCostTime: %s">%s<i>'%(testpointTimeout,testpointCost,testpoint.strip("{}"))
attribute = """
<tr>
<td>
<i>%s</i>
</td>
<td>
<i>%s</i>
</td>
<td>
|
ptthiem/nose2
|
nose2/tests/unit/test_functions_loader.py
|
Python
|
bsd-2-clause
| 1,464
| 0
|
import unittest
from nose2 import events, loader, session
from nose2.plugins.loader import functions
from nose2.tests._common import TestCase
class TestFunctionLoader(TestCase):
def setUp(self):
self.session = session.Session()
self.loader = loader.PluggableTestLoader(self.session)
self.plugin = functions.Functions(session=self.session)
def test_can_load_test_functions_from_module(self):
class Mod(object):
pass
def test():
pass
m = Mod()
|
m.test = test
event = events.LoadFromModule
|
Event(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 1)
assert isinstance(event.extraTests[0], unittest.FunctionTestCase)
def test_ignores_generator_functions(self):
class Mod(object):
pass
def test():
yield
m = Mod()
m.test = test
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 0)
def test_ignores_functions_that_take_args(self):
class Mod(object):
pass
def test(a):
pass
m = Mod()
m.test = test
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 0)
|
PythonCharmers/orange3
|
Orange/src/setup.py
|
Python
|
gpl-3.0
| 324
| 0.018519
|
from distutils.core import setup
from distutils.extension import Extension
from Cyth
|
on.Distutils import build_ext
import numpy as np
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("_orange", ["_orange.pyx"],
include_dirs=[np.get_inclu
|
de()]
)]
)
|
peter-wangxu/python_play
|
test/A.py
|
Python
|
apache-2.0
| 195
| 0.035897
|
__autho
|
r__ = 'wangp11'
AA=1
l1 = [13, 13]
n = 1
def print_l1():
print "id A.py: %d" % id(l1)
print l1
def ex
|
tend_l1():
l1.extend([1,31,31])
print l1
def print_n():
print n
|
lafactura/datea-api
|
datea_api/apps/follow/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 1,483
| 0.004046
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Follow',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('object_id', models.PositiveIntegerField(null=True, blank=True)),
('follow_key', models.CharField(
|
max_length=255)),
('published', models.BooleanField(default=True)),
('client_domain', models.CharField(max_length=100, null=True, verbose_name='CLient Domain', blank=True)),
('content_type', models.ForeignKey(blank=True, to='conte
|
nttypes.ContentType', null=True)),
('user', models.ForeignKey(related_name='follows', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Follow',
'verbose_name_plural': 'Follows',
},
),
migrations.AlterUniqueTogether(
name='follow',
unique_together=set([('user', 'follow_key')]),
),
]
|
DistrictDataLabs/science-bookclub
|
bin/octavo-admin.py
|
Python
|
apache-2.0
| 1,356
| 0.004425
|
#!/usr/bin/env python
# octavo-admin
# An administrative script for our bookclub
|
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sun Mar 30 20:26:20 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: octavo-admin.py [] benjamin@bengfort.com $
"""
An administrative script for our bookclub
"""
######################################################
|
####################
## Imports
##########################################################################
import os
import sys
import argparse
##########################################################################
## Command Line Variables
##########################################################################
DESCRIPTION = "An administrative utility for the Science bookclub"
EPILOG = "If there are any bugs or concerns, please comment on Github"
##########################################################################
## Main Method
##########################################################################
def main(*argv):
# Construct the argument parser
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
# Add command line arguments
# TODO
# Parse the arguments from the commandline
options = parser.parse_args()
if __name__ == '__main__':
main(*sys.argv)
|
rockymeza/django-local-requests
|
tests/views.py
|
Python
|
bsd-2-clause
| 1,217
| 0
|
from rest_framework import (
serializers,
viewsets,
)
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Author, Book
class BookSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Book
fields = (
'url',
'author',
'title',
)
class AuthorSerializer(serializers.HyperlinkedModelSerializer):
books = BookSerializer(many=True, read_only=True)
class Meta:
model = Author
fields = (
'url',
'name',
'books',
)
class AuthorViewSet(viewsets.ModelViewSet
|
):
serializer_class = AuthorSerializer
queryset = Author.objects.all()
class BookViewSet(viewsets.M
|
odelViewSet):
serializer_class = BookSerializer
queryset = Book.objects.all()
@api_view(['GET', 'POST'])
def echo(request):
return Response({
'GET': request.GET,
'POST': request.POST,
'META': request.META,
})
@api_view(['POST'])
def upload_file(request):
file = request.FILES['file']
return Response({
'name': file.name,
'content': file.read().decode('utf-8'),
})
|
danhuss/faker
|
faker/providers/currency/ru_RU/__init__.py
|
Python
|
mit
| 7,994
| 0.00017
|
from .. import Provider as CurrencyProvider
class Provider(CurrencyProvider):
# Format: (code, name)
# See currency names in Russian: https://ru.wikipedia.org/wiki/Список_существующих_валют#Валюты
currencies = (
("AED", "Дирхам ОАЭ"),
("AFN", "Афгани"),
("ALL", "Лек"),
("AMD", "Армянский драм"),
("ANG", "Нидерландский антильский гульден"),
("AOA", "Кванза"),
("ARS", "Аргентинское песо"),
("AUD", "Австралийский доллар"),
("AWG", "Арубанский флорин"),
("AZN", "Азербайджанский манат"),
("BAM", "Конвертируемая марка Боснии и Герцеговины"),
("BBD", "Барбадосский доллар"),
("BDT", "Така"),
("BGN", "Болгарский лев"),
("BHD", "Бахрейнский динар"),
("BIF", "Бурундийский франк"),
("BMD", "Бермудский доллар"),
("BND", "Брунейский доллар"),
("BOB", "Боливиано"),
("BRL", "Бразильский реал"),
("BSD", "Багамский доллар"),
("BTN", "Нгултрум"),
("BWP", "Пула"),
("BYR", "Белорусский рубль"),
("BZD", "Белизский доллар"),
("CAD", "Канадский доллар"),
("CDF", "Конголезский франк"),
("CHF", "Швейцарский франк"),
("CLP", "Чилийское песо"),
("CNY", "Юань"),
("COP", "Колумбийское песо"),
("CRC", "Коста-риканский колон"),
("CUC", "Кубанское конвертируемое песо"),
("CUP", "Кубанское песо"),
("CVE", "Эскудо Кабо-Верде"),
("CZK", "Чешская крона"),
|
("DJF", "Франк Джибути"),
("DKK", "Датская крона"),
("DOP", "Доминиканское песо"),
("DZD", "Алжирский динар"),
("EGP", "Египетский фунт"),
("ERN", "Накфа"),
("ETB", "Эфиопский быр"),
("EUR", "Евро"),
("FJD", "Доллар Фиджи"),
("FKP", "Фунт Фолклендских островов"),
("GBP", "Фунт стерлингов"),
("GEL", "Лари"),
("GGP", "Гернсийский фунт"),
("GHS", "Ганский седи"),
|
("GIP", "Гибралтарский фунт"),
("GMD", "Даласи"),
("GNF", "Гвинейский франк"),
("GTQ", "Кетсаль"),
("GYD", "Гайанский доллар"),
("HKD", "Гонконгский доллар"),
("HNL", "Лемпира"),
("HRK", "Хорватская куна"),
("HTG", "Гурд"),
("HUF", "Форинт"),
("IDR", "Индонезийская рупия"),
("ILS", "Новый израильский шекель"),
("NIS", "Новый израильский шекель"),
("IMP", "Фунт острова Мэн"),
("INR", "Индийская рупия"),
("IQD", "Иракский динар"),
("IRR", "Иранский риал"),
("ISK", "Исландская крона"),
("JEP", "Джерсийский фунт"),
("JMD", "Ямайский доллар"),
("JOD", "Иорданский динар"),
("JPY", "Иена"),
("KES", "Кенийский шиллинг"),
("KGS", "Сом"),
("KHR", "Риель"),
("KMF", "Франк Комор"),
("KPW", "Северокорейская вона"),
("KRW", "Южнокорейская вона"),
("KWD", "Кувейтский динар"),
("KYD", "Доллар Островов Кайман"),
("KZT", "Тенге"),
("LAK", "Кип"),
("LBP", "Ливийский фунт"),
("LKR", "Шри-ланкийская рупия"),
("LRD", "Либерийский доллар"),
("LSL", "Лоти"),
("LTL", "Литовский лит"),
("LYD", "Ливийский динар"),
("MAD", "Марокканский дирхам"),
("MDL", "Молдавский лей"),
("MGA", "Малагасийский ариари"),
("MKD", "Денар"),
("MMK", "Кьят"),
("MNT", "Тугрик"),
("MOP", "Патака"),
("MRO", "Угия"),
("MUR", "Маврикийская рупия"),
("MVR", "Рувия"),
("MWK", "Квача"),
("MXN", "Мексиканское песо"),
("MYR", "Малайзийский ринггит"),
("MZN", "Мозамбикский метикал"),
("NAD", "Доллар Намибии"),
("NGN", "Найра"),
("NIO", "Кордоба"),
("NOK", "Норвежская крона"),
("NPR", "Непальская рупия"),
("NZD", "Новозеландский доллар"),
("OMR", "Оманский риал"),
("PAB", "Бальбоа"),
("PEN", "Соль"),
("PGK", "Кина"),
("PHP", "Филиппинское песо"),
("PKR", "Пакистанская рупия"),
("PLN", "Злотый"),
("PYG", "Гуарани"),
("QAR", "Катарский риал"),
("RON", "Румынский лей"),
("RSD", "Сербский динар"),
("RUB", "Российский рубль"),
("RWF", "Франк Руанды"),
("SAR", "Саудовский риял"),
("SBD", "Доллар Соломоновых Островов"),
("SCR", "Сейшельская рупия"),
("SDG", "Суданский фунт"),
("SEK", "Шведская крона"),
("SGD", "Сингапурский доллар"),
("SHP", "Фунт Святой Елены"),
("SLL", "Леоне"),
("SOS", "Сомалийский шиллинг"),
("SPL", "Луиджино"),
("SRD", "Суринамский доллар"),
("STD", "Добра"),
("SVC", "Сальвадорский колон"),
("SYP", "Сирийский фунт"),
("SZL", "Лилангени"),
("THB", "Бат"),
("TJS", "Сомони"),
("TMT", "Новый туркменский манат"),
("TND", "Тунисский динар"),
("TOP", "Паанга"),
("TRY", "Турецкая лира"),
("TTD", "Доллар Тринидада и Тобаго"),
("TVD", "Доллар Тувалу"),
("TWD", "Новый тайваньский доллар"),
("TZS", "Танзанийский шиллинг"),
("UAH", "Гривна"),
("UGX", "Угандийский шиллинг"),
("USD", "Доллар США"),
("UYU", "Уругвайское песо"),
("UZS", "Узбекский сум"),
("VEF", "Суверенный боливар"),
("VND", "Донг"),
("VUV", "Вату"),
("WST", "Тала"),
("XAF", "Франк КФА ВЕАС"),
("XCD", "Восточно-карибский доллар"),
("XDR", "СДР"),
("XOF", "Франк КФА ВСЕАО"),
("XPF", "Франк КФП"),
("YER", "Йеменский риал"),
("ZAR", "Рэнд"),
("ZMW", "Замбийская квача"),
("ZWD", "Доллар Зимбабве"),
)
|
Epikarsios/RssLEDBackpack
|
RssLED.py
|
Python
|
gpl-3.0
| 1,628
| 0.032555
|
import feedparser
import time
# Create display instance on default I2C address (0x70) and bus number.
from Adafruit_LED_Backpack import AlphaNum4
display = AlphaNum4.AlphaNum4()
# Initialize the display. Must be called once before using the display.
display.begin()
#create string(s) with rss address for multiple feeds
RssAddress = "http://feeds.reuters.com/Reuters/domesticNews"
#create feed caled Rss
Rss = feedparser.parse(RssAddress)
#Loop to iterate through all titles in feed sleeping for 1 second between printing
display.clear()
display.write_display()
#Loop through each title of feed
for i in Rss.entrie
|
s:
#prints title to console
print (i.title)
#reset position to begining
pos = 0
#Change string to Uppercase for readability and add --* buffer to begining and end to distinguish titles
CapString = "---*" + i.t
|
itle.upper() + "*---"
# Dashed line in console for aesthetics
print("----------------------------------------------------------------")
#Loop for scrolling through title
for x in range(0,len(CapString)-4):
# Print a 4 character string to the display buffer.
display.print_str(CapString[pos:pos+4])
# Write the display buffer to the hardware. This must be called to
# update the actual display LEDs.
display.write_display()
# Increment position. Wrap back to 0 when the end is reached.
pos += 1
if pos > len(CapString)-4:
pos = 0
# Delay for 0.15 of a second. This can be changed to speed up or slow down the scroll.
time.sleep(0.15)
# Clear out display
display.print_str(" ")
display.write_display()
|
refeed/coala
|
coalib/bearlib/aspects/collections.py
|
Python
|
agpl-3.0
| 390
| 0
|
from coalib.bearlib.aspects.meta import issubaspect, assert_aspect
class aspectlist(list):
"""
List-derived container to hold
|
aspects.
"""
def __init__(self, seq=()):
super().__init__(map(assert_aspect, seq))
def __contains__(self, aspect):
for item in self:
if issubaspect(aspect, item):
return True
|
return False
|
mikewesner-wf/glasshouse
|
appengine/lib/invoke/vendor/pexpect/__init__.py
|
Python
|
apache-2.0
| 78,307
| 0.004572
|
"""Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to a automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo myname@host.example.com:.')
child.expect ('Password:')
child.sendline (mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, Shane Kerr and Thomas Kluyver. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Pexpect Copyright (c) 2010 Noah Spurrier
http://pexpect.sourceforge.net/
"""
try:
import os, sys, time
import select
import re
|
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
except ImportError, e:
raise ImportError (str(e) + """
A critical module was not foun
|
d. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.""")
__version__ = '2.5.1'
version = __version__
version_info = (2,5,1)
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnb', 'run', 'which',
'split_command_line', '__version__']
# Exception classes used by this module.
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
#tblist = filter(self.__filter_not_pexpect, tblist)
tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
def __filter_not_pexpect(self, trace_list_item):
"""This returns True if list item 0 the string 'pexpect.py' in it. """
if trace_list_item[0].find('pexpect.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child. This usually means the child has exited."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
##class TIMEOUT_PATTERN(TIMEOUT):
## """Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## """
##class MAXBUFFER(ExceptionPexpect):
## """Raised when a scan buffer fills before matching an expected pattern."""
PY3 = (sys.version_info[0] >= 3)
def _cast_bytes(s, enc):
if isinstance(s, unicode):
return s.encode(enc)
return s
def _cast_unicode(s, enc):
if isinstance(s, bytes):
return s.decode(enc)
return s
re_type = type(re.compile(''))
def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None,
logfile=None, cwd=None, env=None, encoding='utf-8'):
"""
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
Note that lines are terminated by CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudo ttys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo myname@host.example.com:.')
child.expect ('(?i)password')
child.sendline (mypassword)
The previous code can be replace with the following::
from pexpect import *
run ('scp foo myname@host.example.com:.', events={'(?i)password': mypassword})
Examples
========
Start the apache daemon on the local machine::
from pexpect import *
run ("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run ("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
Tricky Examples
===============
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run ("ssh username@machine.example.com 'ls -l'", events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
The 'events' argument should be a dictionary of patterns an
|
CenterForOpenScience/SHARE
|
api/formattedmetadatarecords/urls.py
|
Python
|
apache-2.0
| 266
| 0.003759
|
from rest_framework.routers import SimpleRouter
from api.formattedmetadatarecords import views
router
|
= SimpleRouter()
router.register(r'formattedmetadatarecords', views.FormattedMetadataRecordViewSet, bas
|
ename='formattedmetadatarecord')
urlpatterns = router.urls
|
kalaspuff/tomodachi
|
tomodachi/launcher.py
|
Python
|
mit
| 12,014
| 0.002164
|
import asyncio
import datetime
import importlib
import logging
import os
import platform
import signal
import sys
import time
from typing import Any, Dict, List, Optional, Set, Union, cast
import tomodachi.__version__
import tomodachi.container
import tomodachi.importer
import tomodachi.invoker
from tomodachi.container import ServiceContainer
from tomodachi.helpers.execution_context import clear_execution_context, clear_services, set_execution_context
from tomodachi.helpers.safe_modules import SAFE_MODULES
from tomodachi.importer import ServiceImporter
CancelledError = asyncio.CancelledError
try:
asyncioexceptions = getattr(asyncio, "exceptions")
if asyncioexceptions:
_CancelledError = asyncioexceptions.CancelledError
except (Exception, ModuleNotFoundError, ImportError):
_CancelledError = asyncio.CancelledError
class ServiceLauncher(object):
_close_waiter: Optional[asyncio.Future] = None
_stopped_waiter: Optional[asyncio.Future] = None
restart_services = False
services: Set = set()
@classmethod
def run_until_complete(
cls,
service_files: Union[List, set],
configuration: Optional[Dict] = None,
watcher: Any = None,
) -> None:
def stop_services() -> None:
asyncio.ensure_future(_stop_services())
async def _stop_services() -> None:
if cls._close_waiter and not cls._close_waiter.done():
cls._close_waiter.set_result(None)
for service in cls.services:
try:
service.stop_service()
except Exception:
pass
if cls._stopped_waiter:
cls._stopped_waiter.set_result(None)
if cls._stopped_waiter:
await cls._stopped_waiter
def sigintHandler(*args: Any) -> None:
sys.stdout.write("\b\b\r")
sys.stdout.flush()
logging.getLogger("system").warning("Received <ctrl+c> interrupt [SIGINT]")
cls.restart_services = False
def sigtermHandler(*args: Any) -> None:
logging.getLogger("system").warning("Received termination signal [SIGTERM]")
cls.restart_services = False
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
if loop and loop.is_closed():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop
|
)
for signame in ("SIGINT", "SIGTERM"):
loop.add_signal_handler(getattr(signal, signame), stop_services)
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
signal.signal(signal.SIGINT, sigintHandler)
signal.signal(signal.SIGTERM, sigtermHandler)
watcher_future = None
if watcher:
async def _watcher_restart(updated_files: Union[L
|
ist, set]) -> None:
cls.restart_services = True
for file in service_files:
try:
ServiceImporter.import_service_file(file)
except (SyntaxError, IndentationError) as e:
logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
logging.getLogger("watcher.restart").warning("Service cannot restart due to errors")
cls.restart_services = False
return
pre_import_current_modules = [m for m in sys.modules.keys()]
cwd = os.getcwd()
for file in updated_files:
if file.lower().endswith(".py"):
module_name = file[:-3].replace("/", ".")
module_name_full_path = "{}/{}".format(os.path.realpath(cwd), file)[:-3].replace("/", ".")
try:
for m in pre_import_current_modules:
if m == module_name or (len(m) > len(file) and module_name_full_path.endswith(m)):
ServiceImporter.import_module(file)
except (SyntaxError, IndentationError) as e:
logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
logging.getLogger("watcher.restart").warning("Service cannot restart due to errors")
cls.restart_services = False
return
logging.getLogger("watcher.restart").warning("Restarting services")
stop_services()
watcher_future = loop.run_until_complete(watcher.watch(loop=loop, callback_func=_watcher_restart))
cls.restart_services = True
init_modules = [m for m in sys.modules.keys()]
restarting = False
while cls.restart_services:
init_timestamp = time.time()
init_timestamp_str = datetime.datetime.utcfromtimestamp(init_timestamp).isoformat() + "Z"
process_id = os.getpid()
event_loop_alias = ""
event_loop_version = ""
try:
if "uvloop." in str(loop.__class__):
event_loop_alias = "uvloop"
import uvloop # noqa # isort:skip
event_loop_version = str(uvloop.__version__)
elif "asyncio." in str(loop.__class__):
event_loop_alias = "asyncio"
else:
event_loop_alias = "{}.{}".format(loop.__class__.__module__, loop.__class__.__name__)
except Exception:
event_loop_alias = str(loop)
clear_services()
clear_execution_context()
set_execution_context(
{
"tomodachi_version": tomodachi.__version__,
"python_version": platform.python_version(),
"system_platform": platform.system(),
"process_id": process_id,
"init_timestamp": init_timestamp_str,
"event_loop": event_loop_alias,
}
)
if event_loop_alias == "uvloop" and event_loop_version:
set_execution_context(
{
"uvloop_version": event_loop_version,
}
)
if watcher:
tz: Any = None
utc_tz: Any = None
try:
import pytz # noqa # isort:skip
import tzlocal # noqa # isort:skip
utc_tz = pytz.UTC
try:
tz = tzlocal.get_localzone()
if not tz:
tz = pytz.UTC
except Exception:
tz = pytz.UTC
except Exception:
pass
init_local_datetime = (
datetime.datetime.fromtimestamp(init_timestamp)
if tz and tz is not utc_tz and str(tz) != "UTC"
else datetime.datetime.utcfromtimestamp(init_timestamp)
)
print("---")
print("Starting tomodachi services (pid: {}) ...".format(process_id))
for file in service_files:
print("* {}".format(file))
print()
print(
"Current version: tomodachi {} on Python {}".format(
tomodachi.__version__, platform.python_version()
)
)
print(
"Event loop implementation: {}{}".format(
event_loop_alias, " {}".format(event_loop_version) if event_loop_version else ""
)
)
if tz:
print("Local time: {} {}".format(init_local_datetime.strftime("%B %d, %Y - %H:%M:%S,%f"), str(tz)))
print("Timestamp in UTC: {}".format(init_timestamp_str))
|
tbjoern/adventofcode
|
Twentythree/reverse.py
|
Python
|
mit
| 331
| 0.096677
|
# a = 7 or
|
a = 12
b = a
b -= 1
d = a
a = 0
# a += b*d
c = b
a += 1
c -= 1
whi
|
le c != 0
d -= 1
while d !=0
b -= 1
c = b
# c *= 2
d = c
d -= 1
c += 1
while d != 0
tgl c
c = -16
while b > 1
c = 1
# a += 95*95
c = 95
d = 95
a += 1
d -= 1
while d != 0
c -= 1
while c != 0
|
burakbayramli/classnotes
|
algs/algs_105_mesquita/test1.py
|
Python
|
gpl-3.0
| 7,691
| 0.021194
|
import itertools, os
import pandas as pd, sys
import numpy as np, matplotlib.pylab as plt
Q = 1.0 ; T = 1.0
class Game:
def __init__(self,df):
self.df = df.copy()
self.df_orig = self.df.copy()
# dictionaries of df variables - used for speedy access
self.df_capability = df.Capability.to_dict()
self.df_position = df.Position.to_dict()
self.df_salience = df.Salience.to_dict()
self.max_pos = df.Position.max()
self.min_pos = df.Position.min()
def weighted_median(self):
df = self.df.copy()
df['w'] = df.Capability*df.Salience
df = df.sort_index(by='Position',ascending=True)
df['w'] = df['w'] / df['w'].sum()
df['w'] = df['w'].cumsum()
return float(df[df['w']>=0.5].head(1).Position)
def mean(self):
return (self.df.Capability*self.df.Position*self.df.Salience).sum() / \
(self.df.Capability*self.df.Salience).sum()
def Usi_i(self,i,j,ri=1.):
tmp1 = self.df_position[i]-self.df_position[j]
tmp2 = self.max_pos-self.min_pos
return 2. - 4.0 * ( (0.5-0.5*np.abs(float(tmp1)/tmp2) )**ri)
def Ufi_i(self,i,j,ri=1.):
tmp1 = self.df_position[i]-self.df_position[j]
tmp2 = self.df.Position.max()-self.df.Position.min()
return 2. - 4.0 * ( (0.5+0.5*np.abs(float(tmp1)/tmp2) )**ri )
def Usq_i(self,i,ri=1.):
return 2.-(4.*(0.5**ri))
def Ui_ij(self,i,j):
tmp1 = self.df_position[i] - self.df_position[j]
tmp2 = self.max_pos-self.min_pos
return 1. - 2.*np.abs(float(tmp1) / tmp2)
def v(self,i,j,k):
return self.df_capability[i]*self.df_salience[i]*(self.Ui_ij(i,j)-self.Ui_ij(i,k))
def Pi(self,i):
l = np.array([[i,j,k] for (j,k) in itertools.combinations(range(len(self.df)), 2 ) if i!=j and i!=k])
U_filter = np.array(map(lambda (i,j,k): self.Ui_ij(j,i)>self.Ui_ij(i,k), l))
lpos = l[U_filter]
tmp1 = np.sum(map(lambda (i,j,k): self.v(j,i,k), lpos))
tmp2 = np.sum(map(lambda (i,j,k): np.abs(self.v(j,i,k)), l))
return float(tmp1)/tmp2
def Ubi_i(self,i,j,ri=1):
tmp1 = np.abs(self.df_position[i] - self.weighted_median()) + \
np.abs(self.df_position[i] - self.df_position[j])
tmp2 = np.abs(self.max_pos-self.min_pos)
return 2. - (4. * (0.5 - (0.25 * float(tmp1) / tmp2))**ri)
def Uwi_i(self,i,j,ri=1):
tmp1 = np.abs(self.df_position[i] - self.weighted_median()) + \
np.abs(self.df_position[i] - self.df_position[j])
tmp2 = np.abs(self.max_pos-self.min_pos)
return 2. - (4. * (0.5 + (0.25 * float(tmp1) / tmp2))**ri)
def EU_i(self,i,j,r=1):
term1 = self.df_salience[j]*self.Pi(i)*self.Usi_i(i,j,r)
term2 = self.df_salience[j]*(1.-self.Pi(i))*self.Ufi_i(i,j,r)
term3 = (1-self.df_salience[j])*self.Usi_i(i,j,r)
term4 = Q*self.Usq_i(i,r)
term5 = (1.-Q)*( T*self.Ubi_i(i,j,r) + (1.-T)*self.Uwi_i(i,j,r) )
return (term1+term2+term3)-(term4+term5)
def EU_j(self,i,j,r=1):
return self.EU_i(j,i,r)
def Ri(self,i):
# get all j's expect i
l = [x for x in range(len(self.df)) if x!= i]
tmp = np.array(map(lambda x: self.EU_j(i,x), l))
numterm1 = 2*np.sum(tmp)
numterm2 = (len(self.df)-1)*np.max(tmp)
numterm3 = (len(self.df)-1)*np.min(tmp)
return float(numterm1-numterm2-numterm3) / (numterm2-numterm3)
def ri(self,i):
Ri_tmp = self.Ri(i)
return (1-Ri_tmp/3.) / (1+Ri_tmp/3.)
def do_round(self):
df_new = self.df.copy()
# reinit
self.df_capability = se
|
lf.df.Capability.to_dict()
self.df_position = self.df.Position.to_dict()
self.df_salience = self.df.Salience.to_dict()
self.max_pos = self.df.Position.max()
self.min_pos = self.df
|
.Position.min()
self.df_orig_position = self.df_orig.Position.to_dict()
offers = [list() for i in range(len(self.df))]
ris = [self.ri(i) for i in range(len(self.df))]
for (i,j) in itertools.combinations(range(len(self.df)), 2 ):
if i==j: continue
eui = self.EU_i(i,j,r=ris[i])
euj = self.EU_j(i,j,r=ris[j])
if eui > 0 and euj > 0 and np.abs(eui) > np.abs(euj):
# conflict - actor i has upper hand
j_moves = self.df_position[i]-self.df_orig_position[j]
print i,j,eui,euj,'conflict', i, 'wins', j, 'moves',j_moves
offers[j].append(j_moves)
elif eui > 0 and euj > 0 and np.abs(eui) < np.abs(euj):
# conflict - actor j has upper hand
i_moves = self.df_position[j]-self.df_orig_position[i]
print i,j,eui,euj,'conflict', j, 'wins', i, 'moves',i_moves
offers[i].append(i_moves)
elif eui > 0 and euj < 0 and np.abs(eui) > np.abs(euj):
# compromise - actor i has the upper hand
print i,j,eui,euj,'compromise', i, 'upper hand'
xhat = (self.df_position[i]-self.df_orig_position[j]) * np.abs(euj/eui)
offers[j].append(xhat)
elif eui < 0 and euj > 0 and np.abs(eui) < np.abs(euj):
# compromise - actor j has the upper hand
print i,j,eui,euj,'compromise', j, 'upper hand'
xhat = (self.df_position[j]-self.df_orig_position[i]) * np.abs(eui/euj)
offers[i].append(xhat)
elif eui > 0 and euj < 0 and np.abs(eui) < np.abs(euj):
# capitulation - actor i has upper hand
j_moves = self.df_position[i]-self.df_orig_position[j]
print i,j,eui,euj,'capitulate', i, 'wins', j, 'moves',j_moves
offers[j].append(j_moves)
elif eui < 0 and euj > 0 and np.abs(eui) > np.abs(euj):
# capitulation - actor j has upper hand
i_moves = self.df_position[j]-self.df_orig_position[i]
print i,j,eui,euj,'capitulate', j, 'wins', i, 'moves',i_moves
offers[i].append(i_moves)
else:
print i,j,eui,euj,'nothing'
# choose offer requiring minimum movement, then
# update positions
print offers
#exit()
df_new['offer'] = map(lambda x: 0 if len(x)==0 else x[np.argmin(np.abs(x))],offers)
df_new.loc[:,'Position'] = df_new.Position + df_new.offer
# in case max/min is exceeded
df_new.loc[df_new['Position']>self.max_pos,'Position'] = self.max_pos
df_new.loc[df_new['Position']<self.min_pos,'Position'] = self.min_pos
self.df = df_new
print self.df
if __name__ == "__main__":
if len(sys.argv) < 3:
print "\nUsage: run.py [CSV] [ROUNDS]"
exit()
df = pd.read_csv(sys.argv[1]); print df
df.Position = df.Position.astype(float)
df.Capability = df.Capability.astype(float)
df.Salience = df.Salience/100.
game = Game(df)
print 'weighted_median', game.weighted_median(), 'mean', game.mean()
results = pd.DataFrame(index=df.index)
for i in range(int(sys.argv[2])):
results[i] = game.df.Position
df = game.do_round(); print df
print 'weighted_median', game.weighted_median(), 'mean', game.mean()
results = results.T
results.columns = game.df.Actor
print results
results.plot()
fout = '%s/out-%s.png' % (os.environ['TEMP'],sys.argv[1].replace(".csv","-csv"))
plt.savefig(fout)
|
jordanemedlock/psychtruths
|
temboo/core/Library/Mixpanel/DataExport/Funnels/FunnelList.py
|
Python
|
apache-2.0
| 3,581
| 0.004468
|
# -*- coding: utf-8 -*-
###############################################################################
#
# FunnelList
# Gets the names and funnel_ids of your funnels.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FunnelList(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FunnelList Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(FunnelList, self).__init__(temboo_session, '/Library/Mixpanel/DataExport/Funnels/FunnelList')
def new_input_set(self):
return FunnelListInputSet()
def _make_result_set(self, result, path):
return FunnelListResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FunnelListChoreographyExecution(session, exec_id, path)
class FunnelListInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FunnelList
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided my Mixpanel. You can find your Mixpanel API Key in the project settings dialog in the Mixpanel app.)
"""
super(FunnelListInputSet, self)._set_input('APIKey', value)
def set_APISecret(self, value):
"""
Set the value o
|
f the APISecret input for this Choreo. ((required, string) The API Secret provided by Mixpanel. You can find your Mixpanel API Secret in the project settings dialog in the Mixpanel app.)
"""
super(FunnelListInputSet, self)._set_input('APISecret', value)
def set_Expire(self, value):
"""
Set the value of the Expire input for this Choreo. ((optional, integer) The amount of minutes past NOW() before th
|
e request will expire. Defaults to 1.)
"""
super(FunnelListInputSet, self)._set_input('Expire', value)
class FunnelListResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FunnelList Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Mixpanel.)
"""
return self._output.get('Response', None)
class FunnelListChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FunnelListResultSet(response, path)
|
telminov/ansible-manager
|
core/views/rest.py
|
Python
|
mit
| 2,316
| 0.003454
|
from django.db.models import Count
from django.http import HttpResponse
from rest_framework.generics import ListAPIView
from rest_framework.views import APIView
from rest_framework.authentication import TokenAuthentication
from prometheus_client import generate_latest
from core import models
from core import serializers
from core import consts
tokenBearer = TokenAuthentication
tokenBearer.keyword = 'Bearer'
class TaskLogs(ListAPIView):
model = models.TaskLog
serializer_class = serializers.TaskLogSerializer
def get_queryset(self):
last_log_id = self.request.GET.get('last_log_id', 0)
return self.model.objects.filter(task_id=self.kwargs['task_id'], id__gt=last_log_id)
task_logs = TaskLogs.as_view()
class DjangoMetrics(APIView):
authentication_classes = (tokenBearer,)
def get(self, request):
result = generate_latest().decode()
return HttpResponse(result, content_type='text/plain; charset=utf-8')
class AnsibleManagerMetrics(APIView):
authentication_classes = (tokenBearer,)
def get(self, request):
result = '# HELP ansible_manager_template_last_task_success show success or fail last task\n'
result += '# TYPE ansible_manager_template_last_task_success gauge\n'
for template in models.TaskTemplate.objects.exclude(cron=''):
completed_tasks = template.tasks.filter(status__in=consts.NOT_RUN_STATUSES)
if not completed_tas
|
ks:
continue
success = int(completed_tasks.last().status == consts.COMPLETED)
result += 'ansible_manager_template_last_task_success{id="%s", name="%s"} %s\n' % (
template.pk, template.name, success)
result += '# HELP ansible_manager_tasks_completed_total show number of completed tasks\n'
result += '# TYPE ansible_manager_tasks_completed_total gauge\n'
tasks = models.Task.objects.values_list('template_
|
_id', 'template__name', 'status').annotate(count=Count('id'))
for template_id, template_name, status, count in tasks:
result += 'ansible_manager_tasks_completed_total{id="%s", name="%s", status="%s"} %s\n' % (
template_id, template_name, status, count
)
return HttpResponse(result, content_type='text/plain; charset=utf-8')
|
fluxw42/youtube-dl
|
youtube_dl/extractor/reverbnation.py
|
Python
|
unlicense
| 1,627
| 0.000615
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
qualities,
str_or_none,
)
class ReverbNationIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$'
_TESTS = [{
'url': 'http://www.reverbnation.com/alkilados/son
|
g/16965047-mona-lisa',
'md5': 'c0aaf339bcee189495fdf5a8c8ba8645',
'info_dict': {
'id': '16965047',
'ext': 'mp3',
'title': 'MONA LISA',
'uploader': 'ALKILADOS'
|
,
'uploader_id': '216429',
'thumbnail': r're:^https?://.*\.jpg',
},
}]
def _real_extract(self, url):
song_id = self._match_id(url)
api_res = self._download_json(
'https://api.reverbnation.com/song/%s' % song_id,
song_id,
note='Downloading information of song %s' % song_id
)
THUMBNAILS = ('thumbnail', 'image')
quality = qualities(THUMBNAILS)
thumbnails = []
for thumb_key in THUMBNAILS:
if api_res.get(thumb_key):
thumbnails.append({
'url': api_res[thumb_key],
'preference': quality(thumb_key)
})
return {
'id': song_id,
'title': api_res['name'],
'url': api_res['url'],
'uploader': api_res.get('artist', {}).get('name'),
'uploader_id': str_or_none(api_res.get('artist', {}).get('id')),
'thumbnails': thumbnails,
'ext': 'mp3',
'vcodec': 'none',
}
|
hrishioa/Aviato
|
flask/Scripts/kartograph-script.py
|
Python
|
gpl-2.0
| 364
| 0.005495
|
#!C:\Users\SeanSaito\Dev\avi
|
ato\flask\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'kartograph.py==0.6.8','console_scripts','kartograph'
__requires__ = 'kartograph.py==0.6.8'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('kartograp
|
h.py==0.6.8', 'console_scripts', 'kartograph')()
)
|
ecaldwe1/zika
|
website/mixins.py
|
Python
|
mpl-2.0
| 1,140
| 0.000877
|
#!/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet Zika modeling interface.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.
|
txt files in its top-level directory; they are
# available at https://github.com/vecnet/zika
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
class LoginRequiredMixin(object):
""" This works: class InterviewListView(LoginRequiredMixin, ListView)
This DOES NOT work: class InterviewListView(ListView, LoginRequiredMixin)
I'm not 100% sure that wrapping as_view function using Mixin is a good idea though, but whatever
"""
@classmethod
def as_view(cls, **initkwargs):
# Ignore PyCharm warning below, this is a Mixin class after all
view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
return login_required(view)
|
edagar/censorship-analyser
|
test.py
|
Python
|
bsd-3-clause
| 5,035
| 0.000199
|
import subprocess
from random import choice, randint
from time import sleep
import yaml
from ooni.otime import timestamp
import const
class Test(object):
def __init__(self, testfile, args=[]):
self.testfile = testfile
self.args = args
self.output = None
self.status = None
self.errorMessage = None
self.parser = None
self.report = None
self.reportName = None
def run(self):
self.reportName = "report-%s-%s.yamloo" % (self.testfile, timestamp())
self.output = runTest(self)
self.parseResults()
def parseResults(self):
self.parser = TestParser(self)
self.parser.parseReport()
def printResults(self):
self.parser.printResults()
def getResults(self):
return {
"Status": self.status,
"ErrorMessage": self.errorMessage,
}
class SiteProbe(Test):
def __init__(self, testfile=const.PROBE_TEST, target=const.TOR_SITE_URL):
super(SiteProbe, self).__init__(testfile=testfile, args=["-u", target])
self.target = target
class TCPTest(Test):
def __init__(self, testfile=const.TCP_TEST,
target=const.TOR_DOMAIN, port="443"):
super(TCPTest, self).__init__(
testfile=testfile, args=["-t", target, "-p", port])
self.target = target
class PingTest(Test):
def __init__(self, testfile=const.PING_TEST, target=None):
args = ["-t", target] if target is not None else []
super(PingTest, self).__init__(testfile=testfile, args=args)
self.target = target
self.packets = None
def parsePackets(self, report):
try:
return 'echo-reply' in report['ReceivedPackets'][0][0]['summary']
except:
return False
def parseResults(self):
self.parser = TestParser(self)
self.parser.loadReport()
if self.report['TestStatus'] == 'OK':
self.packets = self.report['packets']
if self.parsePackets(self.report):
self.status = "OK"
return
self.status = "FAILED"
self.errorMessage = "Host unreachable"
raise TestException(self)
class DN
|
STest(Test):
def __init__(self, testfile=const.DNS_TEST, target=const.TOR_DOMAIN):
super(DNSTest, self).__init__(testfile=testfile, args=["-t", target])
self.target = target
class Traceroute(Test):
def __init__(self, testfile=const.TRACEROUTE_TEST, target=None):
args = ["-b", target] if target is not None else []
super(Traceroute, self).__init__(testfile=testfile, args=args)
self.target = t
|
arget
class TestParser(object):
def __init__(self, test):
self.test = test
def loadReport(self):
with open(self.test.reportName, 'r') as f:
entries = yaml.safe_load_all(f)
headers = entries.next()
self.test.report = entries.next()
def parseReport(self):
self.loadReport()
self.test.status = self.test.report['TestStatus']
if not self.test.status == "OK":
self.test.errorMessage = self.test.report['TestException']
raise TestException(self.test)
def printResults(self):
print "Test: %s" % self.test.testfile
if hasattr(self.test, "target") and self.test.target is not None:
print "Target: %s" % self.test.target
results = self.test.getResults()
for key, value in results.iteritems():
if key and value:
print "%s: %s" % (key, value)
class TestCase(list):
def __init__(self, tests=[], sleep_interval=const.SLEEP_INTERVAL):
super(TestCase, self).__init__(tests)
self.sleepInterval = sleep_interval
def run(self):
tests = testCaseGenerator(list(self))
for test in tests:
try:
test.run()
except TestException, e:
print e
sleep(randint(self.sleepInterval[0], self.sleepInterval[1]))
def printResults(self):
for test in self:
test.printResults()
print
def getFailed(self):
return [test for test in self if test.status != "OK"]
def testCaseGenerator(seq):
for x in range(len(seq)):
test = choice(seq)
seq.remove(test)
yield test
def runTest(test):
binary = const.OONI_BINARY
args = [binary, "-o", test.reportName, "-n", test.testfile]
if test.args:
args += test.args
print "Running test %s" % test.testfile
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
return output
class TestException(Exception):
def __init__(self, test):
self.testInstance = test
def __str__(self):
return "%s: %s (%s)" % (self.testInstance.testfile,
self.testInstance.status,
self.testInstance.errorMessage)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/parcats/line/colorbar/_tickcolor.py
|
Python
|
mit
| 480
| 0.002083
|
import _plotly_utils.basevalidators
class TickcolorValidator(_plotly_utils.basevali
|
dators.ColorValidator):
def __init__(
self, plotly_name="tickcolor", parent_name="parcats.line.colorbar", **kwargs
):
super(TickcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
rol
|
e=kwargs.pop("role", "style"),
**kwargs
)
|
thaim/ansible
|
lib/ansible/modules/cloud/amazon/lambda.py
|
Python
|
mit
| 23,103
| 0.002813
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda
short_description: Manage AWS Lambda functions
description:
- Allows for the management of Lambda functions.
version_added: '2.2'
requirements: [ boto3 ]
options:
name:
description:
- The name you want to assign to the function you are uploading. Cannot be changed.
required: true
state:
description:
- Create or delete Lambda function.
default: present
choices: [ 'present', 'absent' ]
runtime:
description:
- The runtime environment for the Lambda function you are uploading.
- Required when creating a function. Uses parameters as described in boto3 docs.
- Required when C(state=present).
- For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
role:
description:
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
resources. You may use the bare ARN if the role belongs to the same AWS account.
- Required when C(state=present).
handler:
description:
- The function within your code that Lambda calls to begin execution.
zip_file:
description:
- A .zip file containing your deployment package
- If C(state=present) then either zip_file or s3_bucket must be present.
aliases: [ 'src' ]
s3_bucket:
description:
- Amazon S3 bucket name where the .zip file containing your deployment package is stored.
- If C(state=present) then either zip_file or s3_bucket must be present.
- C(s3_bucket) and C(s3_key) are required together.
s3_key:
description:
- The Amazon S3 object (the deployment package) key name you want to upload.
- C(s3_bucket) and C(s3_key) are required together.
s3_object_version:
description:
- The Amazon S3 object (the deployment package) version you want to upload.
description:
description:
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
timeout:
description:
- The function maximum execution time in seconds after which Lambda should terminate the function.
default: 3
memory_size:
description:
- The amount of memory, in MB, your Lambda function is given.
default: 128
vpc_subnet_ids:
description:
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
the function in a VPC.
vpc_security_group_ids:
description:
- List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
environment_variables:
description:
- A dictionary of environment variables the Lambda function is given.
aliases: [ 'environment' ]
version_added: "2.3"
dead_letter_arn:
description:
- The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
version_added: "2.3"
tags:
description:
- tag dict to apply to the function (requires botocore 1.5.40 or above).
version_added: "2.5"
author:
- 'Steyn Huizinga (@steynovich)'
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create Lambda functions
- name: looped creation
lambda:
name: '{{ item.name }}'
state: present
zip_file: '{{ item.zip_file }}'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lamb
|
da_basic_execution'
handler: 'hello_python.my_
|
handler'
vpc_subnet_ids:
- subnet-123abcde
- subnet-edcba321
vpc_security_group_ids:
- sg-123abcde
- sg-edcba321
environment_variables: '{{ item.env_vars }}'
tags:
key1: 'value1'
loop:
- name: HelloWorld
zip_file: hello-code.zip
env_vars:
key1: "first"
key2: "second"
- name: ByeBye
zip_file: bye-code.zip
env_vars:
key1: "1"
key2: "2"
# To remove previously added tags pass an empty dict
- name: remove tags
lambda:
name: 'Lambda function'
state: present
zip_file: 'code.zip'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
tags: {}
# Basic Lambda function deletion
- name: Delete Lambda functions HelloWorld and ByeBye
lambda:
name: '{{ item }}'
state: absent
loop:
- HelloWorld
- ByeBye
'''
RETURN = '''
code:
description: the lambda function location returned by get_function in boto3
returned: success
type: dict
sample:
{
'location': 'a presigned S3 URL',
'repository_type': 'S3',
}
configuration:
description: the lambda function metadata returned by get_function in boto3
returned: success
type: dict
sample:
{
'code_sha256': 'SHA256 hash',
'code_size': 123,
'description': 'My function',
'environment': {
'variables': {
'key': 'value'
}
},
'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
'function_name': 'myFunction',
'handler': 'index.handler',
'last_modified': '2017-08-01T00:00:00.000+0000',
'memory_size': 128,
'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
'runtime': 'nodejs6.10',
'timeout': 3,
'version': '1',
'vpc_config': {
'security_group_ids': [],
'subnet_ids': []
}
}
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import compare_aws_tags
import base64
import hashlib
import traceback
import re
try:
from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
except ImportError:
pass # protected by AnsibleAWSModule
def get_account_info(module, region=None, endpoint=None, **aws_connect_kwargs):
"""return the account information (account id and partition) we are currently working on
get_account_info tries too find out the account that we are working
on. It's not guaranteed that this will be easy so we try in
several different ways. Giving either IAM or STS privileges to
the account should be enough to permit this.
"""
account_id = None
partition = None
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=endpoint, **aws_connect_kwargs)
caller_id = sts_client.get_caller_identity()
account_id = caller_id.get('Account')
partition = caller_id.get('Arn').split(':')[1]
except ClientError:
try:
iam_client = boto3_conn(module, conn_type='client', resource='iam',
region=region, endpoint=endpoint, **aws_connect_kwargs)
arn, partition, service, reg, account_id, resource = iam_client.get_user()['User']['Arn'].split(':')
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
|
esplinr/foodcheck
|
wsgi/foodcheck_proj/settings.py
|
Python
|
agpl-3.0
| 8,249
| 0.003758
|
# -*- coding: utf-8 -*-
'''
Django settings for foodcheck project
'''
# Copyright (C) 2013 Timothy James Austen, Eileen Qiuhua Lin,
# Richard Esplin <richard-oss@esplins.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import imp, os
# a setting to determine whether we are running on OpenShift
ON_OPENSHIFT = False
if os.environ.has_key('OPENSHIFT_REPO_DIR'):
ON_OPENSHIFT = True
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
<<<<<<< HEAD:wsgi/foodcheck_proj/settings.py
# turn off debug when on production
if (os.environ['OPENSHIFT_NAMESPACE'] == 'foodcheck' and
os.environ['OPENSHIFT_APP_NAME'] == 'live'):
DEBUG = False
=======
if ON_OPENSHIFT:
DEBUG = bool(os.environ.get('DEBUG', False))
if DEBUG:
print("WARNING: The DEBUG environment is set to True.")
>>>>>>> django-example/master:wsgi/openshift/settings.py
else:
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
('Richard Esplin', 'richard-oss@esplins.org'),
('Timothy Austen', 'austentj@gmail.com'),
)
MANAGERS = ADMINS
if ON_OPENSHIFT:
# os.environ['OPENSHIFT_MYSQL_DB_*'] variables can be used with databases created
# with rhc cartridge add (see /README in this git repo)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['OPENSHIFT_APP_NAME'],
'USER': os.environ['OPENSHIFT_POSTGRESQL_DB_USERNAME'],
'PASSWORD': os.environ['OPENSHIFT_POSTGRESQL_DB_PASSWORD'],
'HOST': os.environ['OPENSHIFT_POSTGRESQL_DB_HOST'],
'PORT': os.environ['OPENSHIFT_POSTGRESQL_DB_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(os.environ['OPENSHIFT_DATA_DIR'], 'sqlite3.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.environ.get('OPENSHIFT_DATA_DIR', '')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, '..', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# Listing the project dir here avoids having to collect static files in a
# subdirectory i.e. /static/css instead of /static/foodcheck_proj/css
# example: os.path.join(PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make a dictionary of default keys
default_keys = { 'SECRET_KEY': 'vm4rl5*ymb@2&d_(gc$gb-^twq9w(u69hi--%$5xrh!xk(t%hw' }
# Replace default keys with dynamic values if we are in OpenShift
use_keys = default_keys
if ON_OPENSHIFT:
imp.find_module('openshiftlibs')
import openshiftlibs
use_keys = openshiftlibs.openshift_secure(default_keys)
# Make this unique, and don't share it with anybody.
SECRET_KEY = use_keys['SECRET_KEY']
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware
|
.MessageMiddleware',
)
ROOT_URLCONF = 'foodcheck_proj.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'd
|
jango.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south',
'leaflet',
'foodcheck_app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
)
# Leaflet settings
LEAFLET_CONFIG = {
'DEFAULT_CENTER': (37.7750, -122.4183),
'DEFAULT_ZOOM': 10,
'PLUGINS': {
'main': {
'js': STATIC_URL + 'js/demo.js',
},
|
jandom/rdkit
|
rdkit/Chem/PeriodicTable.py
|
Python
|
bsd-3-clause
| 5,870
| 0.018739
|
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" periodic table data, **obsolete**
now that the C++ code exposes an interface to the internal PT stuff,
this data is mostly obsolete
"""
# Num Symb RCov RBO RVdW Max Bnd Mass nval
periodicData=\
"""
0 X 0.0 0.0 0.0 0 0.000 0
1 H 0.230 0.330 1.200 1 1.008 1
2 He 0.930 0.700 1.400 0 4.003 2
3 Li 0.680 1.230 1.820 1 6.941 1
4 Be 0.350 0.900 1.700 2 9.012 2
5 B 0.830 0.820 2.080 3 10.812 3
6 C 0.680 0.770 1.950 4 12.011 4
7 N 0.680 0.700 1.850 4 14.007 5
8 O 0.680 0.660 1.700 2 15.999 6
9 F 0.640 0.611 1.730 1 18.998 7
10 Ne 1.120 0.700 1.540 0 20.180 8
11 Na 0.970 1.540 2.270 1 22.990 1
12 Mg 1.100 1.360 1.730 2 24.305 2
13 Al 1.350 1.180 2.050 6 26.982 3
14 Si 1.200 0.937 2.100 6 28.086 4
15 P 0.750 0.890 2.080 5 30.974 5
16 S 1.020 1.040 2.000 6 32.067 6
17 Cl 0.990 0.997 1.970 1 35.453 7
18 Ar 1.570 1.740 1.880 0 39.948 8
19 K 1.330 2.030 2.750 1 39.098 1
20 Ca 0.990 1.740 1.973 2 40.078 2
21 Sc 1.440 1.440 1.700 6 44.956 3
22 Ti 1.470 1.320 1.700 6 47.867 4
23 V 1.330 1.220 1.700 6 50.942 5
24 Cr 1.350 1.180 1.700 6 51.996 6
25 Mn 1.350 1.170 1.700 8 54.938 7
26 Fe 1.340 1.170 1.700 6 55.845 8
27 Co 1.330 1.160 1.700 6 58.933 9
28 Ni 1.500 1.150 1.630 6 58.693 10
29 Cu 1.520 1.170 1.400 6 63.546 11
30 Zn 1.450 1.250 1.390 6
|
65.39 2
31 Ga 1.220 1.260 1.870 3 69.723 3
32 Ge 1.170 1.188 1.700 4 72.61 4
33 As 1.210 1.200 1.850 3 74.922 5
34 Se 1.220 1.170 1.900 2 78.96 6
35 Br 1.210 1.167 2.100 1 79.904 7
36 Kr 1.910 1.910 2.020 0 83.80 8
37 Rb 1.470 2.160 1.700 1 85.468 1
38 Sr 1.120 1.910 1.700 2 87.62 2
39 Y 1.780 1.620 1.700 6 88.906 3
40 Zr 1.560 1.450 1.700 6 91.224 4
41 Nb 1.480 1.340 1.700
|
6 92.906 5
42 Mo 1.470 1.300 1.700 6 95.94 6
43 Tc 1.350 1.270 1.700 6 98.0 7
44 Ru 1.400 1.250 1.700 6 101.07 8
45 Rh 1.450 1.250 1.700 6 102.906 9
46 Pd 1.500 1.280 1.630 6 106.42 10
47 Ag 1.590 1.340 1.720 6 107.868 11
48 Cd 1.690 1.480 1.580 6 112.412 2
49 In 1.630 1.440 1.930 3 114.818 3
50 Sn 1.460 1.385 2.170 4 118.711 4
51 Sb 1.460 1.400 2.200 3 121.760 5
52 Te 1.470 1.378 2.060 2 127.60 6
53 I 1.400 1.387 2.150 1 126.904 7
54 Xe 1.980 1.980 2.160 0 131.29 8
55 Cs 1.670 2.350 1.700 1 132.905 1
56 Ba 1.340 1.980 1.700 2 137.328 2
57 La 1.870 1.690 1.700 12 138.906 3
58 Ce 1.830 1.830 1.700 6 140.116 4
59 Pr 1.820 1.820 1.700 6 140.908 3
60 Nd 1.810 1.810 1.700 6 144.24 4
61 Pm 1.800 1.800 1.700 6 145.0 5
62 Sm 1.800 1.800 1.700 6 150.36 6
63 Eu 1.990 1.990 1.700 6 151.964 7
64 Gd 1.790 1.790 1.700 6 157.25 8
65 Tb 1.760 1.760 1.700 6 158.925 9
66 Dy 1.750 1.750 1.700 6 162.50 10
67 Ho 1.740 1.740 1.700 6 164.930 11
68 Er 1.730 1.730 1.700 6 167.26 12
69 Tm 1.720 1.720 1.700 6 168.934 13
70 Yb 1.940 1.940 1.700 6 173.04 14
71 Lu 1.720 1.720 1.700 6 174.967 15
72 Hf 1.570 1.440 1.700 6 178.49 4
73 Ta 1.430 1.340 1.700 6 180.948 5
74 W 1.370 1.300 1.700 6 183.84 6
75 Re 1.350 1.280 1.700 6 186.207 7
76 Os 1.370 1.260 1.700 6 190.23 8
77 Ir 1.320 1.270 1.700 6 192.217 9
78 Pt 1.500 1.300 1.720 6 195.078 10
79 Au 1.500 1.340 1.660 6 196.967 11
80 Hg 1.700 1.490 1.550 6 200.59 2
81 Tl 1.550 1.480 1.960 3 204.383 3
82 Pb 1.540 1.480 2.020 4 207.2 4
83 Bi 1.540 1.450 1.700 3 208.980 5
84 Po 1.680 1.460 1.700 2 209.0 6
85 At 1.700 1.450 1.700 1 210.0 7
86 Rn 2.400 2.400 1.700 0 222.0 8
87 Fr 2.000 2.000 1.700 1 223.0 1
88 Ra 1.900 1.900 1.700 2 226.0 2
89 Ac 1.880 1.880 1.700 6 227.0 3
90 Th 1.790 1.790 1.700 6 232.038 4
91 Pa 1.610 1.610 1.700 6 231.036 3
92 U 1.580 1.580 1.860 6 238.029 4
93 Np 1.550 1.550 1.700 6 237.0 5
94 Pu 1.530 1.530 1.700 6 244.0 6
95 Am 1.510 1.070 1.700 6 243.0 7
96 Cm 1.500 0.000 1.700 6 247.0 8
97 Bk 1.500 0.000 1.700 6 247.0 9
98 Cf 1.500 0.000 1.700 6 251.0 10
99 Es 1.500 0.000 1.700 6 252.0 11
100 Fm 1.500 0.000 1.700 6 257.0 12
101 Md 1.500 0.000 1.700 6 258.0 13
102 No 1.500 0.000 1.700 6 259.0 14
103 Lr 1.500 0.000 1.700 6 262.0 15
"""
nameTable = {}
numTable = {}
for line in periodicData.split('\n'):
splitLine = line.split()
if len(splitLine) > 1:
nameTable[splitLine[1]] = (int(splitLine[0]),float(splitLine[6]),int(splitLine[7]),\
int(splitLine[5]),float(splitLine[2]),float(splitLine[3]),
float(splitLine[4]))
numTable[int(splitLine[0])] = (splitLine[1],float(splitLine[6]),int(splitLine[7]),\
int(splitLine[5]),float(splitLine[2]),float(splitLine[3]),
float(splitLine[4]))
# a list of metals (transition metals, semi-metals, lanthanides and actinides)
metalRanges = ["13", "21-32", "39-51", "57-84", "89-103"]
metalNumList = []
for entry in metalRanges:
t = entry.split('-')
start = int(t[0])
if len(t) > 1:
end = int(t[1])
else:
end = start
if start > end:
start, end = end, start
metalNumList += range(start, end + 1)
metalNames = map(lambda x: numTable[x][0], metalNumList)
# these are from table 4 of Rev. Comp. Chem. vol 2, 367-422, (1991)
# the order is [alpha(SP),alpha(SP2),alpha(SP3)]
# where values are not known, None has been inserted
hallKierAlphas = {
'H': [0.0, 0.0, 0.0], # removes explicit H's from consideration in the shape
'C': [-0.22, -0.13, 0.0],
'N': [-0.29, -0.20, -0.04],
'O': [None, -0.20, -0.04],
'F': [None, None, -0.07],
'P': [None, 0.30, 0.43],
'S': [None, 0.22, 0.35],
'Cl': [None, None, 0.29],
'Br': [None, None, 0.48],
'I': [None, None, 0.73]
}
|
dreaming-dog/kaldi-long-audio-alignment
|
scripts/classes/entry.py
|
Python
|
apache-2.0
| 326
| 0.04908
|
# Copyright 2017 Speech Lab, EE Dept., IITM (Author: Srinivas Venkattaramanu
|
jam)
class Entry:
def __init__(self, begin_time, end_time, st
|
atus, word_begin, word_end):
self.begin_time=float(begin_time)
self.end_time=float(end_time)
self.status=status
self.word_begin=int(word_begin)
self.word_end=int(word_end)
|
waprin/gcloud-python
|
gcloud/logging/test_logger.py
|
Python
|
apache-2.0
| 25,904
| 0
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestLogger(unittest2.TestCase):
PROJECT = 'test-project'
LOGGER_NAME = 'logger-name'
def _getTargetClass(self):
from gcloud.logging.logger import Logger
return Logger
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
conn = object()
client = _Client(self.PROJECT, conn)
logger = self._makeOne(self.LOGGER_NAME, client=client)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertTrue(logger.client is client)
self.assertEqual(logger.project, self.PROJECT)
self.assertEqual(logger.full_name, 'projects/%s/logs/%s'
% (self.PROJECT, self.LOGGER_NAME))
self.assertEqual(logger.path, '/projects/%s/logs/%s'
% (self.PROJECT, self.LOGGER_NAME))
self.assertEqual(logger.labels, None)
def test_ctor_explicit(self):
LABELS = {'foo': 'bar', 'baz': 'qux'}
conn = object()
client = _Client(self.PROJECT, conn)
logger = self._makeOne(self.LOGGER_NAME, client=client, labels=LABELS)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertTrue(logger.client is client)
self.assertEqual(logger.project, self.PROJECT)
self.assertEqual(logger.full_name, 'projects/%s/logs/%s'
% (self.PROJECT, self.LOGGER_NAME))
self.assertEqual(logger.path, '/projects/%s/logs/%s'
% (self.PROJECT, self.LOGGER_NAME))
self.assertEqual(logger.labels, LABELS)
def test_batch_w_bound_client(self):
from gcloud.logging.logger import Batch
conn = object()
client = _Client(self.PROJECT, conn)
logger = self._makeOne(self.LOGGER_NAME, client=client)
batch = logger.batch()
self.assertTrue(isinstance(batch, Batch))
self.assertTrue(batch.logger is logger)
self.assertTrue(batch.client is client)
def test_batch_w_alternate_client(self):
from gcloud.logging.logger import Batch
conn1 = object()
conn2 = object()
client1 = _Client(self.PROJECT, conn1)
client2 = _Client(self.PROJECT, conn2)
logger = self._makeOne(self.LOGGER_NAME, client=client1)
batch = logger.batch(client2)
self.assertTrue(isinstance(batch, Batch))
self.assertTrue(batch.logger is logger)
self.assertTrue(batch.client is client2)
def test_log_text_w_str_implicit_client(self):
TEXT = 'TEXT'
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'textPayload': TEXT,
'resource': {
'type': 'global',
},
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client)
|
logger.log_text(TEXT)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_text_w_default_labels(self):
TEXT = 'TEXT'
DEFAULT_LABELS = {'foo': 'spam'}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'textPayload': TEXT,
|
'resource': {
'type': 'global',
},
'labels': DEFAULT_LABELS,
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client,
labels=DEFAULT_LABELS)
logger.log_text(TEXT)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_text_w_unicode_explicit_client_labels_severity_httpreq(self):
TEXT = u'TEXT'
DEFAULT_LABELS = {'foo': 'spam'}
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'textPayload': TEXT,
'resource': {
'type': 'global',
},
'labels': LABELS,
'insertId': IID,
'severity': SEVERITY,
'httpRequest': REQUEST,
}]
client1 = _Client(self.PROJECT)
client2 = _Client(self.PROJECT)
api = client2.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client1,
labels=DEFAULT_LABELS)
logger.log_text(TEXT, client=client2, labels=LABELS,
insert_id=IID, severity=SEVERITY, http_request=REQUEST)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_struct_w_implicit_client(self):
STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'jsonPayload': STRUCT,
'resource': {
'type': 'global',
},
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client)
logger.log_struct(STRUCT)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_struct_w_default_labels(self):
STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'}
DEFAULT_LABELS = {'foo': 'spam'}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'jsonPayload': STRUCT,
'resource': {
'type': 'global',
},
'labels': DEFAULT_LABELS,
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client,
labels=DEFAULT_LABELS)
logger.log_struct(STRUCT)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_struct_w_explicit_client_labels_severity_httpreq(self):
STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'}
DEFAULT_LABELS = {'foo': 'spam'}
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'jsonPayload': STRUCT,
'resource': {
'type': 'global',
},
'labels': LABELS,
'insertId': IID,
'severity': SEVERITY,
'httpRequest': REQUEST,
}]
client1 = _Client(self.PROJECT)
client2 = _Client(self.PROJECT)
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/main/migrations/0109_v370_job_template_organization_field.py
|
Python
|
apache-2.0
| 3,857
| 0.002074
|
# Generated by Django 2.2.4 on 2019-08-07 19:56
import awx.main.utils.polymorphic
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
from awx.main.migrations._rbac import (
rebuild_role_parentage, rebuild_role_hierarchy,
migrate_ujt_organization, migrate_ujt_organization_backward,
restore_inventory_admins, restore_inventory_admins_backward
)
def rebuild_jt_parents(apps, schema_editor):
rebuild_role_parentage(apps, schema_editor, models=('jobtemplate',))
class Migration(migrations.Migration):
dependencies = [
('main', '0108_v370_unifiedjob_dependencies_processed'),
]
op
|
erations = [
# backwards parents and ancestors caching
migrations.RunPython(migrations.RunPython.noop, rebuild_jt_parents),
# add new organizat
|
ion field for JT and all other unified jobs
migrations.AddField(
model_name='unifiedjob',
name='tmp_organization',
field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this unified job.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobs', to='main.Organization'),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='tmp_organization',
field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this template.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplates', to='main.Organization'),
),
# while new and old fields exist, copy the organization fields
migrations.RunPython(migrate_ujt_organization, migrate_ujt_organization_backward),
# with data saved, remove old fields
migrations.RemoveField(
model_name='project',
name='organization',
),
migrations.RemoveField(
model_name='workflowjobtemplate',
name='organization',
),
# now, without safely rename the new field without conflicts from old field
migrations.RenameField(
model_name='unifiedjobtemplate',
old_name='tmp_organization',
new_name='organization',
),
migrations.RenameField(
model_name='unifiedjob',
old_name='tmp_organization',
new_name='organization',
),
# parentage of job template roles has genuinely changed at this point
migrations.AlterField(
model_name='jobtemplate',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.job_template_admin_role'], related_name='+', to='main.Role'),
),
migrations.AlterField(
model_name='jobtemplate',
name='execute_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role', 'organization.execute_role'], related_name='+', to='main.Role'),
),
migrations.AlterField(
model_name='jobtemplate',
name='read_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], related_name='+', to='main.Role'),
),
# Re-compute the role parents and ancestors caching
migrations.RunPython(rebuild_jt_parents, migrations.RunPython.noop),
# for all permissions that will be removed, make them explicit
migrations.RunPython(restore_inventory_admins, restore_inventory_admins_backward),
]
|
vlegoff/mud
|
typeclasses/players.py
|
Python
|
bsd-3-clause
| 4,163
| 0.001681
|
"""
Player
The Player represents the game "account" and each login has only one
Player object. A Player is what chats on default channels but has no
other in-game-world existance. Rather the Player puppets Objects (such
as Characters) in order to actually participate in the game world.
Guest
Guest players are simple low-level accounts that are created/deleted
on the fly and allows users to test the game without the committment
of a full registration. Guest accounts are deactivated by default; to
activate them, add the following line to your settings file:
GUEST_ENABLED = True
You will also need to modify the connection screen to reflect the
possibility to connect with a guest account. The setting file accepts
several more options for customizing the Guest account system.
"""
from evennia import DefaultPlayer, DefaultGuest
class Player(DefaultPlayer):
"""
This class describes the actual OOC player (i.e. the user connecting
to the MUD). It does NOT have visual appearance in the game world (that
is handled by the character which is connected to this). Comm channels
are attended/joined using this object.
It can be useful e.g. for storing configuration options for your game, but
should generally not hold any character-related info (that's best handled
on the character level).
Can be set using BASE_PLAYER_TYPECLASS.
* available properties
key (string) - name of player
name (string)- wrapper for user.username
aliases (list of strings) - aliases to the object. Will be saved to database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
user (User, read-only) - django User authorization object
obj (Object)
|
- game object controlled by player. 'character' can also be used.
sessions (list of Sessions) - sessions connected to this player
is_superuser (bool, read-only) - if the connected user is a superuser
* Handlers
locks - lock-
|
handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods
msg(text=None, **kwargs)
swap_character(new_character, delete_old_character=False)
execute_cmd(raw_string, session=None)
search(ostring, global_search=False, attribute_name=None, use_nicks=False, location=None, ignore_errors=False, player=False)
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
* Hook methods (when re-implementation, remember methods need to have self as first arg)
basetype_setup()
at_player_creation()
- note that the following hooks are also found on Objects and are
usually handled on the character level:
at_init()
at_cmdset_get(**kwargs)
at_first_login()
at_post_login(session=None)
at_disconnect()
at_message_receive()
at_message_send()
at_server_reload()
at_server_shutdown()
"""
def at_look(self, target=None, session=None):
"""Disable the look command on players.
This is due to the fact that players are never in OOC
mode. When login through the menu, the user has to
select a character and then automatically puppets it.
Likewise, the @ooc and @ic commands are disabled.
"""
return ""
class Guest(DefaultGuest):
"""
This class is used for guest logins. Unlike Players, Guests and their
characters are deleted after disconnection.
"""
pass
|
google-research/plur
|
plur/stage_1/code2seq_dataset.py
|
Python
|
apache-2.0
| 16,900
| 0.006154
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for converting the Code2Seq dataset to a PLUR dataset.
"""
import os
import tarfile
import apache_beam as beam
from plur.stage_1.plur_dataset import Configuration
from plur.stage_1.plur_dataset import PlurDataset
from plur.utils import constants
from plur.utils import util
from plur.utils.graph_to_output_example import GraphToOutputExample
from plur.utils.graph_to_output_example import GraphToOutputExampleNotValidError
import tqdm
class Code2SeqDataset(PlurDataset):
# pylint: disable=line-too-long
"""Converting data from code2seq dataset to a PLUR dataset.
The dataset is used in: Alon, Uri, et al. 'code2seq: Generating sequences from
structured representations of code.' arXiv preprint arXiv:1808.01400 (2018).
The task is to predict the function name given the function body.
The provided dataset by code2seq are the tokenized function name, and the AST
paths. Therefore we have to create our own graph represent
|
ation of code2seq.
We try to mimic the code2seq model by constructing a graph similar to figure
3 in the code2seq paper. An example of such graph is shown in
https://drive.google.com/file/d/1-cH0FzYI
|
MikgTkUpzVkEZDGjoiqBB9C1/view?usp=sharing.
In short, we build the AST path subtree and connects all AST paths with a
code2seq root node to make it a graph.
"""
_URLS_SMALL = {
'java-small-preprocessed.tar.gz': {
'url': 'https://s3.amazonaws.com/code2seq/datasets/java-small-preprocessed.tar.gz',
'sha1sum': '857c2495785f606ab99676c7bbae601ea2160f66',
}
}
_URLS_MED = {
'java-med-preprocessed.tar.gz': {
'url': 'https://s3.amazonaws.com/code2seq/datasets/java-med-preprocessed.tar.gz',
'sha1sum': '219e558ddf46678ef322ff75bf1982faa1b6204d',
}
}
_URLS_LARGE = {
'java-large-preprocessed.tar.gz': {
'url': 'https://s3.amazonaws.com/code2seq/datasets/java-large-preprocessed.tar.gz',
'sha1sum': 'ebc229ba1838a3c8f3a69ab507eb26fa5460152a',
}
}
# pylint: enable=line-too-long
_GIT_URL = {}
_DATASET_NAME = 'code2seq_dataset'
_DATASET_DESCRIPTION = """\
This dataset is used to train the code2seq model. The task is to predict the
function name, given the ast paths sampled the function AST. An AST path is
a path between two leaf nodes in the AST.
"""
def __init__(self,
stage_1_dir,
configuration: Configuration = Configuration(),
transformation_funcs=(),
filter_funcs=(),
user_defined_split_range=(),
num_shards=1000,
seed=0,
dataset_size='small',
deduplicate=False):
# dataset_size can only be 'small', 'med' or 'large'.
valid_dataset_size = {'small', 'med', 'large'}
if dataset_size not in valid_dataset_size:
raise ValueError('{} not in {}'.format(dataset_size,
str(valid_dataset_size)))
if dataset_size == 'small':
urls = self._URLS_SMALL
elif dataset_size == 'med':
urls = self._URLS_MED
else:
urls = self._URLS_LARGE
self.dataset_size = dataset_size
super().__init__(self._DATASET_NAME, urls, self._GIT_URL,
self._DATASET_DESCRIPTION, stage_1_dir,
transformation_funcs=transformation_funcs,
filter_funcs=filter_funcs,
user_defined_split_range=user_defined_split_range,
num_shards=num_shards, seed=seed,
configuration=configuration, deduplicate=deduplicate)
def download_dataset(self):
"""Download the dataset using requests and extract the tarfile."""
super().download_dataset_using_requests()
# Extract the tarfile depending on the dataset size.
if self.dataset_size == 'small':
self.code2seq_extracted_dir = os.path.join(
self.raw_data_dir, 'java-small')
tarfile_name = 'java-small-preprocessed.tar.gz'
elif self.dataset_size == 'med':
self.code2seq_extracted_dir = os.path.join(
self.raw_data_dir, 'java-med')
tarfile_name = 'java-med-preprocessed.tar.gz'
else:
self.code2seq_extracted_dir = os.path.join(
self.raw_data_dir, 'java-large')
tarfile_name = 'java-large-preprocessed.tar.gz'
tarfiles_to_extract = []
tarfiles_to_extract = util.check_need_to_extract(
tarfiles_to_extract, self.code2seq_extracted_dir,
tarfile_name)
for filename in tarfiles_to_extract:
dest = os.path.join(self.raw_data_dir, filename)
with tarfile.open(dest, 'r:gz') as tf:
for member in tqdm.tqdm(
tf.getmembers(),
unit='file',
desc='Extracting {}'.format(filename)):
tf.extract(member, self.raw_data_dir)
def get_all_raw_data_paths(self):
"""Get paths to all raw data."""
# Get the filenames depending on the dataset size.
if self.dataset_size == 'small':
train_file = os.path.join(
self.code2seq_extracted_dir, 'java-small.train.c2s')
validation_file = os.path.join(
self.code2seq_extracted_dir, 'java-small.val.c2s')
test_file = os.path.join(
self.code2seq_extracted_dir, 'java-small.test.c2s')
elif self.dataset_size == 'med':
train_file = os.path.join(
self.code2seq_extracted_dir, 'java-med.train.c2s')
validation_file = os.path.join(
self.code2seq_extracted_dir, 'java-med.val.c2s')
test_file = os.path.join(
self.code2seq_extracted_dir, 'java-med.test.c2s')
else:
train_file = os.path.join(
self.code2seq_extracted_dir, 'java-large.train.c2s')
validation_file = os.path.join(
self.code2seq_extracted_dir, 'java-large.val.c2s')
test_file = os.path.join(
self.code2seq_extracted_dir, 'java-large.test.c2s')
return [train_file, validation_file, test_file]
def raw_data_paths_to_raw_data_do_fn(self):
"""Returns a beam.DoFn subclass that reads the raw data."""
return C2SExtractor(super().get_random_split,
bool(self.user_defined_split_range))
def _construct_token_subtree(self, graph_to_output_example, token,
cur_node_id, token_root_name):
# pylint: disable=line-too-long
"""Construct the token subtree in a AST path.
We create a node for each subtoken in the token, all subtokens are connected
to the next subtoken via the 'NEXT_SUBTOKEN' edge. All subtokens are
connected to the token root node via the 'SUBTOKEN' edge. See the draw.io
figure mentioned in the class doc for the visualization.
Args:
graph_to_output_example: A GraphToOutputExample instance.
token: Starting or ending token in the AST path.
cur_node_id: Next available node id.
token_root_name: Node type and label for the token root node.
Returns:
A tuple of graph_to_output_example, cur_node_id, token_node_id.
graph_to_output_example is updated with the token subtree, cur_node_id is
the next available node id after all the token subtree nodes are added,
and token_node_id is the node id of the root token node.
"""
subtokens = token.split('|')
subtoken_node_ids = []
prev_subtoken_id = -1
# Create a node each subtoken.
for subtoken in subtokens:
graph_to_output_example.add_node(cur_node_id, 'SUBTOKEN', subtoken)
subtoken_node_ids.append(cur_node_id)
# Connects to the previous subtoken node
if prev_subtoken_id != -1:
gr
|
dwhall/pq
|
examples/countdown.py
|
Python
|
mit
| 1,269
| 0.002364
|
#!/usr/bin/env python3
import asyncio
import pq
class Countdown(pq.Ahsm):
def __init__(self, count=3):
super().__init__(Countdown.initial)
self.count = count
@pq.Hsm.state
def initial(me, event):
print("initial")
me.te = pq.Tim
|
eEvent("TIME_TICK")
return me.tran(me, Countdown.counting)
|
@pq.Hsm.state
def counting(me, event):
sig = event.signal
if sig == pq.Signal.ENTRY:
print("counting")
me.te.postIn(me, 1.0)
return me.handled(me, event)
elif sig == pq.Signal.TIME_TICK:
print(me.count)
if me.count == 0:
return me.tran(me, Countdown.done)
else:
me.count -= 1
me.te.postIn(me, 1.0)
return me.handled(me, event)
return me.super(me, me.top)
@pq.Hsm.state
def done(me, event):
sig = event.signal
if sig == pq.Signal.ENTRY:
print("done")
pq.Framework.stop()
return me.handled(me, event)
return me.super(me, me.top)
if __name__ == "__main__":
sl = Countdown(10)
sl.start(0)
loop = asyncio.get_event_loop()
loop.run_forever()
loop.close()
|
xupingmao/xnote
|
xutils/dbutil_sortedset.py
|
Python
|
gpl-3.0
| 3,154
| 0.013316
|
# -*- coding:utf-8 -*-
# @author xupingmao
# @since 2021/12/05 11:25:18
# @modified 2022/01/24 14:47:38
# @filename dbutil_sortedset.py
"""【待实现】有序集合,用于各种需要排名的场景,比如
- 最近编辑的笔记
- 访问次数最多的笔记
如果使用了LdbTable的索引功能,其实就不需要这个了
"""
from xutils.dbutil_base import *
from xutils.dbutil_hash import LdbHashTable
register_table("_rank", "排名表")
class RankTable:
def __init__(self, table_name, user_name = None):
check_table_name(table_name)
self.table_name = table_name
self.prefix = "_rank:" + table_name
if user_name != None and user_name != "":
self.prefix += ":" + user_name
if self.prefix[-1] != ":":
self.prefix += ":"
def _format_score(self, score):
if score is None:
return "$"
if isinstance(score, int):
return "%020d" % score
if isinstance(score, str):
return "%020s" % score
raise Exception("_format_score: unsupported score (%r)" % score)
def put(self, member, score, batch = None):
score_
|
str = self._format_score(score)
key = self.prefix + str(score) + ":" + member
if batch != None:
batch.put(key, member)
else:
put(key, member)
def delete(self, member, score, batch = None):
score_str = self._format_score(score)
key = self.prefix + str(score) + ":" + member
if batch != None:
batch.delete(key)
else:
delete(key)
|
def list(self, offset = 0, limit = 10, reverse = False):
return prefix_list(self.prefix, offset = offset,
limit = limit, reverse = reverse)
class LdbSortedSet:
def __init__(self, table_name, user_name = None, key_name = "_key"):
# key-value的映射
self.member_dict = LdbHashTable(table_name, user_name)
# score的排名
self.rank = RankTable(table_name, user_name)
def put(self, member, score):
"""设置成员分值"""
with get_write_lock(member):
batch = create_write_batch()
old_score = self.member_dict.get(member)
self.member_dict.put(member, score, batch = batch)
if old_score != score:
self.rank.delete(member, old_score, batch = batch)
self.rank.put(member, score, batch = batch)
commit_write_batch(batch)
def get(self, member):
return self.member_dict.get(member)
def delete(self, member):
with get_write_lock(member):
batch = create_write_batch()
old_score = self.member_dict.get(member)
if old_score != None:
self.member_dict.delete(member, batch = batch)
self.rank.delete(member, old_score, batch = batch)
commit_write_batch(batch)
def list_by_score(self, *args, **kw):
result = []
for member in self.rank.list(*args, **kw):
item = (member, self.get(member))
result.append(item)
return result
|
dparks1134/STAMP
|
stamp/plugins/samples/plots/configGUI/barUI.py
|
Python
|
gpl-3.0
| 9,975
| 0.00381
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'bar.ui'
#
# Created: Thu Aug 11 10:41:59 2011
# by: PyQt4 UI code generator 4.8.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_BarConfigDialog(object):
def setupUi(self, BarConfigDialog):
BarConfigDialog.setObjectName(_fromUtf8("BarConfigDialog"))
BarConfigDialog.resize(341, 241)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(BarConfigDialog.sizePolicy().hasHeightForWidth())
BarConfigDialog.setSizePolicy(sizePolicy)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/icons/programIcon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
BarConfigDialog.setWindowIcon(icon)
self.verticalLayout_4 = QtGui.QVBoxLayout(BarConfigDialog)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.lblFieldToPlot = QtGui.QLabel(BarConfigDialog)
self.lblFieldToPlot.setObjectName(_fromUtf8("lblFieldToPlot"))
self.horizontalLayout.addWidget(self.lblFieldToPlot)
self.cboFieldToPlot = QtGui.QComboBox(BarConfigDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cboFieldToPlot.sizePolicy().hasHeightForWidth())
self.cboFieldToPlot.setSizePolicy(sizePolicy)
self.cboFieldToPlot.setObjectName(_fromUtf8("cboFieldToPlot"))
self.horizontalLayout.addWidget(self.cboFieldToPlot)
self.chkSort = QtGui.QCheckBox(BarConfigDialog)
self.chkSort.setObjectName(_fromUtf8("chkSort"))
self.horizontalLayout.addWidget(self.chkSort)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.groupBox_3 = QtGui.QGroupBox(BarConfigDialog)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.lblFigureWidth = QtGui.QLabel(self.groupBox_3)
self.lblFigureWidth.setObjectName(_fromUtf8("lblFigureWidth"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblFigureWidth)
self.spinFigWidth = QtGui.QDoubleSpinBox(self.groupBox_3)
self.spinFigWidth.setDecimals(2)
self.spinFigWidth.setMinimum(2.0)
self.spinFigWidth.setMaximum(20.0)
self.spinFigWidth.setSingleStep(0.5)
self.spinFigWidth.setProperty(_fromUtf8("value"), 8.5)
self.spinFigWidth.setObjectName(_fromUtf8("spinFigWidth"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.spinFigWidth)
self.lblFigureHeight = QtGui.QLabel(self.groupBox_3)
self.lblFigureHeight.setObjectName(_fromUtf8("lblFigureHeight"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblFigureHeight)
self.spinFigRowHeight = QtGui.QDoubleSpinBox(self.groupBox_3)
self.spinFigRowHeight.setMinimum(0.1)
self.spinFigRowHeight.setMaximum(1.0)
self.spinFigRowHeight.setSingleStep(0.05)
self.spinFigRowHeight.setProperty(_fromUtf8("value"), 0.25)
self.spinFigRowHeight.setObjectName(_fromUtf8("spinFigRowHeight"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.spinFigRowHeight)
self.verticalLayout_3.addLayout(self.formLayout_2)
self.horizontalLayout_2.addWidget(self.groupBox_3)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
self.groupBox = QtGui.QGroupBox(BarConfigDialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.radioLegendPosUpperLeft = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosUpperLeft.setObjectName(_fromUtf8("radioLegendPosUpperLeft"))
self.gridLayout.addWidget(self.radioLegendPosUpperLeft, 0, 2, 1, 1)
self.radioLegendPosCentreLeft = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosCentreLeft.setObjectName(_fromUtf8("radioLegendPosCentreLeft"))
self.gridLayout.addWidget(self.radioLegendPosCentreLeft, 0, 3, 1, 1)
self.radioLegendPosLowerLeft = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosLowerLeft.setChecked(False)
self.radioLegendPosLowerLeft.setObjectName(_fromUtf8("radi
|
oLegendPosLowerLeft"))
self.gridLayout.addWidget(self.radioLegendPosLowerLeft, 0, 4, 1, 1)
self.radioLegendPosUpperRight = QtGui.QRadioButton(self.
|
groupBox)
self.radioLegendPosUpperRight.setObjectName(_fromUtf8("radioLegendPosUpperRight"))
self.gridLayout.addWidget(self.radioLegendPosUpperRight, 1, 2, 1, 1)
self.radioLegendPosCentreRight = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosCentreRight.setObjectName(_fromUtf8("radioLegendPosCentreRight"))
self.gridLayout.addWidget(self.radioLegendPosCentreRight, 1, 3, 1, 1)
self.radioLegendPosLowerRight = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosLowerRight.setObjectName(_fromUtf8("radioLegendPosLowerRight"))
self.gridLayout.addWidget(self.radioLegendPosLowerRight, 1, 4, 1, 1)
self.radioLegendPosBest = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosBest.setChecked(True)
self.radioLegendPosBest.setObjectName(_fromUtf8("radioLegendPosBest"))
self.gridLayout.addWidget(self.radioLegendPosBest, 1, 1, 1, 1)
self.radioLegendPosNone = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosNone.setObjectName(_fromUtf8("radioLegendPosNone"))
self.gridLayout.addWidget(self.radioLegendPosNone, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.verticalLayout_4.addWidget(self.groupBox)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
spacerItem = QtGui.QSpacerItem(100, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.buttonBox = QtGui.QDialogButtonBox(BarConfigDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(False)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.horizontalLayout_3.addWidget(self.buttonBox)
self.verticalLayout_4.addLayout(self.horizontalLayout_3)
self.retranslateUi(BarConfigDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), BarConfigDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), BarConfigDialog.reject)
QtCore.QMetaObject.connectSlotsByName(BarConfigDialog)
def retranslateUi(self, BarConfigDialog):
BarConfigDialog.setWindowTitle(QtGui.QApplication.translate("BarConfigDialog", "Bar plot", None, QtGui.QApplication.UnicodeUTF8))
self.lblFieldToPlot.setText(QtGui.QApplication.translate("BarConfigDialog", "Field to plot:", None, QtGui.QApplication.UnicodeUTF8))
self.chkSort.setText(QtGui.QA
|
breenmachine/clusterd
|
src/module/invoke_payload.py
|
Python
|
mit
| 3,120
| 0.003526
|
from src.module.deploy_utils import parse_war_path
from commands import getoutput
from log import LOG
import utility
def invoke(fingerengine, fingerprint, deployer):
"""
"""
if fingerengine.service in ["jboss", "tomcat"]:
return invoke_war(fingerengine, fingerprint)
elif fingerengine.service in ["coldfusion"]:
return invoke_cf(fingerengine, fingerprint, deployer)
else:
utility.Msg("Platform %s does not support --invoke" %
fingerengine.options.remote_service, LOG.ERROR)
def invoke_war(fingerengine, fingerprint):
""" Invoke a deployed WAR file on the remote server.
This uses unzip because Python's zip module isn't very portable or
fault tolerant; i.e. it fails to parse msfpayload-generated WARs, though
this is a fault of metasploit, not the Python module.
"""
dfile = fingerengine.options.deploy
jsp = getoutput("unzip -l %s | grep jsp" % dfile).split(' ')[-1]
if jsp == '':
utility.Msg("Failed to find a JSP in the deployed WAR", LOG.DEBUG)
return
else:
utility.Msg("Using JSP {0} from {1} to invoke".format(jsp, dfile), LOG.DEBUG)
url = "http://{0}:{1}/{2}/{3}".format(fingerengine.options.ip,
fingerprint.port,
parse_war_path(dfile),
jsp)
if _invoke(url):
utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
else:
utility.Msg("Failed to invoke {0}".format(parse_war_path(dfile, True)),
LOG.ERROR)
def invoke_cf(fingerengine, fingerprint, deployer):
"""
"""
dfile = parse_war_path(fingerengine.options.deploy, True)
if fingerprint.version in ["10.0"]:
# deployments to 10 require us to trigger a 404
url = "http://{0}:{1}/CFIDE/ad123.cfm".format(fingerengine.options.ip,
fingerprint.port)
elif fingerprint.version in ["8.0"] and "fck_editor" in deployer.__name__:
# invoke a shell via FCKeditor deployer
url = "http://{0}:{1}/userfiles/file
|
/{2}".format(fingerengine.options.ip,
fingerprint.port,
|
dfile)
else:
url = "http://{0}:{1}/CFIDE/{2}".format(fingerengine.options.ip,
fingerprint.port,
dfile)
if _invoke(url):
utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
else:
utility.Msg("Failed to invoke {0}".format(dfile), LOG.ERROR)
def _invoke(url):
""" Make the request
"""
status = False
try:
response = utility.requests_get(url)
if response.status_code == 200:
status = True
except Exception, e:
utility.Msg("Failed to invoke payload: %s" % e, LOG.ERROR)
status = False
return status
|
billzorn/fpunreal
|
titanfp/web/old_webdemo.py
|
Python
|
mit
| 14,240
| 0.00302
|
#!/usr/bin/env python
import sys
import os
import threading
import traceback
import json
import multiprocessing
import subprocess
import http
import html
import urllib
import argparse
from .aserver import AsyncCache, AsyncTCPServer, AsyncHTTPRequestHandler
from ..fpbench import fpcparser
from ..arithmetic import nati
|
ve, np
from ..arithmetic import softfloat, soft
|
posit
from ..arithmetic import ieee754, posit
from ..arithmetic import sinking
from ..arithmetic import canonicalize
from ..arithmetic import evalctx
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, 'index.html'), 'rb') as f:
index = f.read()
with open(os.path.join(here, 'evaluate.html'), 'rb') as f:
evaluate_page = f.read()
with open(os.path.join(here, 'translate.html'), 'rb') as f:
translate_page = f.read()
with open(os.path.join(here, 'titanic.css'), 'rb') as f:
css = f.read()
with open(os.path.join(here, 'titanfp.min.js'), 'rb') as f:
bundle = f.read()
with open(os.path.join(here, '../../../www/favicon.ico'), 'rb') as f:
favicon = f.read()
with open(os.path.join(here, '../../../www/piceberg_round.png'), 'rb') as f:
logo = f.read()
fpbench_root = '/home/bill/private/research/origin-FPBench'
fpbench_tools = os.path.join(fpbench_root, 'tools')
fpbench_benchmarks = os.path.join(fpbench_root, 'benchmarks')
def run_tool(toolname, core, *args):
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, toolname), *args],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate(input=core.sexp.encode('utf-8'))
success = True
retval = tool.wait()
if retval != 0:
success = False
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(tool.args), retval),
file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return success, stdout_data.decode('utf-8')
def filter_cores(*args, benchmark_dir = fpbench_benchmarks):
if not os.path.isdir(benchmark_dir):
raise ValueError('{}: not a directory'.format(benchmark_dir))
names = os.listdir(benchmark_dir)
benchmark_files = [name for name in names
if name.lower().endswith('.fpcore')
and os.path.isfile(os.path.join(benchmark_dir, name))]
cat = subprocess.Popen(
cwd=benchmark_dir,
args=['cat', *benchmark_files],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
cat.stdin.close()
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, 'filter.rkt'), *args],
stdin=cat.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate()
# cleanup
success = True
for proc in [cat, tool]:
retval = proc.wait()
if retval != 0:
success = False
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(proc.args), retval),
file=sys.stderr, flush=True)
cat_stderr_data = cat.stderr.read()
cat.stderr.close()
if cat_stderr_data:
print(cat_stderr_data, file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return success, stdout_data.decode('utf-8')
def demo_tool(success, output):
if success:
return output
else:
return 'Error - tool subprocess returned nonzero value'
def demo_arith(evaluator, arguments, core, ctx=None):
if arguments is None:
try:
return str(evaluator(core))
except Exception:
print('Exception in FPCore evaluation\n evaluator={}\n args={}\n core={}'
.format(repr(evaluator), repr(arguments), core.sexp))
traceback.print_exc()
return 'Error evaluating FPCore.'
else:
inputs = arguments.strip().split()
if len(inputs) != len(core.inputs):
return 'Error - wrong number of arguments (core expects {:d})'.format(len(core.inputs))
try:
return str(evaluator(core, inputs, ctx))
except Exception:
print('Exception in FPCore evaluation\n evaluator={}\n args={}\n core={}'
.format(repr(evaluator), repr(arguments), core.sexp))
traceback.print_exc()
return 'Error evaluating FPCore.'
class RaisingArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ValueError('unable to parse inputs')
DEFAULT_PROPAGATE = {'precision', 'round', 'math-library'}
DEFAULT_RECURSE = {'pre', 'spec'}
def parse_canon_args(args):
parser = RaisingArgumentParser(add_help=False)
parser.add_argument('--default', action='store_true')
parser.add_argument('--recurse', type=str, nargs='*')
parser.add_argument('--propagate', type=str, nargs='*')
ns = parser.parse_args(args.strip().split())
if ns.recurse is None and ns.propagate is None:
return DEFAULT_RECURSE, DEFAULT_PROPAGATE
if ns.recurse is None:
recurse = set()
else:
recurse = set(ns.recurse)
if ns.propagate is None:
propagate = set()
else:
propagate = set(ns.propagate)
if ns.default:
recurse.update(DEFAULT_RECURSE)
propagate.update(DEFAULT_PROPAGATE)
return recurse, propagate
def demo_canon(evaluator, arguments, core, use_prop=False):
try:
recurse, propagate = parse_canon_args(arguments)
except Exception:
print('Exception parsing arguments for canonicalizer: {}'.format(repr(arguments)))
traceback.print_exc()
return 'Error parsing arguments.'
try:
if use_prop:
return evaluator(core, recurse=recurse, propagate=propagate).sexp
else:
return evaluator(core, recurse=recurse).sexp
except Exception:
print('Exception in FPCore translation\n translator={}\n recurse={}\n propagate={}\n use_prop={}\n core={}'
.format(repr(evaluator), repr(recurse), repr(propagate), repr(use_prop), core.sexp))
traceback.print_exc()
return 'Error translating FPCore.'
class TitanfpHTTPRequestHandler(AsyncHTTPRequestHandler):
def import_core_from_query(self, content, query):
qd = urllib.parse.parse_qs(query)
return content.decode('utf-8').format(qd.get('core', [''])[-1]).encode('utf-8')
def construct_content(self, data):
pr = self.translate_path()
if pr.path == '/titanfp.min.js':
response = http.server.HTTPStatus.OK
msg = None
headers = (
('Content-Type', 'text/javascript'),
)
content = bundle
elif pr.path == '/titanic.css':
response = http.server.HTTPStatus.OK
msg = None
headers = (
('Content-Type', 'text/css'),
)
content = css
else:
response = http.server.HTTPStatus.OK
msg = None
if data is None:
if pr.path == '/favicon.ico':
headers = (
('Content-Type', 'image/x-icon'),
)
content = favicon
elif pr.path == '/piceberg_round.png':
headers = (
('Content-Type', 'image/png'),
)
content = logo
# elif pr.path == '/evaluate':
else:
headers = (
('Content-Type', 'text/html'),
)
content = self.import_core_from_query(evaluate_page, pr.query)
# elif pr.path == '/translate':
# headers = (
# ('Content-Type', 'text/html'),
# )
# content = self.import_core_from_query(translate_page, pr.query)
# else:
# print(pr)
|
SpectoLabs/myna
|
contrib/python-myna/myna/__init__.py
|
Python
|
apache-2.0
| 180
| 0.011111
|
from . import shim
tmpdir = Non
|
e
def setUp():
global tmpdir
tmpdir = shim.setup_shim_for('kubectl')
def tearDown():
global tmpdir
|
shim.teardown_shim_dir(tmpdir)
|
lowRISC/opentitan
|
util/tlgen/__init__.py
|
Python
|
apache-2.0
| 465
| 0
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .
|
doc import selfdoc # noqa: F401
from .elaborate import elaborate # noqa: F401
from .generate import generate # noqa: F401
from .generate_tb import generate_tb # noqa: F401
from .item import Edge, Node, NodeType # noqa: F401
from .validate import validate # noqa: F401
from .xbar
|
import Xbar # noqa: F401
|
tcstewar/boardgame
|
boardgame/card.py
|
Python
|
gpl-2.0
| 59
| 0.016949
|
i
|
mport boardgame as bg
class Card(bg.GamePiece):
pass
| |
imankulov/linguee-api
|
tests/test_linguee_client.py
|
Python
|
mit
| 863
| 0
|
import pytest
from linguee_api.const import LANGUAGE_CODE, LANGUAGES
from linguee_api.linguee_client import LingueeClient
from linguee_api.models import SearchResult
@pytest.mark.asyncio
async def test_linguee_client_should_redirect_on_not_found(
linguee_client: LingueeClient,
):
search_result = await linguee_client.process_search_result(
query="constibado", src="pt", dst="en", guess_direction=False
)
assert search_result.query == "constipado"
@pytest.mark.asyncio
@pytest.mark.parametrize("lang", list(LANGUAGES.keys()))
|
async def test_linguee_client_should_process_test_requests(
linguee_client: LingueeClient,
lang: LANGUAGE_CODE,
):
search_result = await linguee_client.process_search_result(
query="test", src="en", dst=lang, guess_direction=False
)
ass
|
ert isinstance(search_result, SearchResult)
|
Eylesis/Botfriend
|
Cogs/GameTime.py
|
Python
|
mit
| 1,883
| 0.012746
|
import discord
from discord.ext import commands
import time
import datetime
import pytz
class GameTime(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def time(self, ctx):
"""Displays current game time."""
locationName = self.bot.db.get_val("ServerInfo", "")
print(type(locationName))
print(locationName['CityName'])
embed = discord.Embed(title="Current time in {}".format(locationName['CityName']),description=get_gametime())
await ctx.send(embed=embed)
await ctx.message.delete_message()
def suffix(d):
return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
def get_rawtime():
return datetime.datetime.now(pytz.timezone('UTC'))
def get_gametime():
months = [
"Hammer",
"Alturiak",
"Ches",
"Tarsakh",
"Mirtul",
"Kythorn",
"Flamerule",
"Eleasis",
"Eleint",
"Marpenoth",
"Uktar",
"Nightal"]
aDate = datetime(2020, 10, 18, tzinfo=pytz.timezone('UTC'))
bDate = datetime.now(pytz.timezone('UTC'))
delta = bDate - aDate
gametime = datetime(2020, 10, 18, bDate.hour, bDate.minut
|
e, bDate.second) + timedelta(days=delta.days*3) + (timedelta(days=(bDate.hour//8-2)))
if gametime.hour == 0:
gametime_hour = 12
time_decor = "AM"
else:
gametime_hour = gametime.hour-12 if gametime.hour > 12 else gametime.hour
time_decor = "PM" if gametime.hour > 12 else "AM"
gametime_minute = "0{}".format(gametime.minute) if gametime.minute < 10 else gametime.minute
return "{}:{} {} UTC | {}{} of {}".forma
|
t(gametime_hour, gametime_minute, time_decor, gametime.day, suffix(gametime.day), months[gametime.month-1])
def setup(bot):
bot.add_cog(GameTime(bot))
|
casbeebc/abenity-python
|
setup.py
|
Python
|
mit
| 1,394
| 0
|
#!/usr/bin/env python
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
clas
|
s PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long', '-s', 'tests']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='abenity',
packages=['abenity'],
version='0.0.4',
descript
|
ion='Abenity API client',
long_description='A Python library for using the Abenity API.',
url='https://github.com/casbeebc/abenity-python',
author='Brett Casbeer',
author_email='brett.casbeer@gmail.com',
license='MIT',
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
setup_requires=['setuptools>=17.1'],
install_requires=['requests==2.20.0',
'pycryptodome==3.6.6',
'six==1.10.0'],
extras_require={'testing': ['pytest']},
tests_require=['pytest'],
)
|
4shadoww/usploit
|
lib/scapy/contrib/automotive/uds.py
|
Python
|
mit
| 44,266
| 0
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Nils Weiss <nils@we155.de>
# This program is published under a GPLv2 license
# scapy.contrib.description = Unified Diagnostic Service (UDS)
# scapy.contrib.status = loads
import struct
import time
from itertools import product
from scapy.fields import ByteEnumField, StrField, ConditionalField, \
BitEnumField, BitField, XByteField, FieldListField, \
XShortField, X3BytesField, XIntField, ByteField, \
ShortField, ObservableDict, XShortEnumField, XByteEnumField, StrLenField, \
FieldLenField
from scapy.packet import Packet, bind_layers, NoPayload
from scapy.config import conf
from scapy.error import log_loading
from scapy.utils import PeriodicSenderThread
from scapy.contrib.isotp import ISOTP
from scapy.compat import Dict, Union
"""
UDS
"""
try:
if conf.contribs['UDS']['treat-response-pending-as-answer']:
pass
except KeyError:
log_loading.info("Specify \"conf.contribs['UDS'] = "
"{'treat-response-pending-as-answer': True}\" to treat "
"a negative response 'requestCorrectlyReceived-"
"ResponsePending' as answer of a request. \n"
"The default value is False.")
conf.contribs['UDS'] = {'treat-response-pending-as-answer': False}
class UDS(ISOTP):
services = ObservableDict(
{0x10: 'DiagnosticSessionControl',
0x11: 'ECUReset',
0x14: 'ClearDiagnosticInformation',
0x19: 'ReadDTCInformation',
0x22: 'ReadDataByIdentifier',
0x23: 'ReadMemoryByAddress',
0x24: 'ReadScalingDataByIdentifier',
0x27: 'SecurityAccess',
0x28: 'CommunicationControl',
0x2A: 'ReadDataPeriodicIdentifier',
0x2C: 'DynamicallyDefineDataIdentifier',
0x2E: 'WriteDataByIdentifier',
0x2F: 'InputOutputControlByIdentifier',
0x31: 'RoutineControl',
0x34: 'RequestDownload',
0x35: 'RequestUpload',
0x36: 'TransferData',
0x37: 'RequestTransferExit',
0x38: 'RequestFileTransfer',
0x3D: 'WriteMemoryByAddress',
0x3E: 'TesterPresent',
0x50: 'DiagnosticSessionControlPositiveResponse',
0x51: 'ECUResetPositiveResponse',
0x54: 'ClearDiagnosticInformationPositiveResponse',
0x59: 'ReadDTCInformationPositiveResponse',
|
0x62: 'ReadDataByIdentifierPositiveResponse',
0x63: 'ReadMemoryByAddressPositiveResponse',
0x64: 'ReadScalingDataByIdentifierPositiveResponse',
0x67: 'SecurityAccessPositiveResponse',
|
0x68: 'CommunicationControlPositiveResponse',
0x6A: 'ReadDataPeriodicIdentifierPositiveResponse',
0x6C: 'DynamicallyDefineDataIdentifierPositiveResponse',
0x6E: 'WriteDataByIdentifierPositiveResponse',
0x6F: 'InputOutputControlByIdentifierPositiveResponse',
0x71: 'RoutineControlPositiveResponse',
0x74: 'RequestDownloadPositiveResponse',
0x75: 'RequestUploadPositiveResponse',
0x76: 'TransferDataPositiveResponse',
0x77: 'RequestTransferExitPositiveResponse',
0x78: 'RequestFileTransferPositiveResponse',
0x7D: 'WriteMemoryByAddressPositiveResponse',
0x7E: 'TesterPresentPositiveResponse',
0x83: 'AccessTimingParameter',
0x84: 'SecuredDataTransmission',
0x85: 'ControlDTCSetting',
0x86: 'ResponseOnEvent',
0x87: 'LinkControl',
0xC3: 'AccessTimingParameterPositiveResponse',
0xC4: 'SecuredDataTransmissionPositiveResponse',
0xC5: 'ControlDTCSettingPositiveResponse',
0xC6: 'ResponseOnEventPositiveResponse',
0xC7: 'LinkControlPositiveResponse',
0x7f: 'NegativeResponse'}) # type: Dict[int, str]
name = 'UDS'
fields_desc = [
XByteEnumField('service', 0, services)
]
def answers(self, other):
# type: (Union[UDS, Packet]) -> bool
if other.__class__ != self.__class__:
return False
if self.service == 0x7f:
return self.payload.answers(other)
if self.service == (other.service + 0x40):
if isinstance(self.payload, NoPayload) or \
isinstance(other.payload, NoPayload):
return len(self) <= len(other)
else:
return self.payload.answers(other.payload)
return False
def hashret(self):
# type: () -> bytes
if self.service == 0x7f:
return struct.pack('B', self.requestServiceId)
return struct.pack('B', self.service & ~0x40)
# ########################DSC###################################
class UDS_DSC(Packet):
diagnosticSessionTypes = ObservableDict({
0x00: 'ISOSAEReserved',
0x01: 'defaultSession',
0x02: 'programmingSession',
0x03: 'extendedDiagnosticSession',
0x04: 'safetySystemDiagnosticSession',
0x7F: 'ISOSAEReserved'})
name = 'DiagnosticSessionControl'
fields_desc = [
ByteEnumField('diagnosticSessionType', 0, diagnosticSessionTypes)
]
bind_layers(UDS, UDS_DSC, service=0x10)
class UDS_DSCPR(Packet):
name = 'DiagnosticSessionControlPositiveResponse'
fields_desc = [
ByteEnumField('diagnosticSessionType', 0,
UDS_DSC.diagnosticSessionTypes),
StrField('sessionParameterRecord', b"")
]
def answers(self, other):
return isinstance(other, UDS_DSC) and \
other.diagnosticSessionType == self.diagnosticSessionType
bind_layers(UDS, UDS_DSCPR, service=0x50)
# #########################ER###################################
class UDS_ER(Packet):
resetTypes = {
0x00: 'ISOSAEReserved',
0x01: 'hardReset',
0x02: 'keyOffOnReset',
0x03: 'softReset',
0x04: 'enableRapidPowerShutDown',
0x05: 'disableRapidPowerShutDown',
0x41: 'powerDown',
0x7F: 'ISOSAEReserved'}
name = 'ECUReset'
fields_desc = [
ByteEnumField('resetType', 0, resetTypes)
]
bind_layers(UDS, UDS_ER, service=0x11)
class UDS_ERPR(Packet):
name = 'ECUResetPositiveResponse'
fields_desc = [
ByteEnumField('resetType', 0, UDS_ER.resetTypes),
ConditionalField(ByteField('powerDownTime', 0),
lambda pkt: pkt.resetType == 0x04)
]
def answers(self, other):
return isinstance(other, UDS_ER)
bind_layers(UDS, UDS_ERPR, service=0x51)
# #########################SA###################################
class UDS_SA(Packet):
name = 'SecurityAccess'
fields_desc = [
ByteField('securityAccessType', 0),
ConditionalField(StrField('securityAccessDataRecord', b""),
lambda pkt: pkt.securityAccessType % 2 == 1),
ConditionalField(StrField('securityKey', b""),
lambda pkt: pkt.securityAccessType % 2 == 0)
]
bind_layers(UDS, UDS_SA, service=0x27)
class UDS_SAPR(Packet):
name = 'SecurityAccessPositiveResponse'
fields_desc = [
ByteField('securityAccessType', 0),
ConditionalField(StrField('securitySeed', b""),
lambda pkt: pkt.securityAccessType % 2 == 1),
]
def answers(self, other):
return isinstance(other, UDS_SA) \
and other.securityAccessType == self.securityAccessType
bind_layers(UDS, UDS_SAPR, service=0x67)
# #########################CC###################################
class UDS_CC(Packet):
controlTypes = {
0x00: 'enableRxAndTx',
0x01: 'enableRxAndDisableTx',
0x02: 'disableRxAndEnableTx',
0x03: 'disableRxAndTx'
}
name = 'CommunicationControl'
fields_desc = [
ByteEnumField('controlType', 0, controlTypes),
BitEnumField('communicationType0', 0, 2,
{0: 'ISOSAEReserved',
1: 'normalCommunicationMessages',
2: 'networkManagmentCommunicationMessages',
3: 'networkManagmentCommunicationMessages and '
|
CountZer0/PipelineConstructionSet
|
python/maya/site-packages/pymel-1.0.3/pymel/util/utilitytypes.py
|
Python
|
bsd-3-clause
| 43,474
| 0.006073
|
"""
Defines common types and type related utilities: Singleton, etc.
These types can be shared by other utils modules and imported into util main namespace for use by other pymel modules
"""
import inspect, types, operator, sys, warnings
class Singleton(type):
""" Metaclass for Singleton classes.
|
>>> class DictSingleton(dict) :
... __metaclass__ = Singleton
|
...
>>> DictSingleton({'A':1})
{'A': 1}
>>> a = DictSingleton()
>>> a
{'A': 1}
>>> b = DictSingleton({'B':2})
>>> a, b, DictSingleton()
({'B': 2}, {'B': 2}, {'B': 2})
>>> a is b and a is DictSingleton()
True
>>> class StringSingleton(str) :
... __metaclass__ = Singleton
...
>>> StringSingleton("first")
'first'
>>> a = StringSingleton()
>>> a
'first'
>>> b = StringSingleton("changed")
>>> a, b, StringSingleton()
('first', 'first', 'first')
>>> a is b and a is StringSingleton()
True
>>> class DictSingleton2(DictSingleton):
... pass
...
>>> DictSingleton2({'A':1})
{'A': 1}
>>> a = DictSingleton2()
>>> a
{'A': 1}
>>> b = DictSingleton2({'B':2})
>>> a, b, DictSingleton2()
({'B': 2}, {'B': 2}, {'B': 2})
>>> a is b and a is DictSingleton2()
True
"""
def __new__(mcl, classname, bases, classdict):
# newcls = super(Singleton, mcl).__new__(mcl, classname, bases, classdict)
# redefine __new__
def __new__(cls, *p, **k):
if '_the_instance' not in cls.__dict__:
cls._the_instance = super(newcls, cls).__new__(cls, *p, **k)
return cls._the_instance
newdict = { '__new__': __new__}
# define __init__ if it has not been defined in the class being created
def __init__(self, *p, **k):
cls = self.__class__
if p :
if hasattr(self, 'clear') :
self.clear()
else :
super(newcls, self).__init__()
super(newcls, self).__init__(*p, **k)
if '__init__' not in classdict :
newdict['__init__'] = __init__
# Note: could have defined the __new__ method like it is done in Singleton but it's as easy to derive from it
for k in classdict :
if k in newdict :
warnings.warn("Attribute %r is predefined in class %r of type %r and can't be overriden" % (k, classname, mcl.__name__))
else :
newdict[k] = classdict[k]
newcls = super(Singleton, mcl).__new__(mcl, classname, bases, newdict)
return newcls
class metaStatic(Singleton) :
""" A static (immutable) Singleton metaclass to quickly build classes
holding predefined immutable dicts
>>> class FrozenDictSingleton(dict) :
... __metaclass__ = metaStatic
...
>>> FrozenDictSingleton({'A':1})
{'A': 1}
>>> a = FrozenDictSingleton()
>>> a
{'A': 1}
>>> b = FrozenDictSingleton()
>>> a, b
({'A': 1}, {'A': 1})
>>> a is b
True
>>> b = FrozenDictSingleton({'B':2})
Traceback (most recent call last):
...
TypeError: 'FrozenDictSingleton' object does not support redefinition
>>> a['A']
1
>>> a['A'] = 2 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: '<class '...FrozenDictSingleton'>' object does not support item assignation
>>> a.clear()
Traceback (most recent call last):
...
AttributeError: 'FrozenDictSingleton' object has no attribute 'clear'
>>> a, b, FrozenDictSingleton()
({'A': 1}, {'A': 1}, {'A': 1})
>>> a is b and a is FrozenDictSingleton()
True
>>> class StaticTest(FrozenDictSingleton):
... pass
...
>>> StaticTest({'A': 1})
{'A': 1}
>>> a = StaticTest()
>>> a
{'A': 1}
>>> b = StaticTest()
>>> a, b
({'A': 1}, {'A': 1})
>>> class StaticTest2( StaticTest ):
... pass
...
>>> StaticTest2({'B': 2})
{'B': 2}
>>> a = StaticTest2()
>>> a
{'B': 2}
>>> b = StaticTest2()
>>> a, b
({'B': 2}, {'B': 2})
"""
def __new__(mcl, classname, bases, classdict):
"""
"""
# redefine __init__
def __init__(self, *p, **k):
cls = self.__class__
# Can only create once)
if p :
# Can only init once
if not self:
return super(newcls, self).__init__(*p, **k)
else :
raise TypeError, "'"+classname+"' object does not support redefinition"
newdict = { '__init__':__init__}
# hide methods with might herit from a mutable base
def __getattribute__(self, name):
if name in newcls._hide :
raise AttributeError, "'"+classname+"' object has no attribute '"+name+"'"
else :
return super(newcls, self).__getattribute__(name)
newdict['__getattribute__'] = __getattribute__
_hide = ('clear', 'update', 'pop', 'popitem', '__setitem__', '__delitem__', 'append', 'extend' )
newdict['_hide'] = _hide
# prevent item assignation or deletion
def __setitem__(self, key, value) :
raise TypeError, "'%s' object does not support item assignation" % (self.__class__)
newdict['__setitem__'] = __setitem__
def __delitem__(self, key):
raise TypeError, "'%s' object does not support item deletion" % (self.__class__)
newdict['__delitem__'] = __delitem__
# Now add methods of the defined class, as long as it doesn't try to redefine
# Note: could have defined the __new__ method like it is done in Singleton but it's as easy to derive from it
for k in classdict :
if k in newdict :
warnings.warn("Attribute %r is predefined in class %r of type %r and can't be overriden" % (k, classname, mcl.__name__))
else :
newdict[k] = classdict[k]
newcls = super(metaStatic, mcl).__new__(mcl, classname, bases, newdict)
return newcls
try:
from collections import defaultdict
except:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.iteritems()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
class defaultlist(list):
def __init__(self, default_factory, *args, **kwargs ):
if (default_factory is not None and
|
alexei-matveev/ase-local
|
ase/tasks/calcfactory.py
|
Python
|
gpl-2.0
| 6,674
| 0.00015
|
import optparse
from math import sqrt, pi
import numpy as np
from ase.dft.kpoints import monkhorst_pack
def str2dict(s, namespace={}, sep='='):
"""Convert comma-separated key=value string to dictionary.
Examples:
>>> str2dict('xc=PBE,nbands=200,parallel={band:4}')
{'xc': 'PBE', 'nbands': 200, 'parallel': {'band': 4}}
>>> str2dict('a=1.2,b=True,c=ab,d=1,2,3,e={f:42,g:cd}')
{'a': 1.2, 'c': 'ab', 'b': True, 'e': {'g': 'cd', 'f': 42}, 'd': (1, 2, 3)}
"""
dct = {}
s = (s + ',').split(sep)
for i in range(len(s) - 1):
key = s[i]
m = s[i + 1].rfind(',')
value = s[i + 1][:m]
if value[0] == '{':
assert value[-1] == '}'
value = str2dict(value[1:-1], {}, ':')
else:
try:
value = eval(value, namespace)
except (NameError, SyntaxError):
pass
dct[key] = value
s[i + 1] = s[i + 1][m + 1:]
return dct
class CalculatorFactory:
def __init__(self, Class, name, label='label',
kpts=None, kptdensity=3.0,
**kwargs):
"""Calculator factory object.
Used to create calculators with specific parameters."""
self.Class = Class
self.name = name
self.label = label
self.kpts = kpts
self.kptdensity = kptdensity
self.kwargs = kwargs
def calculate_kpts(self, atoms):
"""Estimate an appropriate number of k-points."""
if self.kpts is not None:
# Number of k-points was explicitely set:
return self.kpts
# Use kptdensity to make a good estimate:
recipcell = atoms.get_reciprocal_cell()
kpts = []
for i in range(3):
if atoms.pbc[i]:
k = 2 * pi * sqrt((recipcell[i]**2).sum()) * self.kptdensity
kpts.append(max(1, 2 * int(round(k / 2))))
else:
kpts.append(1)
return kpts
def __call__(self, name, atoms):
"""Create calculator.
Put name in the filename of all created files."""
kpts = self.calculate_kpts(atoms)
if kpts != 'no k-points':
if self.name == 'aims': # XXX Aims uses k_grid!
self.kwargs['k_grid'] = kpts
else:
self.kwargs['kpts'] = kpts
if self.label is not None:
self.kwargs[self.label] = name
return self.Class(**self.kwargs)
if self.label is None:
return self.Class(**self.kwargs)
else:
return self.Class(name, **self.kwargs)
def add_options(self, parser):
calc = optparse.OptionGroup(parser, 'Calculator')
calc.add_option('-k', '--monkhorst-pack',
metavar='K1,K2,K3',
help='Monkhorst-Pack sampling of BZ. Example: ' +
'"4,4,4": 4x4x4 k-points, "4,4,4g": same set of ' +
'k-points shifted to include the Gamma point.')
calc.add_option('--k-point-density', type='float', default=3.0,
|
help='Density of k-points in Angstrom.')
calc.add_option('-p', '--parameters', metavar='key=value,...',
help='Comma-separated key=value pairs of ' +
'calculator specific parameters.')
parser.add_option_group(calc)
def parse(self, opts, args):
mp = opts.monkhorst_pack
if mp is not None:
if mp[-1].lower() == 'g':
kpts = np.array([int(k) for k in mp[:-1].split(',')])
|
shift = 0.5 * ((kpts + 1) % 2) / kpts
self.kpts = monkhorst_pack(kpts) + shift
else:
self.kpts = [int(k) for k in mp.split(',')]
self.kptdensity = opts.k_point_density
if opts.parameters:
self.kwargs.update(str2dict(opts.parameters))
# Recognized names of calculators sorted alphabetically:
calcnames = ['abinit', 'aims', 'asap', 'castep', 'dftb', 'elk', 'emt',
'exciting', 'fleur', 'gpaw', 'gaussian', 'hotbit', 'jacapo',
'lammps', 'lj', 'mopac', 'morse',
'nwchem', 'siesta', 'turbomole', 'vasp']
classnames = {'asap': 'EMT',
'aims': 'Aims',
'elk': 'ELK',
'emt': 'EMT',
'fleur': 'FLEUR',
'gaussian': 'Gaussian',
'jacapo': 'Jacapo',
'lammps': 'LAMMPS',
'lj': 'LennardJones',
'mopac': 'Mopac',
'morse': 'MorsePotential',
'nwchem': 'NWChem',
'vasp': 'Vasp'}
def calculator_factory(name, **kwargs):
"""Create an ASE calculator factory."""
if name == 'abinit':
from ase.calculators.abinit import Abinit
return CalculatorFactory(Abinit, 'Abinit', 'label', **kwargs)
if name == 'aims':
from ase.calculators.aims import Aims
return CalculatorFactory(Aims, 'aims', 'label', **kwargs)
if name == 'nwchem':
from ase.calculators.nwchem import NWChem
return CalculatorFactory(NWChem, 'NWChem', 'label', 'no k-points',
**kwargs)
if name == 'asap':
from asap3 import EMT
return CalculatorFactory(EMT, 'Asap', None, 'no k-points', **kwargs)
if name == 'elk':
from ase.calculators.elk import ELK
return CalculatorFactory(ELK, 'ELK', 'label', **kwargs)
if name == 'fleur':
from ase.calculators.fleur import FLEUR
return CalculatorFactory(FLEUR, 'FLEUR', 'workdir', **kwargs)
if name == 'gpaw':
from gpaw.factory import GPAWFactory
return GPAWFactory(**kwargs)
if name == 'hotbit':
from hotbit import Calculator
return CalculatorFactory(Calculator, 'Hotbit', 'txt', 'no k-points',
**kwargs)
if name == 'jacapo':
from ase.calculators.jacapo import Jacapo
return CalculatorFactory(Jacapo, 'Jacapo', 'nc', **kwargs)
if name == 'vasp':
from ase.calculators.vasp import Vasp
return CalculatorFactory(Vasp, 'Vasp', None, **kwargs)
classname = classnames.get(name, name.title())
module = __import__('ase.calculators.' + name, {}, None, [classname])
Class = getattr(module, classname)
if name in ['emt', 'gaussian', 'lammps', 'lj', 'mopac', 'morse']:
kpts = 'no k-points'
else:
kpts = None
if name in ['emt', 'lj', 'morse']:
label = None
else:
label = 'label'
return CalculatorFactory(Class, classname, label, kpts, **kwargs)
|
thorgate/django-project-template
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/__init__.py
|
Python
|
isc
| 392
| 0.002551
|
import os
import sys
if os.environ.get("DJANGO_PRODUCTION_MODE"):
from settings.cloud import *
else:
# When not using production mode try to load local.py
|
try:
from settings.local import *
except ImportError:
sys.stderr.write(
"Couldn't import settings.local, have you created it from setti
|
ngs/local.py.example ?\n"
)
sys.exit(1)
|
dspichkin/djangodashpanel
|
djangodashpanel/security/views.py
|
Python
|
gpl-3.0
| 9,113
| 0.002743
|
import time
import json
import pytz
from datetime import datetime, timedelta
from django.utils import timezone
from django.conf import settings
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from ..models.security import (
SecurityLoginAttemptIncorrect, SecurityLoginAttemptCorrect
)
@api_view(['GET'])
@permission_classes((IsAdminUser, ))
def correctlogins_data(request):
date_start_raw = request.GET.get('date_start')
date_end_raw = request.GET.get('date_end')
date_start_tz = None
date_end_tz = None
if not date_start_raw or not date_end_raw:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
else:
date_start = datetime.fromtimestamp(int(date_start_raw))
date_start_tz = pytz.timezone(settings.TIME_ZONE).localize(date_start, is_dst=None)
date_end = datetime.fromtimestamp(int(date_end_raw))
date_end_tz = pytz.timezone(settings.TIME_ZONE).localize(date_end, is_dst=None)
if date_start_tz == date_end_tz:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
count_hosts = []
temp_hosts = {}
temp_users = {}
dates = []
values = SecurityLoginAttemptCorrect.objects.filter(time__range=[date_start_tz, date_end_tz])
count_correct_attempt = 0
for p in values:
value = json.loads(p.value)
attempt_count = 0
for host, v in value.get("hosts", {}).items():
attempt_count += v.get("count", 0)
count_correct_attempt += attempt_count
raw_date = v.get("last_date")
date_tz = None
if raw_date:
date = datetime.fromtimestamp(int(raw_date))
date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
if host in temp_hosts:
temp_hosts[host]["count"] = temp_hosts[host]["count"] + v.get("count", 0)
temp_hosts[host]["last_date"] = date_tz.strftime("%b %d %H:%M")
else:
temp_hosts[host] = {
"host": host,
"count": v.get("count", 0),
"last_date": date_tz.strftime("%b %d %H:%M")
}
for username, v in value.get("users", {}).items():
attempt_count += v.get("count", 0)
raw_date = v.get("last_date")
date_tz = None
if raw_date:
date = datetime.fromtimestamp(int(raw_date))
date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
if username in temp_users:
temp_users[username]["count"] = temp_users[username]["count"] + v.get("count", 0)
temp_users[username]["last_date"] = date_tz.strftime("%b %d %H:%M")
else:
temp_users[username] = {
"username": username,
"count": v.get("count", 0),
"last_date": date_tz.strftime("%b %d %H:%M")
}
count_hosts.append(attempt_count)
dates.append(timezone.localtime(p.time).strftime("%b %d %H:%M"))
hosts = []
for i in temp_hosts:
hosts.append(temp_hosts[i])
if hosts:
hosts.sort(key=lambda x: x["count"], reverse=True)
hosts = hosts[:100]
users = []
for i in temp_users:
users.append(temp_users[i])
if users:
users.sort(key=lambda x: x["count"], reverse=True)
users = users[:100]
date_range = {
"start": time.mktime(timezone.localtime(date_start_tz).timetuple()), # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"start_date": time.mktime(timezone.localtime(date_start_tz).timetuple()) + 10, # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"end_date": time.mktime(timezone.localtime(date_end_tz).timetuple()),
}
if values:
date_range["start"] = time.mktime(timezone.localtime(values[0].time).timetuple())
start_obj = SecurityLoginAttemptCorrect.objects.all().first()
if start_obj:
date_range["start_date"] = time.mktime(timezone.localtime(start_obj.time).timetuple())
if date_range["start_date"] == date_range["end_date"]:
date_range["end_date"] += 10
return Response({
"values": [{
"data": count_hosts,
"label": 'Number of login'
}],
"dates": dates,
"date_range": date_range,
"count_correct_attempt": count_correct_attempt,
"hosts": hosts,
"users": users
}, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes((IsAdminUser, ))
def incorrectlogins_data(request):
date_start_raw = request.GET.get('date_start')
date_end_raw = request.GET.get('date_end')
date_start_tz = None
date_end_tz = None
if not date_start_raw or not date_end_raw:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
else:
date_start = datetime.fromtimestamp(int(date_start_raw))
date_start_tz = pytz.timezone(settings.TIME_ZONE).localize(date_start, is_dst=None)
date_end = datetime.fromtimestamp(int(date_end_raw))
date_end_tz = pytz.timezone(settings.TIME_ZONE).localize(date_end, is_dst=None)
if date_start_tz == date_end_tz:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
count_incorrect_attepmt = 0
count_hosts = []
temp_hosts = {}
temp_users = {}
dates = []
values = SecurityLoginAttemptIncorrect.objects.filter(time__range=[date_start_tz, date_end_tz])
for p in values:
value = json.loads(p.value)
attempt_count = 0
for host, v in value.get("hosts", {}).items():
attempt_count += v.get("count", 0)
raw_date = v.get("la
|
st_date")
date_tz = None
if raw_date:
date = datetime.fromtimestamp(int(raw_date))
date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
if host in temp_hosts:
temp_hosts[host]["count"] = temp_hosts[host]["count"] + v.get("count", 0)
temp_hosts[host]["last_date"]
|
= date_tz.strftime("%b %d %H:%M")
else:
temp_hosts[host] = {
"host": host,
"count": v.get("count", 0),
"last_date": date_tz.strftime("%b %d %H:%M")
}
for user, v in value.get("users", {}).items():
attempt_count += v.get("count")
raw_date = v.get("last_date")
date_tz = None
if raw_date:
date = datetime.fromtimestamp(int(raw_date))
date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
if user in temp_users:
temp_users[user]["count"] = temp_users[user]["count"] + v.get("count")
temp_users[user]["last_date"] = date_tz.strftime("%b %d %H:%M")
else:
temp_users[user] = {
"username": user,
"count": v.get("count"),
"last_date": date_tz.strftime("%b %d %H:%M")
}
count_incorrect_attepmt += attempt_count
count_hosts.append(attempt_count)
dates.append(timezone.localtime(p.time).strftime("%b %d %H:%M"))
hosts = []
for i in temp_hosts:
hosts.append(temp_hosts[i])
if hosts:
hosts.sort(key=lambda x: x["count"], reverse=True)
hosts = hosts[:100]
users = []
for i in temp_users:
users.append(temp_users[i])
if users:
users.sort(key=lambda x: x["count"], reverse=True)
users = users[:100]
date_range = {
"start": time.mktime(timezone.localtime(date_start_tz).timetuple()), # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"start_date": time.mkt
|
karel-brinda/smbl
|
smbl/utils/rule.py
|
Python
|
mit
| 572
| 0.050699
|
import smbl
__RULES=set()
def
|
register_rule(rule):
registered_rules=[r.encode() for r in get_registered_rules()]
if rule.encode() not in registered_rules:
__RULES.add(rule)
def get_registered_rules():
return list(__RULES)
class Rule:
def __init__(self,input,output,run):
self.__input=input
self.__output=output
self.__run=run
register_rule(self)
def get_input(self)
|
:
return self.__input
def get_output(self):
return self.__output
def run(self):
self.__run()
def encode(self):
return "{} {}".format(str(self.__input),str(self.__output))
|
kevin-intel/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py
|
Python
|
bsd-3-clause
| 6,256
| 0
|
import numpy as np
from numpy.testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import pytest
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.ensemble._hist_gradient_boosting.common import (
G_H_DTYPE, PREDICTOR_RECORD_DTYPE, ALMOST_INF, X_BINNED_DTYPE,
X_BITSET_INNER_DTYPE, X_DTYPE)
from sklearn.ensemble._hist_gradient_boosting._bitset import (
set_bitset_memoryview, set_raw_bitset_from_binned_bitset)
@pytest.mark.parametrize('n_bins', [200, 256])
def test_regression_dataset(n_bins):
X, y = make_regression(n_samples=500, n_features=10, n_informative=5,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42)
mapper = _BinMapper(n_bins=n_bins, random_state=42)
X_train_binned = mapper.fit_transform(X_train)
# Init gradients and hessians to that of least squares loss
gradients = -y_train.astype(G_H_DTYPE)
hessians = np.ones(1, dtype=G_H_DTYPE)
min_samples_leaf = 10
max_leaf_nodes = 30
grower = TreeGrower(X_train_binned, gradients, hessians,
min_samples_leaf=min_samples_leaf,
max_leaf_nodes=max_leaf_nodes, n_bins=n_bins,
n_bins_non_missing=mapper.n_bins_non_missing_)
grower.grow()
predictor = grower.make_predictor(
binning_thresholds=mapper.bin_thresholds_)
known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
f_idx_map = np.zeros(0, dtype=np.uint32)
y_pred_train = predictor.predict(X_train, known_cat_bitsets, f_idx_map)
assert r2_score(y_train, y_pred_train) > 0.82
y_pred_test = predictor.predict(X_test, known_cat_bitsets, f_idx_map)
assert r2_score(y_test, y_pred_test) > 0.67
@pytest.mark.parametrize('num_threshold, expected_predictions', [
(-np.inf, [0, 1, 1, 1]),
(10, [0, 0, 1, 1]),
(20, [0, 0, 0, 1]),
(ALMOST_INF, [0, 0, 0, 1]),
(np.inf, [0, 0, 0, 0]),
])
def test_infinite_values_and_thresholds(num_threshold, expected_predictions):
# Make sure infinite values and infinite thresholds are handled properly.
# In particular, if a value is +inf and the threshold is ALMOST_INF the
# sample should go to the right child. If the threshold is inf (split on
# nan), the +inf sample will go to the left child.
X = np.array([-np.inf, 10, 20, np.inf]).reshape(-1, 1)
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
# We just construct a simple tree with 1 root and 2 children
# parent node
nodes[0]['left'] = 1
nodes[0]['right'] = 2
nodes[0]['feature_idx'] = 0
nodes[0]['num_threshold'] = num_threshold
# left child
nodes[1]['is_leaf'] = True
nodes[1]['value'] = 0
# right child
nodes[2]['is_leaf'] = True
nodes[2]['value'] = 1
binned_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
raw_categorical_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
known_cat_bitset = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
f_idx_map = np.zeros(0, dtype=np.uint32)
predictor = TreePredictor(
nodes, binned_cat_bitsets, raw_categorical_bitsets)
predictions = predictor.predict(X, known_cat_bitset, f_idx_map)
assert np.all(predictions == expected_predictions)
@pytest.mark.parametrize(
'bins_go_left, expected_predictions', [
([0, 3, 4, 6], [1, 0, 0, 1, 1, 0]),
([0, 1, 2, 6], [1, 1, 1, 0, 0, 0]),
([3, 5, 6], [0, 0, 0, 1, 0, 1])
])
def test_categorical_predictor(bins_go_left, expected_predictions):
# Test predictor outputs are correct with categorical features
X_binned = np.array([[0, 1, 2, 3, 4, 5]], dtype=X_BINNED_DTYPE).T
categories = np.array([2, 5, 6, 8, 10, 15], dtype=X_DTYPE)
bins_go_left = np.array(bins_go_left, dtype=X_BINNED_DTYPE)
# We just construct a simple tree with 1 root and 2 children
# parent node
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
nodes[0]['left'] = 1
nodes[0]['right'] = 2
nodes[0]['feature_idx'] = 0
nodes[0]['is_categorical'] = True
nodes[0]['missing_go_to_left'] = True
# left child
nodes[1]['is_leaf'] = True
nodes[1]['value'] = 1
# right child
nodes[2]['is_leaf'] = True
nodes[2]['value'] = 0
binned_cat_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
raw_categorical_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
for go_left in bins_go_left:
set_bitset_memoryview(binned_cat_bitsets[0], go_left)
set_raw_bitset_from_binned_bitset(raw_categorical_bitsets[0],
|
binned_cat_bitsets[0], categories)
predictor = TreePredictor(nodes, binned_cat_bitsets,
|
raw_categorical_bitsets)
# Check binned data gives correct predictions
prediction_binned = predictor.predict_binned(X_binned,
missing_values_bin_idx=6)
assert_allclose(prediction_binned, expected_predictions)
# manually construct bitset
known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32)
known_cat_bitsets[0, 0] = np.sum(2**categories, dtype=np.uint32)
f_idx_map = np.array([0], dtype=np.uint32)
# Check with un-binned data
predictions = predictor.predict(categories.reshape(-1, 1),
known_cat_bitsets, f_idx_map)
assert_allclose(predictions, expected_predictions)
# Check missing goes left because missing_values_bin_idx=6
X_binned_missing = np.array([[6]], dtype=X_BINNED_DTYPE).T
predictions = predictor.predict_binned(X_binned_missing,
missing_values_bin_idx=6)
assert_allclose(predictions, [1])
# missing and unknown go left
predictions = predictor.predict(np.array([[np.nan, 17]], dtype=X_DTYPE).T,
known_cat_bitsets, f_idx_map)
assert_allclose(predictions, [1, 1])
|
hehongliang/tensorflow
|
tensorflow/python/kernel_tests/conv_ops_test.py
|
Python
|
apache-2.0
| 73,074
| 0.005351
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import layers
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
[4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
[4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
[4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
[4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
[4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
[4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
[4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
[4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
[4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
[4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
[4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
[4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
[1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
[1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
[1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
[3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
[3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
[3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
[1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
[1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
[3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
[1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
[3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
[1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
[1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
[3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
[1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
[3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
[1, 1, 192, 48
|
], [3, 3, 64, 192], [1, 1, 64, 64],
[1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
[4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8
|
, 384],
[4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
[4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
[4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
[4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
[4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
[4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
[4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
[4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
[4, 147, 147, 64]]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv():
return [dtypes.float32, dtypes.float64]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16, dtypes.float64]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
strides, padding, data_format, dtype, use_gpu):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filt
|
Virako/authapi
|
authapi/api/migrations/0004_auto_20141128_0914.py
|
Python
|
agpl-3.0
| 419
| 0
|
# -*- coding: utf-8 -*-
from __future__
|
import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20141128_0831'),
]
op
|
erations = [
migrations.AlterField(
model_name='acl',
name='user',
field=models.ForeignKey(to='api.UserData', related_name='acls'),
),
]
|
ME-ICA/me-ica
|
meica.libs/nibabel/benchmarks/__init__.py
|
Python
|
lgpl-2.1
| 25
| 0
|
# Benchmarks
|
for nibabel
|
|
WaterSums/EPANETFileUtility
|
EPANETFileUtility.py
|
Python
|
mit
| 16,145
| 0.006813
|
# ex:set ts=4 sw=4: <- for vim
#
# EPANET File Utility
# Uses EPAENTOutputFile.py to read the EPANET output file into memory and
# then displays the content in different ways.
#
# Dependencies:
# - Python 2.6 or 2.7 (32- or 64-bit)
# - wxPython 3.0.0 (32- or 64-bit to match installed version of Python)
# - EPANETOutputFile/EPANETOutputFile.py
#
# Available translations/locales:
# en_AU.UTF-8
import wx
import wx.gizmos
import wx.propgrid as wxpg
import os
import platform
import codecs
import sys
import gettext
from EPANETOutputFile import EPANETOutputFile
from DataPage import DataPage
from TablePage import TablePage
from ExportPage import ExportPage
_hasXLRD = True
try:
import xlrd
#_hasXLRD = False
except ImportError:
_hasXLRD = False
_hasXLWT = True
try:
import xlwt
#_hasXLWT = False
except ImportError:
_hasXLWT = False
_hasXLUTILS = True
try:
import xlutils
#_hasXLUTILS = False
except ImportError:
_hasXLUTILS = False
def main():
# New versions of wxPython require us to create the app very early, so...
# Create a new app, don't redirect stdout/stderr to a window.
app = wx.App(False)
# mostly taken from the wxPython internationalisation example...
# but in the end not using wx Locale because of the difficulty of
# mapping from language name (string) to wx language constant (number)
# initialise language settings:
path = sys.path[0].decode(sys.getfilesystemencoding())
try:
langIni = codecs.open(os.path.join(path,u'language.ini'),'r', 'utf-8')
except IOError:
#language = u'en' #defaults to english
#print('Could not read language.ini')
language = None
pass
else:
language = langIni.read()
locales = {
u'en' : (wx.LANGUAGE_ENGLISH, u'en_US.UTF-8'),
#u'es' : (wx.LANGUAGE_SPANISH, u'es_ES.UTF-8'),
#u'fr' : (wx.LANGUAGE_FRENCH, u'fr_FR.UTF-8'),
}
langdir = os.path.join(path,u'locale')
if language is None:
Lang = gettext.translation(u'EPANETFileUtility', langdir,
fallback=True)
Lang.install(unicode=1)
if Lang.__class__.__name__ == 'NullTranslations' and str(Lang.__class__) == 'gettext.NullTranslations':
print('Language not found')
else:
try:
language = Lang._info['language']
print('Language %s found.' % language)
except (KeyError):
print('Language found (details not available).')
# Lang.info() content seems to depend on the .mo file containing
# the correct language information. If it is not set, the list
# returned is empty and there doesn't seem to be any way to find
# the information
#print('Lang.info() = %s' % Lang.info())
#language = Lang._info['language']
# TODO convert from language name (string) to wx.LANGUAGE_... (number)
#mylocale = wx.Locale(language, wx.LOCALE_LOAD_DEFAULT)
else:
Lang = gettext.translation(u'EPANETFileUtility', langdir, languages=[language])
Lang.install(unicode=1)
#mylocale = wx.Locale(locales[language][0], wx.LOCALE_LOAD_DEFAULT)
if platform.system() == 'Linux':
try:
# to get some language settings to display properly:
os.environ['LANG'] = locales[language][1]
except (ValueError, KeyError):
pass
# A Frame is a top-level window.
frame = MyFrame(None, _("EPANET File Utility"))
app.MainLoop()
def getNextImageID(count):
imID = 0
while True:
yield imID
imID += 1
if imID == count:
imID = 0
"""
Our main panel contains the following:
- a menu bar
- a Frame with a box sizer containing a MyListbook with pictures down the
LHS for the Data/Tables/Graphs/Export options.
At the start, we put a box sizer and a Panel containing a box sizer
with a ColouredPanel in each page: this
must be replaced with valid content when a file is loaded
- TODO allow file name to be specified on the command line
- at startup time, we open a data file and build an EPANETOutputFile object
which we display by:
- creating a box sizer in the MyListbook 'Data' page and adding to it:
- a PropertyGridManager with 4 pages, viz:
- Prolog: properties read from the prolog of the data file but not
including the node and link information
- Energy Usage: a single property
- Dynamic Results: a property grid
- Epilog: a property grid
- a box sizer (treesizer) in which we switch TreeListCtrls as necessary
for the different pages of the PropertyGridManager
"""
class MyFrame(wx.Frame):
""" We simply derive a new class of Frame. """
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(800,600),
|
#style = wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL
)
self.control = MyListbook(self, -1, None)
self.basetitle = title
self.Sizer
|
= wx.BoxSizer(wx.VERTICAL)
self.Sizer.Add(self.control, 1, wx.GROW)
self.dataPage = None
self.tablePage = None
self.exportPage = None
self.dirname = ''
self.filename = None
self.epanetoutputfile = None
il = wx.ImageList(80, 80)
bmp = wx.Bitmap('images/led_circle_yellow.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_orange.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap( 'images/led_circle_blue.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_green.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_purple.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_red.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_grey.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_black.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
self.control.AssignImageList(il)
imageIdGenerator = getNextImageID(il.GetImageCount())
# Now make a bunch of panels for the list book
colourList = [ "Yellow", "Coral", "Medium orchid", "Green", ]
titleList = [ _("Data"),
_("Tables"),
_("Graphs"),
_("Export"),
]
for i in range(len(titleList)):
colour = colourList[i]
title = titleList[i]
if i == 0:
self.dataPage = win = DataPage(self, self.control, colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
elif i == 1:
self.tablePage = win = TablePage(self, self.control, colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
elif i == 2:
win = self.makeColourPanel(colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
sizer = wx.BoxSizer(wx.VERTICAL)
win.win.SetSizer(sizer)
st = wx.StaticText(win.win, -1,
_(
"""EPANET File Utility
Displaying graphs is not yet supported."""))
sizer.Add(st, 1, wx.GROW | wx.ALL, 10)
elif i == 3:
self.exportPage = win = ExportPage(self, self.control, colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
else:
win = self.makeColourPanel(colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
win = self.control.GetPage(i)
st = wx.StaticText(win.win, -1,
_("EPANET File Utility."),
wx.Point(10, 10))
#win = self.makeColourPanel(colour)
#st = wx.StaticText(win.win, -1, "this is a sub-page", (10,10))
#self.con
|
frastlin/PyAudioGame
|
examples/basic_tutorial/ex1.py
|
Python
|
mit
| 712
| 0.030899
|
#Working with variables
import pyaudiogame
spk = pyaudiogame.speak
MyApp = pyaudiogame.App("My Application")
#Here are some variables
#Let
|
s first write one line of text
my_name = "Frastlin"
#now lets write a number
m
|
y_age = 42
#now lets write several lines of text
my_song = """
My application tis to be,
the coolest you've ever seen!
"""
#Magic time!
def logic(actions):
key = actions['key']
if key == "a":
#Here is our one line of text, it will speak when we press a
spk(my_name)
elif key == "s":
#Here is our number, it will speak when we press s
spk(my_age)
elif key == "d":
#Here is our multiline text example. It will speak when we press d
spk(my_song)
MyApp.logic = logic
MyApp.run()
|
rackerlabs/horizon
|
openstack_dashboard/dashboards/project/access_and_security/keypairs/views.py
|
Python
|
apache-2.0
| 3,100
| 0.000645
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distribute
|
d under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing keypairs.
"""
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from django.templa
|
te.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView
from django.views.generic import View
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.forms import CreateKeypair
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.forms import ImportKeypair
LOG = logging.getLogger(__name__)
class CreateView(forms.ModalFormView):
form_class = CreateKeypair
template_name = 'project/access_and_security/keypairs/create.html'
success_url = 'horizon:project:access_and_security:keypairs:download'
def get_success_url(self):
return reverse(self.success_url,
kwargs={"keypair_name": self.request.POST['name']})
class ImportView(forms.ModalFormView):
form_class = ImportKeypair
template_name = 'project/access_and_security/keypairs/import.html'
success_url = reverse_lazy('horizon:project:access_and_security:index')
def get_object_id(self, keypair):
return keypair.name
class DownloadView(TemplateView):
def get_context_data(self, keypair_name=None):
return {'keypair_name': keypair_name}
template_name = 'project/access_and_security/keypairs/download.html'
class GenerateView(View):
def get(self, request, keypair_name=None):
try:
keypair = api.nova.keypair_create(request, keypair_name)
except:
redirect = reverse('horizon:project:access_and_security:index')
exceptions.handle(self.request,
_('Unable to create keypair: %(exc)s'),
redirect=redirect)
response = http.HttpResponse(mimetype='application/binary')
response['Content-Disposition'] = \
'attachment; filename=%s.pem' % slugify(keypair.name)
response.write(keypair.private_key)
response['Content-Length'] = str(len(response.content))
return response
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/IPython/html/services/contents/filecheckpoints.py
|
Python
|
mit
| 6,954
| 0
|
"""
File-based Checkpoints implementations.
"""
import os
import shutil
from tornado.web import HTTPError
from .checkpoints import (
Checkpoints,
GenericCheckpointsMixin,
)
from .fileio import FileManagerMixin
from IPython.utils import tz
from IPython.utils.path import ensure_dir_exists
from IPython.utils.py3compat import getcwd
from IPython.utils.traitlets import Unicode
class FileCheckpoints(FileManagerMixin, Checkpoints):
"""
A Checkpoints that caches checkpoints for files in adjacent
directories.
Only works with FileContentsManager. Use GenericFileCheckpoints if
you want file-based checkpoints with another ContentsManager.
"""
checkpoint_dir = Unicode(
'.ipynb_checkpoints',
config=True,
help="""The directory name in which to keep file checkpoints
This is a path relative to the file's own directory.
By default, it is .ipynb_checkpoints
""",
)
root_dir = Unicode(config=True)
def _root_dir_default(self):
try:
return self.parent.root_dir
except AttributeError:
return getcwd()
# ContentsManager-dependent checkpoint API
def create_checkpoint(self, contents_mgr, path):
"""Create a checkpoint."""
checkpoint_id = u'checkpoint'
src_path = contents_mgr._get_os_path(path)
dest_path = self.checkpoint_path(checkpoint_id, path)
self._copy(src_path, dest_path)
return self.checkpoint_model(checkpoint_id, dest_path)
def restore_checkpoint(self, contents_mgr, checkpoint_id, path):
"""Restore a checkpoint."""
src_path = self.checkpoint_path(checkpoint_id, path)
dest_path = contents_mgr._get_os_path(path)
self._copy(src_path, dest_path)
# ContentsManager-independent checkpoint API
def rename_checkpoint(self, checkpoint_id, old_path, new_path):
"""Rename a checkpoint from old_path to new_path."""
old_cp_path = self.checkpoint_path(checkpoint_id, old_path)
new_cp_path = self.checkpoint_path(checkpoint_id, new_path)
if os.path.isfile(old_cp_path):
self.log.debug(
"Renaming checkpoint %s -> %s",
old_cp_path,
new_cp_path,
)
with self.perm_to_403():
shutil.move(old_cp_path, new_cp_path)
def delete_checkpoint(self, checkpoint_id, path):
"""delete a file's checkpoint"""
path = path.strip('/')
cp_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(cp_path):
self.no_such_checkpoint(path, checkpoint_id)
self.log.debug("unlinking %s", cp_path)
with self.perm_to_403():
os.unlink(cp_path)
def list_checkpoints(self, path):
"""list the checkpoints for a given file
This contents manager currently only supports one checkpoint per file.
"""
path = path.strip('/')
checkpoint_id = "checkpoint"
os_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_path):
return []
else:
return [self.checkpoint_model(checkpoint_id, os_path)]
# Checkpoint-related utilities
def checkpoint_path(self, checkpoint_id, path):
"""find the path to a checkpoint"""
path = path.strip('/')
parent, name = ('/' + path).rsplit('/', 1)
parent = parent.strip('/')
basename, ext = os.path.splitext(name)
filename = u"{name}-{checkpoint_id}{ext}".format(
name=basename,
checkpoint_id=checkpoint_id,
ext=ext,
)
os_path = self._get_os_path(path=parent)
cp_dir = os.path.join(os_path, self.checkpoint_dir)
with self.perm_to_403():
ensure_dir_exists(cp_dir)
cp_path = os.path.join(cp_dir, filename)
return cp_path
def checkpoint_model(self, checkpoint_id, os_path):
"""construct the info dict for a given checkpoint"""
stats = os.stat(os_path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
id=checkpoint_id,
last_modified=last_modified,
)
return info
# Error Handling
def no_such_checkpoint(self, path, checkpoint_id):
raise HTTPError(
404,
u'Checkpoint does not exist: %s@%s' % (path, checkpoint_id)
)
class GenericFileCheckpoints(GenericCheckpointsMixin, FileCheckpoints):
"""
Local filesystem Checkpoints that works with any conforming
ContentsManager.
"""
def create_file_checkpoint(self, content, format, path):
"""Create a checkpoint from the current content of a file."""
path = path.strip('/')
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
self.log.debug("creating checkpoint for %s", path)
with self.perm_to_403():
self._save_file(os_checkpoint_path, content, format=format)
# return the checkpoint info
return self.checkpoint_model(checkpoint_id, os_checkpoint_path)
def create_notebook_checkpoint(self, nb, path):
"""Create a checkpoint from the current content of a notebook."""
path = path.strip('/')
# only the
|
one checkpoint ID:
checkpoint_id = u"checkpoint"
os_checkpoint_path = self.checkpoint_path(
|
checkpoint_id, path)
self.log.debug("creating checkpoint for %s", path)
with self.perm_to_403():
self._save_notebook(os_checkpoint_path, nb)
# return the checkpoint info
return self.checkpoint_model(checkpoint_id, os_checkpoint_path)
def get_notebook_checkpoint(self, checkpoint_id, path):
"""Get a checkpoint for a notebook."""
path = path.strip('/')
self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_checkpoint_path):
self.no_such_checkpoint(path, checkpoint_id)
return {
'type': 'notebook',
'content': self._read_notebook(
os_checkpoint_path,
as_version=4,
),
}
def get_file_checkpoint(self, checkpoint_id, path):
"""Get a checkpoint for a file."""
path = path.strip('/')
self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_checkpoint_path):
self.no_such_checkpoint(path, checkpoint_id)
content, format = self._read_file(os_checkpoint_path, format=None)
return {
'type': 'file',
'content': content,
'format': format,
}
|
UFCGProjects/sig
|
src/tests/DropTablesTest.py
|
Python
|
mit
| 1,528
| 0.003927
|
import psycopg2
import unittest
import sys
class LDropTablesTest(unittest.TestCase):
def setUp(self):
conn = psycopg2.connect("dbname=teste user=postgres")
conn.set_isolation_level(0) # set autocommit
self.cur = conn.cursor()
def tearDown(self):
self.cur.close()
def testCDropTableFugaRota(self):
self.cur.execute("DROP TABLE FugaRota;")
self.assertEqual(self.cur.s
|
tatusmessage, "DROP TABLE")
def testEDropTableHorario(self):
self.cur.execute("DROP TABLE Horario;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testDDropTableLocalization(self):
self.cur.execute("DROP TABLE Localization CASCADE;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
|
def testFDropTableOnibus(self):
self.cur.execute("DROP TABLE Onibus;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testBDropTablePontoOnibusRota(self):
self.cur.execute("DROP TABLE PontoOnibus_Rota;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testGDropTablePontoOnibus(self):
self.cur.execute("DROP TABLE PontoOnibus;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testADropTableRota(self):
self.cur.execute("DROP TABLE Rota CASCADE;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
|
Sprytile/Sprytile
|
rx/subjects/anonymoussubject.py
|
Python
|
mit
| 548
| 0
|
from rx.core import ObservableBase
class AnonymousSubject(ObservableBase):
def __init__(self, observer, observable):
super(AnonymousSubject, self).__init__()
self.observer = observer
self.observable = observable
def _subscribe_core(self, obs
|
erver):
return self.observable.subscribe(observer)
def on_completed(self):
self.observer.on_completed
|
()
def on_error(self, exception):
self.observer.on_error(exception)
def on_next(self, value):
self.observer.on_next(value)
|
alirizakeles/tendenci
|
tendenci/apps/payments/forms.py
|
Python
|
gpl-3.0
| 2,831
| 0.000706
|
from datetime import datetime
from django import forms
from django.utils.translation import ugettext_lazy as _
from tendenci.apps.base.fields import SplitDateTimeField
from tendenci.apps.payments.models import Payment, PaymentMethod
PAYMENT_METHODS = PaymentMethod.objects.filter().values_list(
'machine_name', 'human_name').exclude()
class MarkAsPaidForm(forms.ModelForm):
payment_method = forms.CharField(
max_length=20,
widget=forms.Select(choices=PAYMENT_METHODS))
submit_dt = SplitDateTimeField(
label=_('Submit Date and Time'),
initial=datetime.now()
|
)
class Meta:
model = Payment
fields = (
'amount',
'payment_method',
'submit_dt',
)
def save(self, user, invoice, *args, **kwargs):
"""
Save payment, bind invoice inst
|
ance.
Set payment fields (e.g. name, description)
"""
instance = super(MarkAsPaidForm, self).save(*args, **kwargs)
instance.method = self.cleaned_data['payment_method']
instance.invoice = invoice
instance.first_name = invoice.bill_to_first_name
instance.last_name = invoice.bill_to_last_name
instance.email = invoice.bill_to_email
instance.status_detail = 'approved'
instance.creator = user
instance.creator_username = user.username
instance.owner = user
instance.owner_username = user.username
instance.save()
invoice_object = invoice.get_object()
if invoice_object:
if hasattr(invoice_object, 'get_payment_description'):
instance.description = invoice_object.get_payment_description(invoice)
if not instance.description:
instance.description = 'Tendenci Invoice {} for {}({})'.format(
instance.pk, invoice_object, invoice_object.pk)
return instance
class PaymentSearchForm(forms.Form):
SEARCH_CRITERIA_CHOICES = (
('', _('SELECT ONE')),
('first_name', _('First Name')),
('last_name', _('Last Name')),
('amount', _('Amount')),
('owner_username', _('Owner Username')),
('id', _('Payment ID')),
('invoice__id', _('Invoice ID')),
('trans_id', _('Transaction ID')),
('auth_code', _('Authorization Code'))
)
SEARCH_METHOD_CHOICES = (
('starts_with', _('Starts With')),
('contains', _('Contains')),
('exact', _('Exact')),
)
search_criteria = forms.ChoiceField(choices=SEARCH_CRITERIA_CHOICES,
required=False)
search_text = forms.CharField(max_length=100, required=False)
search_method = forms.ChoiceField(choices=SEARCH_METHOD_CHOICES,
required=False)
|
djabber/Dashboard
|
bottle/dash/local/lib/pif-0.7/src/pif/utils.py
|
Python
|
mit
| 1,686
| 0.002372
|
from __future__ import print_function
__title__ = 'pif.utils'
__author__ = 'Artur Barseghyan'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('ensure_autodiscover', 'list_checkers', 'get_public_ip')
from pif.base import registry
from pif.discover import autodiscover
def ensure_autodiscover():
"""
Ensures the IP checkers are discovered.
"""
if not registry._registry:
autodiscover()
def list_checkers():
"""
Lists available checkers.
:return list:
"""
return registry._registry.keys()
def get_public_ip(preferred_checker=None, verbose=False):
"""
Gets IP using one of the services.
:param str preffered checker: Checker UID. If given, the preferred checker is used.
:param bool verbose: If set to True, debug info is printed.
:return str:
"""
ensure_autodiscover()
# If use preferred checker.
if preferred_checker:
ip_checker_cls = registry.get(preferred_checker)
if not ip_checker_cls:
return False
|
ip_checker = ip_checker_cls(verbose=verbose)
ip = ip_checker.get_public_ip()
if verbose:
print('provider: ',
|
ip_checker_cls)
return ip
# Using all checkers.
for ip_checker_name, ip_checker_cls in registry._registry.items():
ip_checker = ip_checker_cls(verbose=verbose)
try:
ip = ip_checker.get_public_ip()
if ip:
if verbose:
print('provider: ', ip_checker_cls)
return ip
except Exception as e:
if verbose:
print(e)
return False
|
ceb8/astroquery
|
astroquery/jplhorizons/core.py
|
Python
|
bsd-3-clause
| 67,058
| 0.000224
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# 1. standard library imports
from numpy import nan
from numpy import isnan
from numpy import ndarray
from collections import OrderedDict
import warnings
# 2. third party imports
from astropy.table import Table, Column
from astropy.io import ascii
from astropy.time import Time
from astropy.utils.exceptions import AstropyDeprecationWarning
# 3. local imports - use rel
|
ative imports
# commonly required local imports shown below as example
# all Query classes should inherit from BaseQuery.
from ..query import BaseQuery
# async_to_sync generates the relevant query tools from _async methods
from ..utils import async_to_sync
# import configurable items declared in __init__.py
from . import conf
__all__ = ['Horizons', 'HorizonsClass']
@async_to_sync
class HorizonsClass(BaseQuery):
"""
A
|
class for querying the
`JPL Horizons <https://ssd.jpl.nasa.gov/horizons/>`_ service.
"""
TIMEOUT = conf.timeout
def __init__(self, id=None, location=None, epochs=None,
id_type=None):
"""Instantiate JPL query.
Parameters
----------
id : str, required
Name, number, or designation of the object to be queried.
location : str or dict, optional
Observer's location for ephemerides queries or center body name for
orbital element or vector queries. Uses the same codes as JPL
Horizons. If no location is provided, Earth's center is used for
ephemerides queries and the Sun's center for elements and vectors
queries. Arbitrary topocentric coordinates for ephemerides queries
can be provided in the format of a dictionary. The dictionary has to
be of the form {``'lon'``: longitude in deg (East positive, West
negative), ``'lat'``: latitude in deg (North positive, South
negative), ``'elevation'``: elevation in km above the reference
ellipsoid, [``'body'``: Horizons body ID of the central body;
optional; if this value is not provided it is assumed that this
location is on Earth]}.
epochs : scalar, list-like, or dictionary, optional
Either a list of epochs in JD or MJD format or a dictionary defining
a range of times and dates; the range dictionary has to be of the
form {``'start'``: 'YYYY-MM-DD [HH:MM:SS]', ``'stop'``: 'YYYY-MM-DD
[HH:MM:SS]', ``'step'``: 'n[y|d|m|s]'}. Epoch timescales depend on
the type of query performed: UTC for ephemerides queries, TDB for
element and vector queries. If no epochs are provided, the current
time is used.
id_type : str, optional
Controls Horizons's object selection for ``id``
[HORIZONSDOC_SELECTION]_ . Options: ``'designation'`` (small body
designation), ``'name'`` (asteroid or comet name),
``'asteroid_name'``, ``'comet_name'``, ``'smallbody'`` (asteroid
and comet search), or ``None`` (first search search planets,
natural satellites, spacecraft, and special cases, and if no
matches, then search small bodies).
References
----------
.. [HORIZONSDOC_SELECTION] https://ssd.jpl.nasa.gov/horizons/manual.html#select (retrieved 2021 Sep 23).
Examples
--------
>>> from astroquery.jplhorizons import Horizons
>>> eros = Horizons(id='433', location='568',
... epochs={'start':'2017-01-01',
... 'stop':'2017-02-01',
... 'step':'1d'})
>>> print(eros) # doctest: +SKIP
JPLHorizons instance "433"; location=568, epochs={'start': '2017-01-01', 'step': '1d', 'stop': '2017-02-01'}, id_type=None
"""
super(HorizonsClass, self).__init__()
self.id = id
self.location = location
# check for epochs to be dict or list-like; else: make it a list
if epochs is not None:
if isinstance(epochs, (list, tuple, ndarray)):
pass
elif isinstance(epochs, dict):
if not ('start' in epochs and
'stop' in epochs and
'step' in epochs):
raise ValueError('time range ({:s}) requires start, stop, '
'and step'.format(str(epochs)))
else:
# turn scalars into list
epochs = [epochs]
self.epochs = epochs
# check for id_type
if id_type in ['majorbody', 'id']:
warnings.warn("``id_type``s 'majorbody' and 'id' are deprecated "
"and replaced with ``None``, which has the same "
"functionality.", AstropyDeprecationWarning)
id_type = None
if id_type not in [None, 'smallbody', 'designation', 'name',
'asteroid_name', 'comet_name']:
raise ValueError('id_type ({:s}) not allowed'.format(id_type))
self.id_type = id_type
# return raw response?
self.return_raw = False
self.query_type = None # ['ephemerides', 'elements', 'vectors']
self.uri = None # will contain query URL
self.raw_response = None # will contain raw response from server
def __str__(self):
"""
String representation of HorizonsClass object instance'
Examples
--------
>>> from astroquery.jplhorizons import Horizons
>>> eros = Horizons(id='433', location='568',
... epochs={'start':'2017-01-01',
... 'stop':'2017-02-01',
... 'step':'1d'})
>>> print(eros) # doctest: +SKIP
JPLHorizons instance "433"; location=568, epochs={'start': '2017-01-01', 'step': '1d', 'stop': '2017-02-01'}, id_type=None
"""
return ('JPLHorizons instance \"{:s}\"; location={:s}, '
'epochs={:s}, id_type={:s}').format(
str(self.id),
str(self.location),
str(self.epochs),
str(self.id_type))
# ---------------------------------- query functions
def ephemerides_async(self, airmass_lessthan=99,
solar_elongation=(0, 180), max_hour_angle=0,
rate_cutoff=None,
skip_daylight=False,
refraction=False,
refsystem='ICRF',
closest_apparition=False, no_fragments=False,
quantities=conf.eph_quantities,
get_query_payload=False,
get_raw_response=False, cache=True,
extra_precision=False):
"""
Query JPL Horizons for ephemerides.
The ``location`` parameter in ``HorizonsClass`` refers in this case to
the location of the observer.
The following tables list the values queried, their definitions, data
types, units, and original Horizons designations (where available). For
more information on the definitions of these quantities, please refer to
the `Horizons User Manual <https://ssd.jpl.nasa.gov/?horizons_doc>`_.
+------------------+-----------------------------------------------+
| Column Name | Definition |
+==================+===============================================+
| targetname | official number, name, designation (string) |
+------------------+-----------------------------------------------+
| H | absolute magnitude in V band (float, mag) |
+------------------+-----------------------------------------------+
| G | photometric slope parameter (float) |
+------------------+-----------------------------------------------+
| M1 | come
|
Venturi/cms
|
env/lib/python2.7/site-packages/aldryn_newsblog/migrations/0005_auto_20150807_0207.py
|
Python
|
gpl-2.0
| 558
| 0.001792
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('aldryn_newsblog', '0004_auto_20150622_1606'),
]
operations = [
migrations.AlterField(
model_name='newsblogconfig',
name='template_prefix',
field=models.CharField(blank=True, max_le
|
ngth=20, null=True, verbose_name='Prefix for template dirs', choices=[(b'dummy', b'dummy')]),
preserve_default=True
|
,
),
]
|
elianearaujo/tst-qcheck
|
bin/qchecklib.py
|
Python
|
agpl-3.0
| 3,348
| 0.011947
|
#!/usr/bin/env python
# Aid tools to quality checker.
# Qchecklib
# Eliane Araujo, 2016
import os
import sys
import commands
import json
try:
from cc import measure_complexity
except ImportError:
print("tst quality checker needs cc.py to work.")
sys.exit(1)
try:
sys.path.append('/usr/local/bin/radon/')
from radon.raw import *
from radon.complexity import *
from radon.metrics import *
except ImportError:
print("tst quality checker needs radon to work.")
sys.exit(1)
try:
im
|
port urllib.request as urlrequest
except ImportError:
import urllib as urlrequest
url = 'http://qchecklog.appspot.com/api/action/'
def four_metrics(program_name):
return "%s %s %s %s" % ( lloc(program_name), cc(program
|
_name), vhalstead(program_name), pep8(program_name)["count"])
def pep8count(program):
return int(pep8(program)[0])
def pep8(program):
result = []
cmd = 'pycodestyle.py --select=E --count ' + program
try:
pep_errors = commands.getoutput(cmd)
except ImportError:
print("tst quality checker needs pycodestyle.py to work.")
sys.exit(1)
if pep_errors:
for error in pep_errors.splitlines():
if error.isdigit():
result.insert(0, int(error))
break
#remove filename from message.
#Example:
#reference.py:15:16: E225 missing whitespace around operator
result.append( error[error.find(":") + 1:] )
else:
result = [0]
return result
def header_lines(filename):
# Count header's lines
# Consider "coding" and "env" as header
program = open(filename, 'r')
code = program.read()
counter = 0
codelines = code.split("\n")
while codelines[counter].startswith("#"):
counter += 1
program.close()
return counter
def vhalstead(filename):
return halstead_metrics("vol", filename)
def halstead_metrics(options, filename):
#It may be used another options
program = open(filename, 'r')
code = program.read()
if options == 'vol':
h = h_visit(code).volume
else:
h = h_visit(code)
program.close()
return round(h, 2)
def cc(filename):
# Radon complexity method only applies to programs containing classes or functions.
# Using another API to other cases.
program = open(filename, 'r')
code = program.read()
try:
# Use radon
visitor = cc_visit(code)
if len(visitor) <= 0:
# Doesn't have functions or classes.
# Use cc.py
stats = measure_complexity(code)
cc = stats.complexity
else:
cc = 0
for i in range( len(visitor) ):
cc += visitor[i].complexity
except Exception as e:
# Failed
print("qcheck: unable to get cc")
cc = 0
program.close()
return cc
def lloc(filename):
program = open(filename, 'r')
code = program.read()
lines = raw_metrics(code)[1]
program.close()
return lines
def raw_metrics(code):
return analyze(code)
def save(message):
type_ = 'accept'
urlrequest.urlopen(url + type_, data=message)
if __name__ == '__main__':
print("qchecklib is a helper module for tst_qcheck commands")
|
wavemoth/wavemoth
|
wavemoth/test/test_blas.py
|
Python
|
gpl-2.0
| 812
| 0.004926
|
import numpy as np
from numpy import all
from numpy.testing import assert_almost_equal
from nose.tools import ok_
from ..blas import *
def ndrange(shape, dtype=np.double, order='C'):
return np.arange(np.prod(shape), dtype=dtype).reshape(shape).copy(order)
def assert_dgemm(dgemm_func, A_order, B_order, C_order):
def test(m, n, k):
A = ndrange((m, k), order=A_orde
|
r)
B = ndrange((k, n), order=B_order)
C = np.zeros((m, n), order=C_order)
dgemm_func
|
(A, B, C)
assert_almost_equal(C, np.dot(A, B))
test(2, 3, 4)
test(0, 3, 4)
test(2, 0, 4)
test(2, 3, 0)
test(0, 0, 2)
test(0, 2, 0)
test(0, 0, 2)
test(0, 0, 0)
def test_dgemm():
yield assert_dgemm, dgemm_crc, 'F', 'C', 'F'
yield assert_dgemm, dgemm_ccc, 'F', 'F', 'F'
|
vyscond/my-college-api
|
cfg/wsgi.py
|
Python
|
mit
| 546
| 0
|
"""
WSGI config for cfg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_appl
|
ication
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cfg.settings")
try:
if os.environ['ENV'] == 'production':
from dj_static import Cling
application = Cling(get_wsgi_application())
except Exception as e:
applica
|
tion = get_wsgi_application()
|
amagdas/superdesk
|
server/apps/aap/import_text_archive/commands.py
|
Python
|
agpl-3.0
| 9,503
| 0.001789
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
import urllib3
import urllib
import xml.etree.ElementTree as etree
from superdesk.io.iptc import subject_codes
from datetime import datetime
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, ITEM_STATE, CONTENT_STATE
from superdesk.utc import utc
from superdesk.io.commands.update_ingest import process_iptc_codes
from superdesk.etree import get_text_word_count
# The older content does not contain an anpa category, so we derive it from the
# publication name
pubnames = {
'International Sport': 'S',
'Racing': 'R',
'Parliamentary Press Releases': 'P',
'Features': 'C',
'Financial News': 'F',
'General': 'A',
'aap Features': 'C',
'aap International News': 'I',
'aap Australian Sport': 'S',
'Australian General News': 'A',
'Asia Pulse Full': 'I',
'AFR Summary': 'A',
'Australian Sport': 'T',
'PR Releases': 'J',
'Entertainment News': 'E',
'Special Events': 'Y',
'Asia Pulse': 'I',
'aap International Sport': 'S',
'Emergency Services': 'A',
'BRW Summary': 'A',
'FBM Summary': 'A',
'aap Australian General News': 'A',
'International News': 'I',
'aap Financial News': 'F',
'Asia Pulse Basic': 'I',
'Political News': 'P',
'Advisories': 'V'
}
class AppImportTextArchiveCommand(superdesk.Command):
option_list = (
superdesk.Option('--start', '-strt', dest='start_id', required=False),
superdesk.Option('--user', '-usr', dest='user', required=True),
superdesk.Option('--password', '-pwd', dest='password', required=True),
superdesk.Option('--url_root', '-url', dest='url', required=True),
superdesk.Option('--query', '-qry', de
|
st='query', required=True),
superdesk.Option('--count', '-c', dest='limit', required=False)
)
def run(self, start_id, user, password, url, query, limit):
print('Starting text archive import at {}'.format(start_id))
self._user = user
self._password = password
self._id = int(start_id)
self._url_root = url
self._query = urllib.parse.quote(query)
if limit is not No
|
ne:
self._limit = int(limit)
else:
self._limit = None
self._api_login()
x = self._get_bunch(self._id)
while x:
self._process_bunch(x)
x = self._get_bunch(self._id)
if self._limit is not None and self._limit <= 0:
break
print('finished text archive import')
def _api_login(self):
self._http = urllib3.PoolManager()
credentials = '?login[username]={}&login[password]={}'.format(self._user, self._password)
url = self._url_root + credentials
r = self._http.urlopen('GET', url, headers={'Content-Type': 'application/xml'})
self._headers = {'cookie': r.getheader('set-cookie')}
self._anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
def _get_bunch(self, id):
url = self._url_root + \
'archives/txtarch?search_docs[struct_query]=(DCDATA_ID<{0})&search_docs[query]='.format(id)
url += self._query
url += '&search_docs[format]=full&search_docs[pagesize]=500&search_docs[page]=1'
url += '&search_docs[sortorder]=DCDATA_ID%20DESC'
print(url)
retries = 3
while retries > 0:
r = self._http.request('GET', url, headers=self._headers)
if r.status == 200:
e = etree.fromstring(r.data)
# print(str(r.data))
count = int(e.find('doc_count').text)
if count > 0:
print('count : {}'.format(count))
return e
else:
self._api_login()
retries -= 1
return None
def _get_head_value(self, doc, field):
el = doc.find('dcdossier/document/head/' + field)
if el is not None:
return el.text
return None
def _addkeywords(self, key, doc, item):
code = self._get_head_value(doc, key)
if code:
if 'keywords' not in item:
item['keywords'] = []
item['keywords'].append(code)
def _process_bunch(self, x):
# x.findall('dc_rest_docs/dc_rest_doc')[0].get('href')
for doc in x.findall('dc_rest_docs/dc_rest_doc'):
print(doc.get('href'))
id = doc.find('dcdossier').get('id')
if int(id) < self._id:
self._id = int(id)
item = {}
item['guid'] = doc.find('dcdossier').get('guid')
# if the item has been modified in the archive then it is due to a kill
# there is an argument that this item should not be imported at all
if doc.find('dcdossier').get('created') != doc.find('dcdossier').get('modified'):
item[ITEM_STATE] = CONTENT_STATE.KILLED
else:
item[ITEM_STATE] = CONTENT_STATE.PUBLISHED
value = datetime.strptime(self._get_head_value(doc, 'PublicationDate'), '%Y%m%d%H%M%S')
item['firstcreated'] = utc.normalize(value) if value.tzinfo else value
item['versioncreated'] = item['firstcreated']
item['unique_id'] = doc.find('dcdossier').get('unique')
item['ingest_id'] = id
item['source'] = self._get_head_value(doc, 'Agency')
self._addkeywords('AsiaPulseCodes', doc, item)
byline = self._get_head_value(doc, 'Byline')
if byline:
item['byline'] = byline
# item['service'] = self._get_head_value(doc,'Service')
category = self._get_head_value(doc, 'Category')
if not category:
publication_name = self._get_head_value(doc, 'PublicationName')
if publication_name in pubnames:
category = pubnames[publication_name]
if category:
anpacategory = {}
anpacategory['qcode'] = category
for anpa_category in self._anpa_categories['items']:
if anpacategory['qcode'].lower() == anpa_category['qcode'].lower():
anpacategory = {'qcode': anpacategory['qcode'], 'name': anpa_category['name']}
break
item['anpa_category'] = [anpacategory]
self._addkeywords('CompanyCodes', doc, item)
type = self._get_head_value(doc, 'Format')
if type == 'x':
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
elif type == 't':
item[ITEM_TYPE] = CONTENT_TYPE.PREFORMATTED
else:
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
item['keyword'] = self._get_head_value(doc, 'Keyword')
item['ingest_provider_sequence'] = self._get_head_value(doc, 'Sequence')
orginal_source = self._get_head_value(doc, 'Author')
if orginal_source:
item['original_source'] = orginal_source
item['headline'] = self._get_head_value(doc, 'Headline')
code = self._get_head_value(doc, 'SubjectRefNum')
if code and len(code) == 7:
code = '0' + code
if code and code in subject_codes:
item['subject'] = []
item['subject'].append({'qcode': code, 'name': subject_codes[code]})
try:
process_iptc_codes(item, None)
except:
pass
slug = self._get_head_value(doc, 'SLUG')
if slug:
item['slugline'] = slug
else:
item['slugline'] = self._get_head_value(doc, 'Keyword')
# self._addkeywords('Takekey', doc, item)
|
pgaref/HTTP_Request_Randomizer
|
http_request_randomizer/requests/parsers/js/UnPacker.py
|
Python
|
mit
| 1,507
| 0.004645
|
import re
import requests
import logging
logger = logging.getLogger(__name__)
class JsUnPacker(object):
"""
It takes the javascript file's url which contains the port numbers for
the encrypted strings. The file has to be unpacked to a readable form just like
http://matthewfl.com/unPacker.html does. Then we create a dictionary for
every key:port pair.
"""
# TODO: it might not be necessary to unpack the js code
def __init__(self, js_file_url):
logger.info("JS UnPacker init path: {}".format(js_file_url))
r = requests.get(js_file_url)
encrypted = r.text.strip()
encrypted = '(' + encrypted.split('}(')[1][:-1]
unpacked = eval('self.unpack' +encrypted) # string of the js code in unpacked form
matches = re.findall(r".*?\('\.([a-zA-Z0-9]{1,6})'\).*?\((\d+)\)", unpacked)
self.ports = dict((key, port) for key, port in matches)
logger.debug('portmap: '+str(self.ports))
def baseN(self, num, b, numerals="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"):
return ((num == 0) and numerals[0]) or (self.baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
def unpack(self, p, a, c, k, e=None, d=None):
while c:
c -= 1
if k[c]:
p = re.sub("\
|
\b" + self.bas
|
eN(c, a) + "\\b", k[c], p)
return p
def get_port(self, key):
return self.ports[key]
def get_ports(self):
return self.ports
|
liberza/bacon
|
simulator/payload.py
|
Python
|
mit
| 4,318
| 0.003475
|
#!/usr/bin/python3
import json
import numpy as np
import random
class Payload():
'''
Instance contains altitude data for a balloon flight. Use alt() method
to get interpolated data.
'''
addr = None
name = "Payload"
time_index = 0.0 # index into the flight profile data, in seconds.
timestep = 1.0 # gets larger as weight decreases.
last_request_time = 0
last_alt = 0
def __init__(self, filename, mass, ballast, name):
self.parse_profile(filename)
self.initial_mass = mass # kg
self.mass = self.initial_mass # kg
# ballast is included in total mass.
self.initial_ballast = ballast # liters
self.name = name
def parse_profile(self, filename):
'''
Parse a JSON file containing an ascent profile.
'''
with open(filename, 'r') as data_file:
profile = json.load(data_file)
# Create an array of int32's. Alt is in decimeters.
self.alts = (np.array(profile['data'])*10).astype(np.int32)
self.ref_timestep = profile['timestep'] # s
self.ref_mass = profile['mass'] # s
self.times = np.arange(0, self.alts.size*self.ref_timestep, self.ref_timestep)
def alt(self):
'''
Returns the altitude at the desired time.
s is the time in seconds, with 0 being the beginning
of the flight.
'''
# alt = None if seconds is outside of the flight time.
if (self.time_index > self.alts.size):
alt = None
print("time index > alt size")
print(self.time_index)
elif (self.time_index < 0):
alt = None
print("time index < 0")
print(self.time_index)
# otherwise, linearly interpolate between the two closest values.
else:
alt = np.empty
alt = np.interp(self.time_index, self.times, self.alts)
return alt
# Did curve-fitting on HabHub data to come up with timestep adjustment.
def adjust_time(self, time_elapsed):
time_delta = time_elapsed - self.last_request_time
self.last_request_time = time_elapsed
x = self.ref_mass / self.mass
|
self.timestep = 1.0/(-0.0815243*x*x*x + 0.1355*x*x - 0.391461*x + 1.33748611)
self.time_index += time_delta*self.timestep
def drop_mass(self, ballast_time_ms):
# experimental results show 4.925ml/s drain rate. with current setup.
# we give it random +-10% error, because the payload is getting
# blasted by high winds and the ballast is sloshing around.
noise = random.uniform(0.9,
|
1.1)
new_mass = self.mass - (noise*ballast_time_ms*0.004925/1000)*0.8
if (new_mass > self.initial_mass - self.initial_ballast):
self.mass = new_mass
else:
self.mass = self.initial_mass - self.initial_ballast
if __name__ == '__main__':
# initialize Flights. 'PAYLOAD_X_ID' is the digimesh ID of payload X.
fp1 = Payload('profiles/umhab52.json', 1.2)
fp2 = Payload('profiles/umhab48.json', 1.4)
'''
xbee = XBee.init('/dev/xbee', 1200) # initialize serial for XBee, 1200baud
ft = 0 # flight time starts at 0
cur_payload = None
while(True):
# Wait for payloads to request an altitude, send it, and update the
# payload’s mass. Add noise into the system for realistic simulation.
req = alt_request_wait();
if ((req.addr != fp1.addr) and (req.addr != fp2.addr)):
if (fp1.addr == None):
fp1.addr = req.addr
cur_payload = fp1
else if (fp2.addr == None):
fp2.addr = req.addr
cur_payload = fp2
else:
print('Got another XBee\'s frame. Maybe change the network id.')
elif (req.addr == fp1.addr):
print('got a fp1 alt request')
cur_payload = fp1
else:
print('got a fp2 alt request')
cur_payload = fp2
XBee.sendAlt(cur_payload.addr, cur_payload.alt())
fp1.mass -= XBee.getBallastDropped(fp1.id)*mass_noise()
ft += timestep
print(fp.timestep)
print(fp.alts)
print(fp.alts.size)
print(fp.times.size)
print(fp.alt(24))
'''
|
amueller/strata_singapore_2015
|
solutions/cross_validation_iris.py
|
Python
|
cc0-1.0
| 280
| 0
|
from sklearn.datasets import load_iris
from sklea
|
rn.cross_validation import StratifiedKFold, KFold
iris = load_iris()
X, y = iris.data, iris.target
print(cross_val_sco
|
re(LinearSVC(), X, y, cv=KFold(len(X), 3)))
print(cross_val_score(LinearSVC(), X, y, cv=StratifiedKFold(y, 3)))
|
Treode/store
|
client/python/tx_clock.py
|
Python
|
apache-2.0
| 2,394
| 0.003759
|
# Copyright 2014 Treode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, time
from functools import total_ordering
from tx_clock import *
@total_ordering
class TxClock(object):
__min_micro_seconds = long(0)
__max_micro_seconds = long(2**63 - 1)
# Bounds on TxClock values
@staticmethod
def min_value():
return TxClock(micro_seconds=TxClock.__min_micro_seconds)
# In Python, longs are actually unbounded, but we'll give TxClock a max
@staticmethod
def max_value():
return TxClock(micro_seconds=TxClock.__max_micro_seconds)
@staticmethod
def now():
current_time_micro_seconds = long(time.time())*10**6
return TxClock(micro_seconds=current_t
|
ime_micro_seconds)
# Input time in micro-seconds!
def __init__(self, micro_seconds=None):
if (micro_seconds == None):
raise ValueError("Please input time in micro-seconds!")
elif (micro_seconds > TxClock.__max_micro_seconds):
print "micro_seconds: ", micro_seconds
print "max: ", TxClock.__max_micro_seconds
|
raise ValueError("micro_seconds arg > max micro_seconds value")
# Assume user input time in micro-seconds
else:
self.time = long(micro_seconds)
def to_seconds(self):
# Times are ints, not floats
return self.time / (10**6)
def __repr__(self):
return "TxClock(%s)" % str(self.time)
# Make TxClock instances comparable
def __eq__(self, other):
if (type(other) == TxClock):
return self.time == other.time
else:
return False
def __gt__(self, other):
if (type(other) == TxClock):
return self.time > other.time and not self.__eq__(other)
elif (other == None):
return True
else:
return False
|
caseyrollins/osf.io
|
admin/base/settings/defaults.py
|
Python
|
apache-2.0
| 7,533
| 0.00146
|
"""
Django settings for the admin project.
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
from django.contrib import messages
from api.base.settings import * # noqa
# TODO ALL SETTINGS FROM API WILL BE IMPORTED AND WILL NEED TO BE OVERRRIDEN
# TODO THIS IS A STEP TOWARD INTEGRATING ADMIN & API INTO ONE PROJECT
# import local # Build own local.py (used with postgres)
# TODO - remove duplicated items, as this is now using settings from the API
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# from the OSF settings
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
# Don't allow migrations
DATABASE_ROUTERS = ['admin.base.db.router.NoMigrationRouter']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'admin'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'admin-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
# set to False: prereg uses a SPA and ajax and grab the token to use it in the requests
CSRF_COOKIE_HTTPONLY = False
ALLOWED_HOSTS = [
'.osf.io'
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 5,
}
},
]
USE_L10N = False
# Email settings. Account created for testing. Password shouldn't be hardcoded
# [DEVOPS] this should be set to 'django.core.mail.backends.smtp.EmailBackend' in the > dev local.py.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Sendgrid Email Settings - Using OSF credentials.
# Add settings references to local.py
EMAIL_HOST = osf_settings.MAIL_SERVER
EMAIL_HOST_USER = osf_settings.MAIL_USERNAME
EMAIL_HOST_PASSWORD = osf_settings.MAIL_PASSWORD
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
# 3rd party
'django_celery_results',
'raven.contrib.django.raven_compat',
'webpack_loader',
'django_nose',
'password_reset',
'guardian',
'waffle',
# OSF
'osf',
# Addons
'addons.osfstorage',
'addons.wiki',
'addons.twofactor',
# Internal apps
'admin.common_auth',
'admin.base',
'admin.pre_reg',
'admin.spam',
'admin.metrics',
'admin.nodes',
'admin.users',
'admin.desk',
'admin.meetings',
'admin.institutions',
'admin.preprint_providers',
)
MIGRATION_MODULES = {
'osf': None,
'reviews': None,
'addons_osfstorage': None,
'addons_wiki': None,
'addons_twofactor': None,
}
USE_TZ = True
TIME_ZONE = 'UTC'
# local development using https
if osf_settings.SECURE_MODE and osf_settings.DEBUG_MODE:
INSTALLED_APPS += ('sslserver',)
# Custom user model (extends AbstractBaseUser)
AUTH_USER_MODEL = 'osf.OSFUser'
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'admin'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
CORS_ALLOW_CREDENTIALS = True
MIDDLEWARE = (
# TokuMX transaction support
# Needs to go before CommonMiddleware, so that transactions are always started,
# even in the event of a redirect. CommonMiddleware may cause other middlewares'
# process_request to be skipped, e.g. when a trailing slash is omitted
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.PostcommitTaskMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'waffle.middleware.WaffleMiddleware',
)
MESSAGE_TAGS = {
messages.SUCCESS: 'text-success',
messages.ERROR: 'text-danger',
messages.WARNING: 'text-warning',
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
}
}]
ROOT_URLCONF = 'admin.base.urls'
WSGI_APPLICATION = 'admin.base.wsgi.application'
ADMIN_BASE = ''
STATIC_URL = '/static/'
LOGIN_URL = 'account/login/'
LOGIN_REDIRECT_URL = ADMIN_BASE
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, '../website/static'),
)
LANGUAGE_CODE = 'en-us'
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'public/js/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--verbosity=2']
# Keen.io settings in local.py
KEEN_PROJECT_ID = osf_settings.KEEN['private']['project_id']
KEEN_READ_KEY = osf_settings.KEEN['private']['read_key']
KEEN_WRITE_KEY = osf_settings.KEEN['private']['write_key']
KEEN_CREDENTIALS = {
'keen_ready': False
}
if KEEN_CREDENTIALS['keen_ready']:
KEEN_CREDENTIALS.update({
'keen_project_id': KEEN_PROJECT_ID,
'keen_read_key': KEEN_READ_KEY,
'keen_write_key': KEEN_WRITE_KEY
})
ENTRY_POINTS
|
= {'osf4m': 'osf4m', 'prereg_challenge_campaign': 'prereg',
'institution_campaign': 'institution'}
# Set in local.py
DESK_K
|
EY = ''
DESK_KEY_SECRET = ''
TINYMCE_APIKEY = ''
SHARE_URL = osf_settings.SHARE_URL
API_DOMAIN = osf_settings.API_DOMAIN
if DEBUG:
INSTALLED_APPS += ('debug_toolbar', 'nplusone.ext.django',)
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware', 'nplusone.ext.django.NPlusOneMiddleware',)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda(_): True,
'DISABLE_PANELS': {
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.redirects.RedirectsPanel'
}
}
# If set to True, automated tests with extra queries will fail.
NPLUSONE_RAISE = False
|
drxaero/calibre
|
src/calibre/gui2/convert/azw3_output.py
|
Python
|
gpl-3.0
| 978
| 0.00818
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.convert.azw
|
3_output_ui import Ui_Form
from calibre.gui2.convert import Widget
font_family_model = None
class PluginWidget(Widget, Ui_Form):
TITLE = _('AZW3 Output')
HELP = _('Options specific to')+' AZW3 '+_('output')
COMMIT_NAME = 'azw3_output'
ICON = I('mimetypes/azw3.png')
def __init__(self, parent, get_option, get_help, db=None, book_id=None):
Widget.__init__(self, parent,
|
['prefer_author_sort', 'toc_title',
'mobi_toc_at_start',
'dont_compress', 'no_inline_toc', 'share_not_sync',
]
)
self.db, self.book_id = db, book_id
self.initialize_options(get_option, get_help, db, book_id)
|
qubs/climate-data-api
|
core/views.py
|
Python
|
apache-2.0
| 102
| 0
|
fro
|
m django.shortcuts import render
def hom
|
e(request):
return render(request, "core/home.html")
|
rimsleur/QSLime
|
debugger/ObjectViewer.py
|
Python
|
gpl-2.0
| 412
| 0.026239
|
#!/usr/bin/python
# coding: utf
|
8
"""
Окно просмотра информации по объектам кода (константы, поля, записи, таблицы, списки)
"""
import sys
from PyQt4 import QtGui
class ObjectViewer (QtGui.QTableView):
def __init__ (self
|
, main_window):
QtGui.QTableView.__init__ (self)
main_window.object_viewer = self
self.main_window = main_window
|
jmimu/pyNekketsu
|
retrogamelib/dialog.py
|
Python
|
gpl-3.0
| 4,110
| 0.007056
|
import pygame
def arrow_image(color):
img = pygame.Surface((7, 6))
img.fill((226, 59, 252))
img.set_colorkey((226, 59, 252), pygame.RLEACCEL)
pygame.draw.polygon(img, color, ((0, 0), (3, 3), (6, 0)))
return img
class Menu(object):
def __init__(self, font, options):
self.font = font
self.options = options
self.option = 0
self.height = len(self.options)*(self.font.get_height())+(len(self.options)-1)*3
self.width = 0
for o in self.options:
w = (len(o)+1)*self.font.get_width()
if w > self.width:
self.width = w
def draw(self, surface, pos, background=None, border=None):
|
ypos = pos[1]
i = 0
if background:
pygame.draw.rect(surface, background, (pos[0]-4, pos[1]-4,
self.width+8, self.height+6))
if border:
pygame.draw.rect(surface, border, (pos[0]-4, pos[1]-4,
self.width+8, self.height+8), 1)
for opt in self.options:
if i == self.option:
icon = ">"
else:
|
icon = " "
ren = self.font.render(icon + opt)
surface.blit(ren, (pos[0], ypos))
ypos += ren.get_height()+3
i += 1
def move_cursor(self, dir):
if dir > 0:
if self.option < len(self.options)-1:
self.option += 1
elif dir < 0:
if self.option > 0:
self.option -= 1
def get_option(self):
return self.option, self.options[self.option]
class DialogBox(object):
def __init__(self, size, background_color, border_color, font):
self.dialog = []
self.image = pygame.Surface(size)
self.font = font
self.size = size
self.background_color = background_color
self.border_color = border_color
self.update_box()
self.text_pos = 0
self.shown = False
self.scroll_delay = 1
self.frame = 0
self.down_arrow = arrow_image(font.color)
self.curr_dialog=0
def set_scrolldelay(self, delay):
self.scroll_delay = delay
def set_dialog(self, dialog_list):
self.page = 0
self.pages = len(dialog_list)
self.dialog = dialog_list
self.shown = True
self.text_pos = 0
def update_box(self):
self.image.fill(self.background_color)
pygame.draw.rect(self.image, self.border_color,
(0, 0, self.size[0]-1, self.size[1]-1), 1)
def progress(self):
if (self.curr_dialog==0):
return
if (self.text_pos >= len(self.curr_dialog)):
if self.page < self.pages-1:
self.page += 1
self.text_pos = 0
else:
self.shown = False
else:
self.text_pos = len(self.curr_dialog)
def draw(self, surface, pos):
if self.shown and self.page < self.pages:
self.update_box()
self.curr_dialog = self.dialog[self.page]
xpos = 4
ypos = 4
if self.text_pos < len(self.curr_dialog):
self.frame -= 1
if self.frame <= 0:
self.text_pos += 1
self.frame = self.scroll_delay
else:
self.image.blit(self.down_arrow,
(self.image.get_width()-12,
self.image.get_height()-8))
dialog = self.curr_dialog[:self.text_pos]
for word in dialog.split(" "):
ren = self.font.render(word + " ")
w = ren.get_width()
if xpos > self.image.get_width()-w:
ypos += ren.get_height()+3
xpos = 4
self.image.blit(ren, (xpos, ypos))
xpos += w
surface.blit(self.image, pos)
def over(self):
return self.shown != True
def close(self):
self.shown = False
self.page = self.pages
|
terrorobe/barman
|
setup.py
|
Python
|
gpl-3.0
| 3,778
| 0.001853
|
#!/usr/bin/env python
#
# barman - Backup and Recovery Manager for PostgreSQL
#
# Copyright (C) 2011-2015 2ndQuadrant Italia (Devise.IT S.r.l.) <info@2ndquadrant.it>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Backup and Recovery Manager for PostgreSQL
Barman (Backup and Recovery Manager) is an open source administration
tool for disaster recovery of PostgreSQL servers written in Python.
It allows your organisation to perform remote backups of multiple servers
in business critical environments and help DBAs during the recovery
phase. Barman's most requested features include backup catalogues,
incremental backup, retention policies, remote backup and recovery,
archiving and compression of WAL files and backups.
Barman is written and maintained by PostgreSQL professionals 2ndQuadrant.
"""
import sys
# support fo running test through setup.py
# requires recent setuptools library
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests']
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pyte
|
st
errno = pytest.main(self.test_args)
sys.exit(errno)
cmdclass={'test': PyTest}
except ImportError:
from distutils.core import setup
cmdclass={}
if sys.version_info < (2, 6):
raise SystemExit('ERROR: Barman needs at least python 2.6 to work')
install_requires = ['psycopg2', 'argh >= 0.21.2', 'python-dateutil', 'argcomplete
|
']
if sys.version_info < (2, 7):
install_requires.append('argparse')
barman = {}
with open('barman/version.py', 'r') as fversion:
exec (fversion.read(), barman)
setup(
name='barman',
version=barman['__version__'],
author='2ndQuadrant Italia (Devise.IT S.r.l.)',
author_email='info@2ndquadrant.it',
url='http://www.pgbarman.org/',
packages=['barman', ],
scripts=['bin/barman', ],
data_files=[
('share/man/man1', ['doc/barman.1']),
('share/man/man5', ['doc/barman.5']),
],
license='GPL-3.0',
description=__doc__.split("\n")[0],
long_description="\n".join(__doc__.split("\n")[2:]),
install_requires=install_requires,
platforms=['Linux', 'Mac OS X'],
classifiers=[
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'Topic :: System :: Archiving :: Backup',
'Topic :: Database',
'Topic :: System :: Recovery Tools',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
tests_require=['pytest', 'mock', 'pytest-capturelog', 'pytest-timeout'],
cmdclass=cmdclass,
use_2to3=True,
)
|
michael-ball/sublime-text
|
sublime-text-3/Packages/SublimeLinter/commands.py
|
Python
|
unlicense
| 39,327
| 0.001653
|
# coding: utf-8
#
# commands.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module implements the Sublime Text commands provided by SublimeLinter."""
import datetime
from fnmatch import fnmatch
from glob import glob
import json
import os
import re
import shutil
import subprocess
import tempfile
from textwrap import TextWrapper
from threading import Thread
import time
import sublime
import sublime_plugin
from .lint import highlight, linter, persist, util
def error_command(method):
"""
A decorator that executes method only if the current view has errors.
This decorator is meant to be used only with the run method of
sublime_plugin.TextCommand subclasses.
A wrapped version of method is returned.
"""
def run(self, edit, **kwargs):
vid = self.view.id()
if vid in persist.errors and persist.errors[vid]:
method(self, self.view, persist.errors[vid], persist.highlights[vid], **kwargs)
else:
sublime.message_dialog('No lint errors.')
return run
def select_line(view, line):
"""Change view's selection to be the given line."""
point = view.text_point(line, 0)
sel = view.sel()
sel.clear()
sel.add(view.line(point))
class SublimelinterLintCommand(sublime_plugin.TextCommand):
"""A command that lints the current view if it has a linter."""
def is_enabled(self):
"""
Return True if the current view can be linted.
If the view has *only* file-only linters, it can be linted
only if the view is not dirty.
Otherwise it can be linted.
"""
has_non_file_only_linter = False
vid = self.view.id()
linters = persist.view_linters.get(vid, [])
for lint in linters:
if lint.tempfile_suffix != '-':
has_non_file_only_linter = True
break
if not has_non_file_only_linter:
return not self.view.is_dirty()
return True
def run(self, edit):
"""Lint the current view."""
from .sublimelinter import SublimeLinter
SublimeLinter.shared_plugin().lint(self.view.id())
class HasErrorsCommand:
"""
A mixin class for sublime_plugin.TextCommand subclasses.
Inheriting from this class will enable the command only if the current view has errors.
"""
def is_enabled(self):
"""Return True if the current view has errors."""
vid = self.view.id()
return vid in persist.errors and len(persist.errors[vid]) > 0
class GotoErrorCommand(sublime_plugin.TextCommand):
"""A superclass for commands that go to the next/previous error."""
def goto_error(self, view, errors, direction='next'):
"""Go to the next/previous error in view."""
sel = view.sel()
if len(sel) == 0:
sel.add(sublime.Region(0, 0))
saved_sel = tuple(sel)
empty_selection = len(sel) == 1 and sel[0].empty()
# sublime.Selection() changes the view's selection, get the point first
point = sel[0].begin() if direction == 'next' else sel[-1].end()
regions = sublime.Selection(view.id())
regions.clear()
for error_type in (highlight.WARNING, highlight.ERROR):
regions.add_all(view.get_regions(highlight.MARK_KEY_FORMAT.format(error_type)))
region_to_select = No
|
ne
# If going forward, find the first region beginning after the point.
# If going backward, find the first region ending before the point.
# If nothing is found in the given direction, wrap to the first/last region.
if direction == 'next':
for region in regions:
if (
(point == region.begin() and empty_selection and not region.empty()) or
(point < region.beg
|
in())
):
region_to_select = region
break
else:
for region in reversed(regions):
if (
(point == region.end() and empty_selection and not region.empty()) or
(point > region.end())
):
region_to_select = region
break
# If there is only one error line and the cursor is in that line, we cannot move.
# Otherwise wrap to the first/last error line unless settings disallow that.
if region_to_select is None and ((len(regions) > 1 or not regions[0].contains(point))):
if persist.settings.get('wrap_find', True):
region_to_select = regions[0] if direction == 'next' else regions[-1]
if region_to_select is not None:
self.select_lint_region(self.view, region_to_select)
else:
sel.clear()
sel.add_all(saved_sel)
sublime.message_dialog('No {0} lint error.'.format(direction))
@classmethod
def select_lint_region(cls, view, region):
"""
Select and scroll to the first marked region that contains region.
If none are found, the beginning of region is used. The view is
centered on the calculated region and the region is selected.
"""
marked_region = cls.find_mark_within(view, region)
if marked_region is None:
marked_region = sublime.Region(region.begin(), region.begin())
sel = view.sel()
sel.clear()
sel.add(marked_region)
# There is a bug in ST3 that prevents the selection from changing
# when a quick panel is open and the viewport does not change position,
# so we call our own custom method that works around that.
util.center_region_in_view(marked_region, view)
@classmethod
def find_mark_within(cls, view, region):
"""Return the nearest marked region that contains region, or None if none found."""
marks = view.get_regions(highlight.MARK_KEY_FORMAT.format(highlight.WARNING))
marks.extend(view.get_regions(highlight.MARK_KEY_FORMAT.format(highlight.ERROR)))
marks.sort(key=sublime.Region.begin)
for mark in marks:
if mark.contains(region):
return mark
return None
class SublimelinterGotoErrorCommand(GotoErrorCommand):
"""A command that selects the next/previous error."""
@error_command
def run(self, view, errors, highlights, **kwargs):
"""Run the command."""
self.goto_error(view, errors, **kwargs)
class SublimelinterShowAllErrors(sublime_plugin.TextCommand):
"""A command that shows a quick panel with all of the errors in the current view."""
@error_command
def run(self, view, errors, highlights):
"""Run the command."""
self.errors = errors
self.highlights = highlights
self.points = []
options = []
for lineno, line_errors in sorted(errors.items()):
if persist.settings.get("passive_warnings", False):
if self.highlights.line_type(lineno) != highlight.ERROR:
continue
line = view.substr(view.full_line(view.text_point(lineno, 0))).rstrip('\n\r')
# Strip whitespace from the front of the line, but keep track of how much was
# stripped so we can adjust the column.
diff = len(line)
line = line.lstrip()
diff -= len(line)
max_prefix_len = 40
for column, message in sorted(line_errors):
# Keep track of the line and column
point = view.text_point(lineno, column)
self.points.append(point)
# If there are more than max_prefix_len characters before the adjusted column,
# lop off the excess and insert an ellipsis.
column = max(column - diff, 0)
if column > max_prefix_len:
visible_line = '...' + line[column - max_prefix_len:]
column = max_pref
|
couchbasedeps/git-repo
|
git_config.py
|
Python
|
apache-2.0
| 21,644
| 0.014692
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import errno
import json
import os
import re
import ssl
import subprocess
import sys
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import time
from pyversion import is_python3
if is_python3():
import urllib.request
import urllib.error
else:
import urllib2
import imp
urllib = imp.new_module('urllib')
urllib.request = urllib2
urllib.error = urllib2
from signal import SIGTERM
from error import GitError, UploadError
import platform_utils
from repo_trace import Trace
if is_python3():
from http.client import HTTPException
else:
from httplib import HTTPException
from git_command import GitCommand
from git_command import ssh_sock
from git_command import terminate_ssh_clients
from git_refs import R_CHANGES, R_HEADS, R_TAGS
ID_RE = re.compile(r'^[0-9a-f]{40}$')
REVIEW_CACHE = dict()
def IsChange(rev):
return rev.startswith(R_CHANGES)
def IsId(rev):
return ID_RE.match(rev)
def IsTag(rev):
return rev.startswith(R_TAGS)
def IsImmutable(rev):
return IsChange(rev) or IsId(rev) or IsTag(rev)
def _key(name):
parts = name.split('.')
if len(parts) < 2:
return name.lower()
parts[ 0] = parts[ 0].lower()
parts[-1] = parts[-1].lower()
return '.'.join(parts)
class GitConfig(object):
_ForUser = None
@classmethod
def ForUser(cls):
if cls._ForUser is None:
cls._ForUser = cls(configfile = os.path.expanduser('~/.gitconfig'))
return cls._ForUser
@classmethod
def ForRepository(cls, gitdir, defaults=None):
return cls(configfile = os.path.join(gitdir, 'config'),
defaults = defaults)
def __init__(self, configfile, defaults=None, jsonFile=None):
self.file = configfile
self.defaults = defaults
self._cache_dict = None
self._section_dict = None
self._remotes = {}
self._branches = {}
self._json = jsonFile
if self._json is None:
self._json = os.path.join(
os.path.dirname(self.file),
'.repo_' + os.path.basename(self.file) + '.json')
def Has(self, name, include_defaults = True):
"""Return true if this configuration file has the key.
"""
if _key(name) in self._cache:
return True
if include_defaults and self.defaults:
return self.defaults.Has(name, include_defaults = True)
return False
def GetBoolean(self, name):
"""Returns a boolean from the configuration file.
|
None : The value was not defined, or is not a boolean.
True : The value was set to true or yes.
False: The value was set to false or no.
"""
v = self.GetString(name)
if v is None:
return None
v = v.lower()
if v in ('true', 'yes'):
return True
if v in ('false', 'no'):
return False
return None
def GetString(self, name, all_keys=False):
"""Get the first value for a key, or None if it
|
is not defined.
This configuration file is used first, if the key is not
defined or all_keys = True then the defaults are also searched.
"""
try:
v = self._cache[_key(name)]
except KeyError:
if self.defaults:
return self.defaults.GetString(name, all_keys = all_keys)
v = []
if not all_keys:
if v:
return v[0]
return None
r = []
r.extend(v)
if self.defaults:
r.extend(self.defaults.GetString(name, all_keys = True))
return r
def SetString(self, name, value):
"""Set the value(s) for a key.
Only this configuration file is modified.
The supplied value should be either a string,
or a list of strings (to store multiple values).
"""
key = _key(name)
try:
old = self._cache[key]
except KeyError:
old = []
if value is None:
if old:
del self._cache[key]
self._do('--unset-all', name)
elif isinstance(value, list):
if len(value) == 0:
self.SetString(name, None)
elif len(value) == 1:
self.SetString(name, value[0])
elif old != value:
self._cache[key] = list(value)
self._do('--replace-all', name, value[0])
for i in range(1, len(value)):
self._do('--add', name, value[i])
elif len(old) != 1 or old[0] != value:
self._cache[key] = [value]
self._do('--replace-all', name, value)
def GetRemote(self, name):
"""Get the remote.$name.* configuration values as an object.
"""
try:
r = self._remotes[name]
except KeyError:
r = Remote(self, name)
self._remotes[r.name] = r
return r
def GetBranch(self, name):
"""Get the branch.$name.* configuration values as an object.
"""
try:
b = self._branches[name]
except KeyError:
b = Branch(self, name)
self._branches[b.name] = b
return b
def GetSubSections(self, section):
"""List all subsection names matching $section.*.*
"""
return self._sections.get(section, set())
def HasSection(self, section, subsection = ''):
"""Does at least one key in section.subsection exist?
"""
try:
return subsection in self._sections[section]
except KeyError:
return False
def UrlInsteadOf(self, url):
"""Resolve any url.*.insteadof references.
"""
for new_url in self.GetSubSections('url'):
for old_url in self.GetString('url.%s.insteadof' % new_url, True):
if old_url is not None and url.startswith(old_url):
return new_url + url[len(old_url):]
return url
@property
def _sections(self):
d = self._section_dict
if d is None:
d = {}
for name in self._cache.keys():
p = name.split('.')
if 2 == len(p):
section = p[0]
subsect = ''
else:
section = p[0]
subsect = '.'.join(p[1:-1])
if section not in d:
d[section] = set()
d[section].add(subsect)
self._section_dict = d
return d
@property
def _cache(self):
if self._cache_dict is None:
self._cache_dict = self._Read()
return self._cache_dict
def _Read(self):
d = self._ReadJson()
if d is None:
d = self._ReadGit()
self._SaveJson(d)
return d
def _ReadJson(self):
try:
if os.path.getmtime(self._json) \
<= os.path.getmtime(self.file):
platform_utils.remove(self._json)
return None
except OSError:
return None
try:
Trace(': parsing %s', self.file)
with open(self._json) as fd:
return json.load(fd)
except (IOError, ValueError):
platform_utils.remove(self._json)
return None
def _SaveJson(self, cache):
try:
with open(self._json, 'w') as fd:
json.dump(cache, fd, indent=2)
except (IOError, TypeError):
if os.path.exists(self._json):
platform_utils.remove(self._json)
def _ReadGit(self):
"""
Read configuration data from git.
This internal method populates the GitConfig cache.
"""
c = {}
d = self._do('--null', '--list')
if d is None:
return c
if not is_python3():
d = d.decode('utf-8')
for line in d.rstrip('\0').split('\0'):
if '\n' in line:
key, val = line.split('\n', 1)
else:
key = line
val = None
if key in c:
c[key].append(val)
else:
c[key] = [val]
return c
def _do(self, *args):
command = ['config', '--file', self.file]
command.extend(args)
p = GitCommand(None,
command,
|
arizvisa/syringe
|
lib/mpdebug/default/host.py
|
Python
|
bsd-2-clause
| 491
| 0
|
class base(object):
|
proc = dict
pagesize = pow(2, 12)
arch = None
# and other host-specific attributes
def __init__(self, **kwds):
pass
def __getitem__(self, processid):
return self.proc[processid]
def list(self):
pass
def create(self, executable, args, env=[], directory='.', **kwds):
pass
def attach(self, id):
|
pass
def detach(self, id):
pass
def terminate(self, id):
pass
#######
|
eddiejessup/nex
|
tests/test_reader.py
|
Python
|
mit
| 3,129
| 0.00032
|
import pytest
from nex.reader import Reader, ReaderBuffer
from common import (test_not_here_file_name, test_file_name, test_chars,
test_2_chars)
def test_buffer_init():
"""Check buffer works sensibly."""
r = ReaderBuffer(test_chars)
assert r.i == -1
assert r.chars == test_chars
def test_next_char():
"""Check advancing through a file returns the correct characters."""
r = Reader()
r.insert_chars(test_chars)
cs = [r.advance_loc() for _ in range(4)]
assert cs == test_chars
with pytest.raises(EOFError):
r.advance_loc()
def test_init_missing_file():
"""Check inserting a non-existent file into a reader raises an error."""
r = Reader()
with pytest.raises(IOError):
r.insert_file(test_not_here_file_name)
def test_init_file():
"""Check inserting a non-existent file into a reader raises an error."""
r_direct = Reader()
r_direct.insert_chars(test_chars)
r_file = Reader()
r_file.insert_file(test_file_name)
assert list(r_direct.advance_to_end()) == list(r_file.advance_to_end())
def test_insert_start():
"""Check inserting a new file at the start reads from the second, then the
first."""
r = Reader()
r.insert_chars(test_chars)
r.insert_chars(test_2_chars)
assert list(r.advance_to_end()) == test_2_chars + test_chars
def test_insert_middle():
"""Check inserting a new file halfway through reading a first, reads part
of one, then the second, then the rest of the first."""
r = Reader()
r.insert_chars(test_chars)
cs = [r.advance_loc()]
r.insert_chars(test_2_chars)
cs.extend(list(r.advance_to_end()))
assert cs == ['a', 'd', 'e', 'f', '\n', 'b', 'c', '\n']
def test_insert_end():
"""Check in
|
serting a new file after reading a first, reads the first then the second."""
r = Reader()
r.insert_chars(test_chars)
cs = list(r.advance_to_end())
r.insert_chars(test_2_chars)
cs.extend(list(r.advance_to_end()))
assert cs == test_chars + test_2_chars
def test_peek():
"""Test various errors and constraints on peeking."""
r = Reader()
r.inse
|
rt_chars(test_chars)
# Can't peek at start of buffer
with pytest.raises(ValueError):
r.peek_ahead(n=0)
r.advance_loc()
assert r.current_char == 'a'
# Can't peek backwards, (especially because this would be end of buffer).
with pytest.raises(ValueError):
r.peek_ahead(n=-1)
# Valid peeks.
assert [r.peek_ahead(n=i) for i in range(4)] == test_chars
# Can't peek too far ahead.
with pytest.raises(ValueError):
r.peek_ahead(n=4)
r.advance_loc()
assert r.current_char == 'b'
# Can't peek past end of buffer.
with pytest.raises(EOFError):
r.peek_ahead(n=3)
def test_advance():
"""Test advancing through the reader on one buffer."""
r = Reader()
r.insert_chars(test_chars)
cs = []
for _ in range(4):
r.advance_loc()
cs.append(r.peek_ahead(0))
assert cs == test_chars
# TODO: Line and column numbering.
# TODO: Peeking and advancing on buffers.
|
bosmanoglu/adore-doris
|
lib/python/insar/__init__.py
|
Python
|
gpl-2.0
| 25,698
| 0.033349
|
# -insar.py- coding: utf-8 -*-
"""
Created on Fri Sep 3 10:46:50 2010
@author: bosmanoglu
InSAR module. Includes functions for analyzing SAR interferometry with python.
"""
from numpy import *
from pylab import *
from basic import *
import scipy
from scipy import ndimage #scipy.pkgload('ndimage')
from scipy import signal #scipy.pkgload('signal') #ndimage
from scipy import interpolate #scipy.pkgload('interpolate'); #interp1d,RectBivariateSpline
from scipy import constants #scipy.pkgload('scipy.constants')
from scipy import optimize #scipy.pkgload('optimize')
from scipy import stats
import time_series
import pdb
try:
import stack
from cutting_edge import *
except:
pass
def coh2snr(coh):
return coh/(1.-coh);
def snr2coh(snr):
return snr/(snr+1.);
def coh2pdf(coh,n=100):
domain=linspace(-pi,pi,n);
pdf=(1-coh**2)/(2*pi) \
/ (1-coh**2 * cos(domain)**2) \
* (1 \
+ (coh*cos(domain)*arccos(-1*coh*cos(domain))) \
/ sqrt(1-coh**2*cos(domain)**2) \
)
return pdf
def coh2pdfML(coh,L,n=100,domain=None):
"""coh2pdfML(coh,L,n=100,domain=None)
coh: scalar or vector.
L= scalar, multilook factor
n=100, number of samples in domain [-pi,pi]
domain=vector or [#coh, n] . user specified domains. First axis has to be the same as size(coh).
"""
import scipy
from scipy import special #scipy.pkgload('special')
G=scipy.special.gamma #math.gamma #returns the gamma function value at X, same as scipy.special.gamma
F=scipy.special.hyp2f1 #returns gauss hypergeometric function
if domain is None:
domain=linspace(-pi,pi,n);
if domain.shape[0] == coh.size:
#user specified domain. Should be the same number of elements with coh:
#ccd=dot(atleast_2d(coh), atleast_2d(cos(domain)))
coh=tile(coh, (domain.shape[1],1)).T
ccd=coh*cos(domain);
else:
ccd=dot(atleast_2d(coh).T, atleast_2d(cos(domain))) #Coherence Cos Domain
coh=tile(coh, (domain.shape[0],1)).T
pdf=(1-coh**2)**L/(2*pi) \
* F(L, 1, 0.5,ccd**2) \
+ (G(L+0.5)*(1-coh**2)**L * ccd) \
/ (2*sqrt(pi) * G(L) * (1-ccd**2)**(L+0.5))
return pdf
def coh2stdpha(coh,n=100,lut=None):
'''coh2stdpha(coh,n=100,lut=None)
n:number of samples between -pi +pi
lut: number of samples in look-up-table
ex:
stdpha=coh2stdpha(coh)
stdpha=coh2stdpha(coh,lut=100); #This is much faster but only accurate to 1/100th of coh.max()=1 and coh.min()=0.
'''
if isinstance(coh,list):
coh=array(coh)
elif isinstance(coh,float):
coh=array([coh])
domain=linspace(-pi,pi,n);
dims=coh.shape
stdpha=zeros(dims)
if lut is None:
for k in xrange(size(coh)):#r_[0:size(coh)]:
#numpy.trapz(Y,X) = matlab.trapz(X,Y)
idx=unravel_index(k, dims)
stdpha[idx]=sqrt(trapz(domain**2*coh2pdf(coh[idx],n),domain));
else:
lutx=linspace(coh.min(), coh.max(), lut); #lutx=look up table x
luty=zeros(lutx.shape); # luty=look up table y
for k in xrange(len(lutx)):
luty[k]=sqrt(trapz(domain**2*coh2pdf(lutx[k],n),domain));
lutf=scipy.interpolate.interp1d(lutx,luty, 'linear')
stdpha=lutf(coh)
return stdpha
def coh2stdphaML(coh,L,n=100,lut=None):
'''coh2stdpha(coh,L,n=100,lut=None)
n:number of samples between -pi +pi
lut: number of samples in look-up-table
ex:
L=iobj.coherence.Multilookfactor_azimuth_direction * iobj.coherence.Multilookfactor_range_direction
stdpha=coh2stdpha(coh,L)
stdpha=coh2stdpha(coh,L,lut=100); #This is much faster but only accurate to 1/100th of coh.max()=1 and coh.min()=0.
'''
if isinstance(coh,list):
coh=array(coh)
elif isinstance(coh, number):
coh=array([coh])
#elif isinstance(coh,float):
# coh=array([coh])
domain=linspace(-pi,pi,n);
dims=coh.shape
stdpha=zeros(dims)
if lut is None:
for k in xrange(size(coh)):#r_[0:size(coh)]:
#numpy.trapz(Y,X) = matlab.trapz(X,Y)
idx=unravel_index(k, dims)
stdpha[idx]=sqrt(trapz(domain**2*coh2pdfML(coh[idx],L,n),domain));
else:
lutx=linspace(coh.min(), coh.max(), lut); #lutx=look up table x
luty=zeros(lutx.shape); # luty=look up table y
for k in xrange(len(lutx)):
luty[k]=sqrt(trapz(domain**2*coh2pdfML(lutx[k],L,n),domain));
lutf=scipy.interpolate.interp1d(lutx,
|
luty, 'linear')
stdpha=lutf(coh)
return stdpha
def stdpha2coh(stdpha, L=1, n=100, lut=100):
'''stdpha2cohML(stdpha, L=1, n=100, lut=100):
Creates a lookup table for coherence to stdpha and uses it to reverse the relation
'''
if isinstance(stdpha,list):
stdpha=array(stdpha)
elif isinstance(stdpha, number):
stdpha=array([stdpha])
domain=linspace(-pi,pi,n);
lutx=linspace(0.01, 0.99, lut); #lutx=look up table x
luty=zeros(lutx.shape); # luty=look u
|
p table y
for k in xrange(len(lutx)):
luty[k]=sqrt(trapz(domain**2*coh2pdfML(lutx[k],L,n),domain));
lutf=scipy.interpolate.interp1d(flipud(luty),flipud(lutx), 'linear', bounds_error=False)
coh=lutf(stdpha);
coh[stdpha > luty.max() ]=0.01;
coh[stdpha < luty.min() ]=0.99;
return coh
def gradient_coherence(m,s=None, w=(5,5), low_pass=True):
if any(iscomplexobj(m)):
mg0,mg1=cpxgradient(m)
else:
mg0,mg1=gradient(m)
if s is None:
s=empty(m.shape, dtype=complex);
s[:]=1.+0.j
if any(iscomplexobj(s)):
sg0,sg1=cpxgradient(s)
else:
sg0,sg1=gradient(s)
if low_pass is True:
mg0=scipy.ndimage.generic_filter(mg0, mean, size=w)
mg1=scipy.ndimage.generic_filter(mg1, mean, size=w)
sg0=scipy.ndimage.generic_filter(sg0, mean, size=w)
sg1=scipy.ndimage.generic_filter(sg1, mean, size=w)
#pdb.set_trace()
return coherence(mg0+1j*mg1, sg0+1j*sg1, w=w)
def coherence(m,s=None,w=(5,5)):
'''coherence(master, slave=None, window):
input is master and slave complex images (tested for 1D only)
w is the calculation window.
'''
coh=zeros(size(m))
corrFilter= ones(w)
nfilt=corrFilter.size
corrFilter=corrFilter/nfilt
# Em=scipy.ndimage.filters.correlate(m*conj(m),corrFilter,mode='nearest')
# Es=scipy.ndimage.filters.correlate(s*conj(s),corrFilter,mode='nearest')
# Ems=scipy.ndimage.filters.correlate(m*conj(s),corrFilter,mode='nearest')
if s is None:
s=empty(m.shape, dtype=complex)
s[:]=exp(1.j*0);
Em=scipy.signal.signaltools.correlate(m*conj(m), corrFilter, mode='same')
Es=scipy.signal.signaltools.correlate(s*conj(s), corrFilter, mode='same')
Ems=scipy.signal.signaltools.correlate(m*conj(s), corrFilter, mode='same')
coh=abs(Ems / (sqrt(Em**2+Es**2)/sqrt(2))) #need to divide by two to get root mean square
# for k in r_[0:len(m)]:
# if k+w>=len(m):
# a=k+w-len(m)+1
# else:
# a=0
# mw=m[k-a:k+w]
# sw=s[k-a:k+w]
# coh[k]=mean(mw*conj(sw))/sqrt(mean(mw*conj(mw))*mean(sw*conj(sw)))
return coh
def crosscorrelate(m,s):
"""crosscorrelation(m,s):
"""
coh=zeros(size(m))
#corrFilter= ones(m.shape)
#nfilt=corrFilter.size
#corrFilter=corrFilter/nfilt
#m=rescale(m, [-1,1]);
#m=m-m.mean()
#s=rescale(s, [-1,1]);
#s=s-s.mean()
Em=(m*m.conj()).mean() # Em=(m**2.).sum()
Es=(s*s.conj()).mean() # Es=(s**2.).sum()
Ems=(m*s.conj()).mean() # Ems=(m*s).sum()
#Em=scipy.signal.signaltools.correlate(m*m, corrFilter, mode='same')
#Es=scipy.signal.signaltools.correlate(s*s, corrFilter, mode='same')
#Ems=scipy.signal.signaltools.correlate(m*s, corrFilter, mode='same')
coh=abs(Ems / sqrt(Em*Es))#1.4142135623730949#(2./sqrt(2.))
return coh
def correlate(m,s,w):
coh=zeros(m.shape)
w0=int(w[0]/2.)
w1=int(w[1]/2.)
for k in xrange(m.shape[0]):
for l in xrange(m.shape[1]):
if k<w0:
kk=
|
drammock/mne-python
|
mne/externals/tqdm/_tqdm/auto.py
|
Python
|
bsd-3-clause
| 231
| 0
|
import warnings
from .std import TqdmExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore",
|
category=TqdmExperimentalWarning)
from .autono
|
tebook import tqdm, trange
__all__ = ["tqdm", "trange"]
|
FlowFX/unkenmathe.de
|
src/system_tests/test_adding_and_editing_exercises.py
|
Python
|
agpl-3.0
| 4,459
| 0.000673
|
"""Selenium tests."""
from .conftest import assert_regex, wait_for, wait_for_true
from selenium.common.exceptions import NoSuchElementException
from um.exercises.factories import ExerciseFactory
import pytest
import time
def test_florian_adds_a_new_exercise(browser, live_server):
# Florian wants to add a new exercise.
# He starts by opening the home page,
browser.get(live_server.url)
# and sees that it's there.
wait_for(lambda: browser.find_element_by_id('id_add_exercise'))
# He finds the "Plus" navbar menu and clicks it.
browser.find_element_by_id('navbarDropdownPlusMenu').click()
time.sleep(0.5)
# There is the the "Add new exercise" button.
browser.find_element_by_id('id_add_exercise').click()
# Next, he is presented a form to create a new exercise
wait_for(lambda: browser.find_element_by_tag_name('form'))
assert_regex(browser.current_url, '.+/new')
# He enters a simple exercise into the text area,
browser.find_element_by_id('id_text').send_keys('What is 5 + 4?')
# and clicks the submit button.
browser.find_element_by_id('submit-id-submit').click()
# Then, he gets back to the home page,
wait_for(lambda: browser.find_element_by_id('id_add_exercise'))
assert_regex(browser.current_url, '.+/')
# and the new exercise is displayed there.
assert 'What is 5 + 4?' in browser.page_source
def test_user_edits_an_exercise(browser, live_server, user):
# GIVEN an existing exercise
ex = ExerciseFactory.create(author=user)
# Florian goes to the home page and wants to edit this exercise
browser.get(live_server.url)
# and sees that it's there.
wait_for(lambda: browser.find_element_by_id(f'id_edit_{ex.id}'))
# He clicks the Edit button,
browser.find_element_by_id(f'id_edit_{ex.id}').click()
# and gets to the update form.
wait_for(lambda: browser.find_element_by_tag_name('form'))
assert_regex(browser.current_url, f'.+/{ex.id}/edit')
# He replaces the exercise text,
textarea = browser.find_element_by_id('id_text')
textarea.clear()
textarea.send_keys('This exercise isn\'t good enough. \( 5 + 4 = 9 \).')
# and clicks submit.
browser.find_element_by_id('submit-id-submit').click()
# Then, he gets back to the home page,
wait_for(lambda: browser.find_element_by_id('id_add_exercise'))
assert_regex(browser.current_url, '.+/')
# and the new text is displayed.
assert 'This exercise ' in browser.page_source
def test_anonymous_user_views_an_exercise(anon_browser, live_server):
browser = anon_browser
# GIVEN an existing exercise
ex = ExerciseFactory.create()
# Florian goes to the home page and wants to inspect the exercise,
browser.get(live_server.url)
# sees that it's there.
wait_for(lambda: browser.find_element_by_id(f'id_detail_{ex.id}'))
# He clicks the Details button,
browser.find_element_by_id(f'id_detail_{ex.id}').click()
# and gets to the detail view.
wait_for(lambda: browser.find_element_by_id('id_text'))
assert_regex(browser.current_url, f'.+/{ex.id}/')
# He clicks the `back` button.
browser.find_element_by_id('back-id-back').c
|
lick()
# Then, he gets back to the ho
|
me page,
assert_regex(browser.current_url, '.+/')
def test_florian_deletes_an_exercise(browser, live_server, user):
# GIVEN an existing exercise
ex = ExerciseFactory.create(author=user)
# Florian goes to the home page and wants to delete this exercise
browser.get(live_server.url)
# and sees that it's there.
wait_for(lambda: browser.find_element_by_id(f'id_detail_{ex.id}'))
# He clicks the View button,
browser.find_element_by_id(f'id_detail_{ex.id}').click()
# and gets to the detail view
wait_for(lambda: browser.find_element_by_id(f'id_delete_{ex.id}'))
assert_regex(browser.current_url, f'.+/{ex.id}/')
# He clicks the "Delete" button
browser.find_element_by_id(f'id_delete_{ex.id}').click()
# let the modal pop up
time.sleep(0.5)
# And confirms the deletion
browser.find_element_by_id('submit-id-submit').click()
# Then, he gets back to the home page,
wait_for(lambda: browser.find_element_by_id('id_add_exercise'))
assert_regex(browser.current_url, '.+/')
# and the exercise is gone.
with pytest.raises(NoSuchElementException):
browser.find_element_by_id(f'id_detail_{ex.id}')
|
abalakh/robottelo
|
tests/foreman/ui/test_role.py
|
Python
|
gpl-3.0
| 3,848
| 0
|
# -*- encoding: utf-8 -*-
"""Test class for Roles UI"""
from ddt import ddt
from fauxfactory import gen_string
from nailgun import entities
from robottelo.decorators import data
from robottelo.helpers import generate_strings_list, invalid_names_list
from robottelo.test import UITestCase
from robottelo.ui.factory import make_role
from robottelo.ui.locators import common_locators
from robottelo.ui.session import Session
@ddt
class Role(UITestCase):
"""Implements Roles tests from UI"""
@data(*generate_strings_list(len1=10))
def test_create_role_basic(self, n
|
ame):
"""@Test: Create new role
@Feature: Role - Positive Create
@Assert: Role is created
"""
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(self.role.search(name))
@data('', ' ')
def test_negative
|
_create_role_with_blank_name(self, name):
"""@Test: Create new role with blank and whitespace in name
@Feature: Role - Negative Create
@Assert: Role is not created
"""
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['name_haserror']))
@data(*invalid_names_list())
def test_negative_create_role_with_too_long_names(self, name):
"""@Test: Create new role with 256 characters in name
@Feature: Role - Negative Create
@Assert: Role is not created
"""
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['name_haserror']))
@data(*generate_strings_list(len1=10))
def test_remove_role(self, name):
"""@Test: Delete an existing role
@Feature: Role - Positive Delete
@Assert: Role is deleted
"""
with Session(self.browser) as session:
make_role(session, name=name)
self.role.delete(name)
@data(*generate_strings_list(len1=10))
def test_update_role_name(self, new_name):
"""@Test: Update role name
@Feature: Role - Positive Update
@Assert: Role is updated
"""
name = gen_string('utf8')
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(self.role.search(name))
self.role.update(name, new_name)
self.assertIsNotNone(self.role.search(new_name))
def test_update_role_permission(self):
"""@Test: Update role permissions
@Feature: Role - Positive Update
@Assert: Role is updated
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(self.role.search(name))
self.role.update(
name,
add_permission=True,
resource_type='Architecture',
permission_list=['view_architectures', 'create_architectures'],
)
def test_update_role_org(self):
"""@Test: Update organization under selected role
@Feature: Role - Positive Update
@Assert: Role is updated
"""
name = gen_string('alpha')
org = entities.Organization().create()
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(self.role.search(name))
self.role.update(
name,
add_permission=True,
resource_type='Activation Keys',
permission_list=['view_activation_keys'],
organization=[org.name],
)
|
transcranial/keras-js
|
python/model_pb2.py
|
Python
|
mit
| 6,998
| 0.003144
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: model.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='model.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0bmodel.proto\"\x89\x01\n\x07Weights\x12\x12\n\nlayer_name\x18\x01 \x01(\t\x12\x13\n\x0bweight_name\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\r\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x12\x14\n\x0cquantize_min\x18\x06 \x01(\x02\x12\x14\n\x0cquantize_max\x18\x07 \x01(\x02\"\x80\x01\n\x05Model\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x15\n\rkeras_version\x18\x03 \x01(\t\x12\x0f\n\x07\x62\x61\x63kend\x18\x04 \x01(\t\x12\x14\n\x0cmodel_config\x18\x05 \x01(\t\x12\x1f\n\rmodel_weights\x18\x06 \x03(\x0b\x32\x08.Weightsb\x06proto3')
)
_WEIGHTS = _descriptor.Descriptor(
name='Weights',
full_name='Weights',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='layer_name', full_name='Weights.layer_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_name', full_name='Weights.weight_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shape', full_name='Weights.shape', index=2,
number=3, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='Weights.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='Weights.data', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quantize_min', full_name='Weights.quantize_min', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quantize_max', full_name='Weights.quantize_max', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=153,
)
_MODEL = _descriptor.Descriptor(
name='Model',
full_name='Model',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descripto
|
r.FieldDescriptor(
name='id', full_name='Model.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='Model.name', index=1,
number=2, t
|
ype=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keras_version', full_name='Model.keras_version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='backend', full_name='Model.backend', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model_config', full_name='Model.model_config', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model_weights', full_name='Model.model_weights', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=156,
serialized_end=284,
)
_MODEL.fields_by_name['model_weights'].message_type = _WEIGHTS
DESCRIPTOR.message_types_by_name['Weights'] = _WEIGHTS
DESCRIPTOR.message_types_by_name['Model'] = _MODEL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Weights = _reflection.GeneratedProtocolMessageType('Weights', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTS,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:Weights)
))
_sym_db.RegisterMessage(Weights)
Model = _reflection.GeneratedProtocolMessageType('Model', (_message.Message,), dict(
DESCRIPTOR = _MODEL,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:Model)
))
_sym_db.RegisterMessage(Model)
# @@protoc_insertion_point(module_scope)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.