| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
exercicios_programas/ex7_contadores/exercicio5.7.py
|
robinson-1985/livro_python
| 0
|
12778851
|
''' 5.7 Modify the previous program so that the user also enters the start and
end of the multiplication table, instead of defaulting to 1 and 10. '''
| 2
| 2
|
island_backup/islands/the2chan.py
|
mishrasanskriti802/island-backup
| 17
|
12778852
|
from .bases import BasePage, BaseBlock
from bs4 import BeautifulSoup
import re
from urllib import parse
def openbr2closebr(html: str):
return html.replace('<br>', '<br/>')
class The2ChanBlock(BaseBlock):
request_info = {
'cdn_host': None,
'headers': {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Dnt': '1',
'Pragma': 'no-cache',
'Referer': 'http://www.2chan.net/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36'
}
}
def __init__(self, block_data, page_domain):
super().__init__(block_data)
self._page_domain = page_domain
@property
def id(self):
info_string = self._block.find('a', class_='del').previous
return info_string[info_string.find('No')+3:].strip()
@property
def uid(self):
return self._block.find('font', color='#117743').text
@property
def created_time(self):
info_string = self._block.find('a', class_='del').previous
return info_string[: info_string.find('No')].strip()
def _get_content(self):
div = self._block.find('blockquote')
return ''.join(str(e).strip() for e in div.contents)
def _deal_with_reply(self, content):
return re.sub(r'<font color.*?>(.*?)</font>', r'<span class="reply-color">\1</span>', content)
@property
def image_url(self):
tag = self._block.find('a', target='_blank')
if not tag:
return None
path = tag.attrs.get('href')
url = parse.urljoin(self._page_domain, path)
return url
class The2ChanFirstBlock(The2ChanBlock):
@property
def image_url(self):
tag = self._block.find('a', target='_blank', recursive=False)
if not tag:
return None
path = tag.attrs.get('href')
url = parse.urljoin(self._page_domain, path)
return url
class The2ChanPage(BasePage):
block_model = The2ChanBlock
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
html = openbr2closebr(self.data)
self.bs = BeautifulSoup(html, 'html.parser')
@staticmethod
def url_page_combine(base_url, page_num):
return base_url
def has_next(self):
return False
@property
def total_page(self):
return None
@staticmethod
def get_thread_id(url):
return re.match(r'.*?/(\d+)\.htm', url).group(1)
def thread_list(self):
domain = parse.urljoin(self.base_url, '/')
top = The2ChanFirstBlock(self.bs.find(class_='thre'), page_domain=domain)
threads = [self.block_model(b, page_domain=domain) for b in self.bs.find_all('td', class_='rtd')]
threads.insert(0, top)
return threads
@staticmethod
def sanitize_url(url):
parts = parse.urlparse(url)
return '{0.scheme}://{0.netloc}{0.path}'.format(parts)
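
# A minimal usage sketch: the two static helpers above are pure string
# manipulation, so they can be tried without any network access (the thread
# URL here is made up for illustration).
if __name__ == '__main__':
    url = 'http://may.2chan.net/b/res/123456.htm?foo=bar'
    print(The2ChanPage.sanitize_url(url))   # -> http://may.2chan.net/b/res/123456.htm
    print(The2ChanPage.get_thread_id(url))  # -> 123456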
| 2.96875
| 3
|
src/comparator_code_processor.py
|
williamnash/CSCCoffea
| 0
|
12778853
|
"""Processor to create histograms and generate plots from fed comparator code."""
import awkward as ak
from coffea import hist, processor
from coffea.nanoevents.methods import candidate
ak.behavior.update(candidate.behavior)
class ComparatorCodeProcessor(processor.ProcessorABC):
"""Runs the analysis."""
    def __init__(self):
        """Initialize."""
        dataset_axis = hist.Cat("pcc", "Pattern-Comparator Code Combination")
        # First, define a multi-dimensional histogram to hold the data,
        # following this form:
        #     "tree": hist.Hist(
        #         "Thing we're counting",
        #         hist.Bin("leaf", "$units$", n_bins, min_value, max_value),
        #     ),
self._accumulator = processor.dict_accumulator(
{
"allevents": processor.defaultdict_accumulator(float),
"events": hist.Hist(
"Events",
dataset_axis,
hist.Bin("nMuons", "Number of muons", 6, 0, 6),
),
"LUT": hist.Hist(
"LUT",
dataset_axis,
hist.Bin("position", "$position$", 20, -1, 0),
hist.Bin("slope", "$slope$", 20, -0.5, 0.5),
hist.Bin("pt", "$pt$", 50, 0, 50),
hist.Bin("multiplicity", "$multiplicity$", 3, 1, 4),
),
}
)
@property
def accumulator(self):
"""Return pieces added together for each parallel processor."""
return self._accumulator
def process(self, events):
"""Operation done for each event."""
output = self.accumulator.identity()
dataset = events.metadata["dataset"]
output["allevents"][dataset] += len(events)
"""Now, you'll need to unzip the variable, this stores the data into
the histograms we defined earlier.
variable = ak.zip(
{
"leaf": location_in_root_file,
},
)"""
"""Finally, we must assign the histograms to the output to return
to template_executor.py for plotting.
output["variable"].fill(
leaf=ak.flatten(variable.leaf),
)"""
lut = ak.zip(
{
"position": events.position,
"slope": events.slope,
"pt": events.pt,
"multiplicity": events.multiplicity,
},
)
output["LUT"].fill(
pcc=dataset,
position=lut.position,
slope=lut.slope,
pt=lut.pt,
multiplicity=lut.multiplicity,
)
return output
def postprocess(self, accumulator):
"""Return our total."""
return accumulator
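
# A sketch of how a processor like this is typically driven. Assumptions: the
# fileset path and tree name below are placeholders, and run_uproot_job is the
# entry point of the older coffea releases this code targets:
#     fileset = {"pcc_sample": ["comparator_codes.root"]}
#     output = processor.run_uproot_job(
#         fileset,
#         treename="Events",
#         processor_instance=ComparatorCodeProcessor(),
#         executor=processor.iterative_executor,
#         executor_args={},
#     )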
| 3.046875
| 3
|
src/network_analyzer/cli.py
|
nekhaly/network-analyzer
| 0
|
12778854
|
import click
from network_analyzer.analyzer import Analyzer
@click.group()
def main():
pass
@main.command(short_help="Analyze networks")
@click.option(
"--jsonrpc",
help="JsonRPC URL of the ethereum client",
default="https://tlbc.rpc.anyblock.tools",
show_default=True,
metavar="URL",
)
@click.option(
"--relay",
"relay_api_url",
help="Relay API URL",
default="http://localhost:5000/api/v1",
show_default=True,
metavar="URL",
)
@click.option(
"--output",
"output_path",
help="Path of the directory to output the csv to",
default=None,
type=click.Path(dir_okay=True, writable=True),
)
def analyze(jsonrpc: str, relay_api_url: str, output_path: str):
analyzer = Analyzer(jsonrpc, output_path, relay_api_url)
analyzer.analyze_bridge_transfers()
analyzer.analyze_networks()
analyzer.analyze_dead_identities()
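
# A minimal sketch of exercising the CLI in-process with click's test runner
# (a real run needs a reachable JSON-RPC endpoint and relay API):
#     from click.testing import CliRunner
#     runner = CliRunner()
#     result = runner.invoke(main, ["analyze", "--output", "./csv"])
#     print(result.output)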
| 2.578125
| 3
|
lambda/functions/config.py
|
tylabs/quicksand
| 46
|
12778855
|
import boto3
import botocore
# General Settings Here
# S3 bucket - no secret is needed if the Lambda role has been granted S3 permissions
boto_s3 = boto3.client(
's3',
region_name='##region###',
config=botocore.config.Config(s3={'addressing_style':'path'})
)
qs_bucket = '##bucketname###'
qs_url = 'https://scan.tylabs.com/report?uuid='
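# A usage sketch, assuming the placeholders above are filled in and that reports
# are stored under a key like 'results/<uuid>.json' (hypothetical layout):
#     obj = boto_s3.get_object(Bucket=qs_bucket, Key='results/<uuid>.json')
#     print(qs_url + '<uuid>')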
| 1.648438
| 2
|
aas_timeseries/data.py
|
astrofrog/aas-time-series-affiliated
| 3
|
12778856
|
import uuid
from astropy.units import Quantity, UnitsError
__all__ = ['Data']
class Data:
def __init__(self, time_series):
self.time_series = time_series
self.uuid = str(uuid.uuid4())
self.time_column = 'time'
def column_to_values(self, colname, unit):
# First make sure the column is a quantity
quantity = Quantity(self.time_series[colname], copy=False)
if quantity.unit.is_equivalent(unit):
return quantity.to_value(unit)
else:
raise UnitsError(f"Cannot convert the units '{quantity.unit}' of "
f"column '{colname}' to the required units of "
f"'{unit}'")
def unit(self, colname):
return Quantity(self.time_series[colname], copy=False).unit
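
# A minimal usage sketch, assuming an astropy TimeSeries as the underlying
# container (the column name and values are illustrative):
if __name__ == '__main__':
    from astropy import units as u
    from astropy.timeseries import TimeSeries
    ts = TimeSeries(time_start='2020-01-01', time_delta=1 * u.day, n_samples=3)
    ts['flux'] = [1.0, 2.0, 3.0] * u.mJy
    data = Data(ts)
    print(data.unit('flux'))                    # mJy
    print(data.column_to_values('flux', u.Jy))  # [0.001 0.002 0.003]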
| 3.125
| 3
|
ufcnn-keras/models/mnist_autoencoder_1d.py
|
mikimaus78/ml_monorepo
| 51
|
12778857
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.layers.convolutional_transpose import Convolution1D_Transpose
"""
modified from https://github.com/loliverhennigh/All-Convnet-Autoencoder-Example
An autoencoder with 2D Convolution-Transpose layer in TF
"""
def save_neuralnet (model, model_name):
json_string = model.to_json()
open(model_name + '_architecture.json', 'w').write(json_string)
model.save_weights(model_name + '_weights.h5', overwrite=True)
def load_neuralnet (model_name):
    # The custom Convolution1D_Transpose class must be supplied as a custom object when loading.
    model = model_from_json(open(model_name + '_architecture.json').read(),
                            {'Convolution1D_Transpose': Convolution1D_Transpose})
model.load_weights(model_name+'_weights.h5')
return model
batch_size = 100 # total number of elements in the X_ and Y_ (60000 train, 10000 test) arrays must be a multiple of batch_size!
nb_epoch = 500
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 28 * 14
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 2
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
#Y_train = np_utils.to_categorical(y_train, nb_classes)
#Y_test = np_utils.to_categorical(y_test, nb_classes)
Y_train = X_train.reshape(-1,1,784)
Y_test = X_test.reshape(-1,1,784)
print("Y SHAPE",Y_train.shape)
model = Sequential()
nb_filter = 16
# input 28 * 28, output 24 * 24
model.add(Convolution2D(nb_filter, 5, 5, input_shape=((1, img_rows, img_cols)), border_mode = 'valid'))
model.add(Activation('relu'))
# input 24 * 24, output 20 * 20
model.add(Convolution2D(nb_filter, 5, 5, border_mode = 'valid'))
model.add(Activation('relu'))
# input 20 * 20, output 16 * 16
model.add(Convolution2D(nb_filter, 5, 5, border_mode = 'valid'))
model.add(Activation('relu'))
# input 16 * 16, output 12 * 12 * 16
model.add(Convolution2D(nb_filter, 5, 5, border_mode = 'valid'))
model.add(Activation('relu'))
# input 12 * 12, output 8 * 8 * 16
model.add(Convolution2D(nb_filter, 5, 5, border_mode = 'valid'))
model.add(Activation('relu'))
# input 8 * 8, output 7 * 7 * 16
model.add(Convolution2D(5, 2, 2, border_mode = 'valid'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dropout(0.2))
#model.add(Reshape((1,1,98)))
model.add(Reshape((5,49)))
# input 98, needs to be expanded 8-fold
W_shape = [2, 5, 5]
b_shape = [5]
strides = [1,2,1]
padding='valid'
# keep everything 1:1
# double everything
#deconv_shape = [batch_size, output_size_y, output_size_x, number_of_filters]
deconv_shape = [batch_size, 98*1, 5]
model.add(Convolution1D_Transpose(deconv_shape=deconv_shape, W_shape=W_shape, b_shape=b_shape, strides=strides, padding=padding))
model.add(Activation('relu'))
deconv_shape = [batch_size, 98*2, 5]
model.add(Convolution1D_Transpose(deconv_shape=deconv_shape, W_shape=W_shape, b_shape=b_shape, strides=strides, padding=padding))
model.add(Activation('relu'))
deconv_shape = [batch_size, 98*4, 5]
model.add(Convolution1D_Transpose(deconv_shape=deconv_shape, W_shape=W_shape, b_shape=b_shape, strides=strides, padding=padding))
model.add(Activation('relu'))
deconv_shape = [batch_size, 98*8, 1]  # 98 * 8 is the number of output rows, 1 output column
W_shape = [2, 1, 5]
b_shape = [1]
model.add(Convolution1D_Transpose(deconv_shape=deconv_shape, W_shape=W_shape, b_shape=b_shape, strides=strides, padding=padding))
model.add(Activation('relu'))
#model.add(Reshape((1,28,28)))
print(model.summary())
model.compile(loss='mse', optimizer='rmsprop')
print("Before FIT")
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1)
score = model.evaluate(X_test, Y_test, verbose=0, batch_size=batch_size)
print('Test score:', score)
y_pred = model.predict(X_test[0:batch_size], verbose=0, batch_size=batch_size)
for i in range(batch_size):
plt.imsave(arr=y_pred[i].reshape((28,28)),fname='number_'+str(i)+'_is_'+str(y_test[i])+'.png')
#print ('Number: ',i,' is ', y_test[i])
# if your machine has a display attached, you can use this instead (better graphics)
#imgplot = plt.imshow(y_pred[0].reshape((28,28)))
#plt.savefig('new_run_'+str(i)+'.png'
save_neuralnet (model, 'mnistauto')
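# A round-trip sketch for reloading the trained model later; note that
# load_neuralnet above already passes the custom layer through as a custom object:
#     model = load_neuralnet('mnistauto')
#     reconstructions = model.predict(X_test[0:batch_size], batch_size=batch_size)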
| 3.0625
| 3
|
aio/cluster/zk_client.py
|
eigenphi/gcommon
| 3
|
12778858
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# created: 2015-04-22
"""ZooKeeper 客户端。
和 asyncio 联用时请注意:
所有 watch observer 必须使用 reactor.callFromThread() 将 watch 结果返回给 twisted 线程。
为调用方便,请使用 twisted_kazoo.twisted_callback 对回调进行封装。
"""
import logging
import threading
from queue import Queue
from kazoo.client import KazooClient
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.protocol.states import KazooState, KeeperState
from gcommon.aio import gasync
from gcommon.utils.gnet import ConnectionStatus
logger = logging.getLogger('kazoo')
class ZookeeperObserver(object):
ZK_Conn_Connecting = 'CONNECTING'
def __init__(self):
self._client_manager = None
self._kazoo_client = None
self._conn_status = ConnectionStatus.Initialized
self._zk_conn_status = self.ZK_Conn_Connecting
def set_client_manager(self, client_manager):
self._client_manager = client_manager
self._kazoo_client = self._client_manager.kazoo_client
def on_connection_failed(self, reason=None):
"""Client Manager 回调"""
logger.error('cannot connect to zookeeper, reason: %s', reason)
self._conn_status = ConnectionStatus.Closed
self._zk_conn_status = KazooState.LOST
gasync.run_in_main_thread(self._on_conn_failed)
def _on_conn_opened(self):
"""连接打开或者恢复"""
pass
def _on_conn_lost(self):
"""会话断开"""
pass
def _on_conn_suspended(self):
"""连接断开,会话挂起,尝试恢复中"""
pass
def _on_conn_failed(self):
"""第一次连接失败,无法建立会话"""
pass
def on_connection_status_changed(self, state):
"""在 ZK 的独立线程中调用(禁止在主线程调用)"""
logger.debug('connection status changed from %s to %s', self._zk_conn_status, state)
self._zk_conn_status = state
if state == KazooState.CONNECTED:
if self._kazoo_client.client_state == KeeperState.CONNECTED_RO:
logger.debug("Read only mode!")
else:
logger.debug("Read/Write mode!")
self._conn_status = ConnectionStatus.Connected
gasync.run_in_main_thread(self._on_conn_opened)
elif state == KazooState.LOST:
logger.debug('kazoo connection lost (client closed)')
self._conn_status = ConnectionStatus.Closed
gasync.run_in_main_thread(self._on_conn_lost)
elif state == KazooState.SUSPENDED:
logger.debug('kazoo connection suspended (maybe the server is gone)')
self._conn_status = ConnectionStatus.Suspended
gasync.run_in_main_thread(self._on_conn_suspended)
class _ZookeeperClientThread(threading.Thread):
"""运行 kazoo 客户端的专用线程。"""
def __init__(self, client):
threading.Thread.__init__(self, daemon=True)
self._client = client
def run(self):
logger.info('enter kazoo thread')
self._client.thread_main()
logger.info('leave kazoo thread')
class ZookeeperClient(object):
"""Kazoo 客户端管理器,用于管理 zk connection 和跨线程通信。
不处理任何实际业务。处理业务的是 ZookeeperService.
"""
def __init__(self, observer, server_addr):
self._observer = observer
self._kazoo_client = KazooClient(hosts=server_addr)
self._q_service_control = Queue()
self._is_running = True
self._thread = _ZookeeperClientThread(self)
@property
def kazoo_client(self):
return self._kazoo_client
def is_running(self):
return self._is_running
def send_control_message(self, message):
"""发送控制消息,控制消息必须在客户端的启动线程中处理"""
self._q_service_control.put(message)
def _process_service_control_message(self):
"""处理控制消息"""
message = self._q_service_control.get()
logger.debug('process control message: %s', message)
if message == "stop":
self._is_running = False
self._kazoo_client.stop()
def start(self):
"""启动独立线程运行 zookeeper 客户端 - 主线程调用"""
assert gasync.AsyncThreads.is_main_loop()
logger.info('start kazoo client')
self._kazoo_client.add_listener(self._observer.on_connection_status_changed)
self._thread.start()
def stop(self):
logger.info('stop kazoo client')
self.send_control_message('stop')
def wait(self):
logger.info('wait kazoo client exiting')
self._thread.join()
logger.info('kazoo client stopped')
def thread_main(self):
"""尝试连接服务器,如果多次连接失败则抛出超时错"""
try:
self._kazoo_client.start()
except KazooTimeoutError as e:
self._observer.on_connection_failed(e)
return
except Exception as e:
self._observer.on_connection_failed(e)
return
while self.is_running():
self._process_service_control_message()
def create_lock(self, node_root, node_name):
return KazooLock(self._kazoo_client, node_root, node_name)
class KazooLock(object):
def __init__(self, client: KazooClient, node_root, node_name):
self._kazoo_client = client
self._node_root = node_root
self._node_name = node_name
self._node_path = f"{node_root}/{node_name}."
self._full_path = ""
self._locked = False
async def acquire(self):
result = self._kazoo_client.create(
self._node_path, b"", makepath=True, ephemeral=True, sequence=True
)
event = gasync.AsyncEvent()
@gasync.callback_run_in_main_thread
def _on_lock_nodes_changed(nodes):
if not nodes:
return
nodes.sort(key=lambda x: x.split(".")[1], reverse=False)
name, _sequence = nodes[0].split(".")
if name == self._node_name:
self._full_path = f"{self._node_root}/{nodes[0]}"
event.notify(True)
self._kazoo_client.ChildrenWatch(self._node_root, _on_lock_nodes_changed)
await event.wait()
return self
def release(self):
try:
self._kazoo_client.delete(self._full_path)
        except Exception:
logger.fatal("kazoo lock release error, %s", self._node_path)
raise
async def __aenter__(self):
        return await self.acquire()
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.release()
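
# A usage sketch, assuming a reachable ZooKeeper and a running gcommon main loop:
#     observer = ZookeeperObserver()
#     client = ZookeeperClient(observer, '127.0.0.1:2181')
#     observer.set_client_manager(client)
#     client.start()
#     async with client.create_lock('/locks', 'job-1'):
#         ...  # critical section
#     client.stop()
#     client.wait()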
| 2.15625
| 2
|
glue_jupyter/table/tests/test_table.py
|
ibusko/glue-jupyter
| 57
|
12778859
|
from glue_jupyter.table import TableViewer
def test_table_filter(app, dataxyz):
table = app.table(data=dataxyz)
assert len(table.layers) == 1
assert table.widget_table is not None
table.widget_table.checked = [1]
table.apply_filter()
assert len(table.layers) == 2
subset = table.layers[1].layer
assert table.widget_table.selections == [subset.label]
assert [k['text'] for k in table.widget_table.headers_selections] == [subset.label]
assert table.widget_table.selection_colors == [subset.style.color]
app.subset('test', dataxyz.id['x'] > 1)
assert len(table.layers) == 3
assert len(table.widget_table.selections) == 2
def test_table_add_remove_data(app, dataxyz, dataxz):
table = app.new_data_viewer(TableViewer, data=None, show=True)
assert len(table.layers) == 0
assert table.widget_table.total_length == 0
table.add_data(dataxyz)
assert table.widget_table.items, "table should fill automatically"
assert table.widget_table.items[0]['z'] == dataxyz['z'][0]
assert table.widget_table.total_length, "total length should grow"
assert dataxz['z'][0] != dataxyz['z'][0], "we assume this to check data changes in the table"
table.add_data(dataxz)
assert table.widget_table.data is dataxz
assert table.widget_table.items[0]['z'] == dataxz['z'][0]
assert len(table.layers) == 2
table.remove_data(dataxz)
assert table.widget_table.data is dataxyz
assert table.widget_table.items[0]['z'] == dataxyz['z'][0]
assert len(table.layers) == 1
table.remove_data(dataxyz)
assert table.widget_table.data is None
assert table.widget_table.items == []
assert len(table.layers) == 0
| 2.5
| 2
|
modules/other_ns.py
|
nam-pi/data_conversion
| 1
|
12778860
|
from rdflib import Namespace
class Other_ns:
geonames = Namespace("https://sws.geonames.org/")
gnd = Namespace("https://d-nb.info/gnd/")
wikidata = Namespace("https://www.wikidata.org/entity/")
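
# A minimal usage sketch: rdflib Namespaces build URIRefs via item (or attribute)
# access, e.g. when tagging entities during conversion (identifiers illustrative):
if __name__ == '__main__':
    print(Other_ns.wikidata['Q42'])   # https://www.wikidata.org/entity/Q42
    print(Other_ns.gnd['118529579'])  # https://d-nb.info/gnd/118529579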
| 2.0625
| 2
|
csv-manipulation.py
|
ismailfaruk/COMP551-mp1
| 1
|
12778861
|
import pandas as pd
import sklearn.datasets as datasets
import pandas_ml as pdml
import numpy as np
# READ DATASETS AND CLEAR NAN COLUMNS AND ROWS
search_data = pd.read_csv(
"c:/Users/<NAME>/Desktop/Arya/comp551/2020_US_weekly_symptoms_dataset.csv", sep=',',
header=0, engine='python')
search_data.dropna(axis="columns", how="all", inplace=True)
search_data.dropna(axis="rows", how="all", thresh=12, inplace=True)
search_data.dropna(axis="columns", how="all", inplace=True)
search_data['date'] = pd.to_datetime(search_data['date'])
search_data['week'] = search_data['date'].dt.isocalendar().week
search_data.sort_values(['open_covid_region_code', 'week'],
ascending=[True, True], inplace=True)
print(search_data.shape)
search_data.to_csv(
'c:/Users/<NAME>/Desktop/Arya/comp551/cleared_search.csv', index=0)
agg_data = pd.read_csv(
"c:/Users/<NAME>/Desktop/Arya/comp551/aggregated_cc_by.csv", sep=',',
header=0, engine='python')
# agg_data.dropna(axis="columns", how="all", inplace=True)
# EXTRACT USA DATA FROM DATASET 2
indexNames = agg_data[agg_data['open_covid_region_code'].str.find(
'US-') < 0].index
agg_data.drop(indexNames, inplace=True)
agg_data['date'] = pd.to_datetime(agg_data['date'])
indexNames2 = agg_data[agg_data['date'] < '2020-01-06'].index
agg_data.drop(indexNames2, inplace=True)
agg_data.dropna(axis="columns", how="all", inplace=True)
# # CONVERT DAILY DATA TO WEEKLY
agg_data['week'] = agg_data['date'].dt.isocalendar().week
agg_data.fillna(0, inplace=True)
print(agg_data.shape)
logic = {
# 'open_covid_region_code': 'first',
# 'region_name': 'first',
# 'cases_cumulative': 'last',
# 'cases_new': 'sum',
# 'cases_cumulative_per_million': 'last',
# 'cases_new_per_million': 'sum',
# 'deaths_cumulative': 'last',
# 'deaths_new': 'sum',
# 'deaths_cumulative_per_million': 'last',
# 'deaths_new_per_million': 'sum',
# 'tests_new': 'sum',
# 'tests_cumulative': 'last',
# 'tests_cumulative_per_thousand': 'last',
# 'tests_new_per_thousand': 'sum',
# 'test_units': 'last',
# 'hospitalized_current': 'mean',
'hospitalized_new': 'sum',
'hospitalized_cumulative': 'last',
# 'discharged_new': 'sum',
# 'discharged_cumulative': 'last',
# 'icu_current': 'mean',
# 'icu_cumulative': 'last',
# 'ventilator_current': 'mean',
# 'school_closing': 'max',
# 'school_closing_flag': 'max',
# 'workplace_closing': 'max',
# 'workplace_closing_flag': 'max',
# 'cancel_public_events_flag': 'max',
# 'restrictions_on_gatherings': 'max',
# 'restrictions_on_gatherings_flag': 'max',
# 'close_public_transit': 'max',
# 'close_public_transit_flag': 'max',
# 'stay_at_home_requirements': 'max',
# 'stay_at_home_requirements_flag': 'max',
# 'restrictions_on_internal_movement': 'max',
# 'restrictions_on_internal_movement_flag': 'max',
# 'international_travel_controls': 'max',
# 'income_support': 'max',
# 'income_support_flag': 'max',
# 'debt_contract_relief': 'max',
# 'fiscal_measures': 'max',
# 'international_support': 'max',
# 'public_information_campaigns': 'max',
# 'public_information_campaigns_flag': 'max',
# 'testing_policy': 'max',
# 'contact_tracing': 'max',
# 'emergency_investment_in_healthcare': 'max',
# 'investment_in_vaccines': 'max',
# 'wildcard': 'max',
# 'confirmed_cases': 'last',
# 'confirmed_deaths': 'last',
# 'stringency_index': 'max',
# 'stringency_index_for_display': 'max',
# 'stringency_legacy_index': 'max',
# 'stringency_legacy_index_for_display': 'max',
# 'government_response_index': 'max',
# 'government_response_index_for_display': 'max',
# 'containment_health_index': 'max',
# 'containment_health_index_for_display': 'max',
# 'economic_support_index': 'max',
# 'economic_support_index_for_display': 'max'
}
df1 = agg_data.groupby(
['open_covid_region_code', 'week'], as_index=False).agg(logic)
print(df1.shape)
df1.to_csv('c:/Users/<NAME>/Desktop/Arya/comp551/cleared_agg.csv')
df2 = pd.merge(left=search_data, right=df1,
on=['open_covid_region_code', 'week'])
df2.to_csv('c:/Users/<NAME>/Desktop/Arya/comp551/merged_data.csv', index=0)
print(df2.shape)
# SET TARGET AND NORMALIZE DATA
# dataframe = pdml.ModelFrame(df2.to_dict(orient='list'))
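# A minimal normalization sketch under the assumption that all numeric columns of
# the merged frame should be scaled to [0, 1] (not part of the original pipeline):
#     from sklearn.preprocessing import MinMaxScaler
#     numeric_cols = df2.select_dtypes(include=[np.number]).columns
#     df2[numeric_cols] = MinMaxScaler().fit_transform(df2[numeric_cols])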
| 2.828125
| 3
|
multimodal/db/models/sound.py
|
omangin/multimodal
| 17
|
12778862
|
# -*- coding: utf-8 -*-
__author__ = '<NAME> <<EMAIL>>'
__date__ = '10/2011'
import os
import json
class Record:
def __init__(self, db, speaker, audio, tags,
transcription, style):
self.db = db
self.spkr_id = speaker
self.audio = audio # Name of audio file
self.tags = tags # tag indices
self.trans = transcription
self.style = style
def __lt__(self, other):
assert isinstance(other, Record)
return self.audio < other.audio
def __le__(self, other):
assert isinstance(other, Record)
return self == other or self < other
def __gt__(self, other):
return not self <= other
def __ge__(self, other):
return not self < other
def __str__(self):
return "<\"{}\", {}, tags: {} ({})>".format(
self.trans,
self.audio,
[self.db.tags[t] for t in self.tags],
self.db.spkrs[self.spkr_id],
)
def __repr__(self):
return self.__str__()
def get_tag_names(self):
return [self.db.tags[i] for i in self.tags]
def get_audio_path(self):
return os.path.join(self.db.get_wav_dir(self.spkr_id), self.audio)
def to_dict(self):
return {'audio_file': self.audio,
'tags': self.get_tag_names(),
'transcription': self.trans,
'style': self.style,
}
def getDom(self, doc):
rd = doc.createElement('record')
rd.setAttribute('style', self.style)
# Store audio file
audio = doc.createElement('audio')
audio.appendChild(doc.createTextNode(self.audio))
rd.appendChild(audio)
# Store transcription
trans = doc.createElement('trans')
trans.appendChild(doc.createTextNode(self.trans))
rd.appendChild(trans)
# Store tags
for t in self.get_tag_names():
tag = doc.createElement('tag')
tag.setAttribute('name', t)
rd.appendChild(tag)
return rd
@classmethod
def from_dict(cls, db, speaker_id, d):
return cls(db, speaker_id, d.get('audio_file', None),
[db.get_tag_add(t) for t in d.get('tags', [])],
d.get('transcription', None), d.get('style', None))
class DataBase:
WAV_DIR = '' # Directory for wav files
def __init__(self):
# Init record list, organized by speaker
self.records = []
# Init tag list:
# tags are strings to which an index is associated
# from their order of addition
self.tags = []
self.tag_id = {}
self.root = None
self.spkrs = []
self.spkrs_info = []
def has_tag(self, tag):
return tag in self.tag_id
def add_tag(self, tag):
if self.has_tag(tag):
raise ValueError("Existing tag: %s" % tag)
self.tags.append(tag)
tagid = len(self.tags) - 1
self.tag_id[tag] = tagid
return tagid
def get_tag_add(self, tag):
"""Return tag id and creates it if necessary.
"""
if not self.has_tag(tag):
tagid = self.add_tag(tag)
else:
tagid = self.tag_id[tag]
return tagid
def sort(self):
for r in self.records:
r.sort()
def get_wav_dir(self, speaker_id):
return os.path.join(self.root, self.WAV_DIR,
self.spkrs[speaker_id])
def write_json(self, filename):
data = {'root': self.root,
'tags': self.tags,
'speakers': []
}
for speaker, info, records in zip(self.spkrs, self.spkrs_info,
self.records):
speaker_records = [r.to_dict() for r in records]
data['speakers'].append({
'name': speaker,
'info': info,
'records': speaker_records,
})
with open(filename, 'w') as f:
json.dump(data, f, indent=2)
def load_from(self, filename, sort=True):
with open(filename, 'r') as f:
data = json.load(f)
self.root = data['root']
for t in data.get('tags', []):
self.add_tag(t)
for s in data['speakers']:
spk_id = self.add_speaker(s['name'], s.get('info', None))
for r in s['records']:
r = Record.from_dict(self, spk_id, r)
self.records[-1].append(r)
if sort:
self.sort()
def add_speaker(self, name, info=None):
if name in self.spkrs:
raise ValueError('There is already a speaker with the same name.')
self.records.append([])
self.spkrs.append(name)
self.spkrs_info.append(info)
return len(self.spkrs) - 1
def add_record(self, record):
if record.db is not self:
raise ValueError("Record belongs to another db.")
for t in record.tags:
if t > len(self.tags):
raise ValueError("Record contains invalid tags.")
self.records[record.spkr_id].append(record)
def size(self):
return sum(map(len, self.records))
def __str__(self):
return ("%s records for %s speakers and %s keywords"
% (self.size(), len(self.records), len(self.tags),))
def count_by_keywords(self):
nb_kw = len(self.tags)
counts = [[0 for _ in range(nb_kw)] for __ in range(nb_kw)]
for s in self.records:
for r in s:
for t in r.tags:
for u in r.tags:
if t != u:
counts[t][u] += 1
return counts
def all_records(self):
for s in self.records:
for r in s:
yield r
def statistics(self, display=True):
print("=======================")
print("* Database statistics *")
print("=======================")
print(self.__str__())
print("Records by speaker: %s" % ", ".join(map(len, self.records)))
counts = [0 for _ in self.tags]
for r in self.all_records():
for t in r.tags:
counts[t] += 1
print("Records by keywords: %s" % ", ".join(zip(self.tags, counts)))
| 2.765625
| 3
|
src/infi/storagemodel/aix/rescan.py
|
Infinidat/infi.storagemodel
| 6
|
12778863
|
from infi.execute import execute_assert_success
from .scsi import AixModelMixin, AixSCSIBlockDevice
from .native_multipath import AixMultipathBlockDevice
from infi.storagemodel.errors import DeviceError
class AixRescan(AixModelMixin):
def _add_new_devices(self):
execute_assert_success(["cfgmgr"])
def _get_all_devices(self, multipath):
klass = AixSCSIBlockDevice if not multipath else AixMultipathBlockDevice
devices = [klass(dev) for dev in self._get_dev_by_class("dac")] + \
[klass(dev) for dev in self._get_dev_by_class("disk")]
multipath_devices = self._get_multipath_devices()
filter_in = lambda dev: dev.get_display_name() in multipath_devices
filter_out = lambda dev: dev.get_display_name() not in multipath_devices
return list(filter(filter_in if multipath else filter_out, devices))
def _do_report_luns(self, device_name):
from infi.asi.executers import aix as aix_executer
from infi.asi.coroutines.sync_adapter import sync_wait as _sync_wait
from infi.asi.cdb.report_luns import ReportLunsCommand
device = "/dev/{}" + device_name
select_report = 0
with aix_executer(device) as executer:
command = ReportLunsCommand(select_report=int(select_report))
result = _sync_wait(command.execute(executer))
return result.lun_list
def _remove_missing_scsi_devices(self):
devices = self._get_all_devices(False)
# go over all devices, build a dict that contains: hct -> dict of lun->device-name
hcts = dict()
for device in devices:
hctl = device.get_hctl()
hct = (hctl.get_host(), hctl.get_channel(), hctl.get_target())
            hct_luns = hcts.setdefault(hct, dict())
hct_luns[hctl.get_lun()] = device.get_display_name()
# do SCSI report luns on lun 0 of each hct, then remove the luns we see that are not returned
        for hct, hct_luns in hcts.items():
lun0_device = hct_luns[0] # LUN 0 must exist
actual_luns = self._do_report_luns(lun0_device)
missing_luns = set(hct_luns.keys()) - set(actual_luns)
for missing_lun in missing_luns:
dev_name = hct_luns[missing_lun]
execute_assert_success(["rmdev", "-dl", dev_name])
def _remove_missing_multipath_devices(self):
devices = self._get_all_devices(True)
for device in devices:
try:
# try to send an IO to make the OS refresh the state path
device.get_scsi_standard_inquiry()
except DeviceError:
pass
paths_states = {path: path.get_state() for path in device.get_paths()}
if all(state == "down" for state in paths_states.values()):
execute_assert_success(["rmdev", "-dl", device.get_display_name()])
continue
for path, path_state in paths_states.items():
if path_state == "down":
execute_assert_success(["rmpath", "-dl", device.get_display_name(), "-i", path.get_path_id()])
def rescan(self):
self._add_new_devices()
# TODO: The logic here is bad... We use information from the OS instead of checking the fabric itself.
# for multipath devices we assume the "state" of the paths is updated
# for scsi devices it's even worse, because we need 'get_hctl' when going over the devices, which uses
# the ODM to find the target and LUN. This will fail for devices that are not defined - so for now
# we don't remove missing SCSI devices and we assume the OS information is updated for multipath devices...
# self._remove_missing_scsi_devices()
self._remove_missing_multipath_devices()
| 2.046875
| 2
|
app/main/forms.py
|
zs3189/web_flask
| 0
|
12778864
|
from flask_wtf import Form
from wtforms import (StringField, PasswordField, TextAreaField, BooleanField,
                     SelectField, SubmitField, FieldList, FileField, ValidationError)
from wtforms.validators import DataRequired, Length, Email, Regexp
from flask_pagedown.fields import PageDownField
from ..models import Role, User, BID_action
class NameForm(Form):
name = StringField('What is your name?', validators=[DataRequired()])
submit = SubmitField('Submit')
class BID_dataForm(Form):
# IDnumber = SelectField("设备类型", choices=[('手持机', '手持机'), ('脚扣', '脚扣')])
IDnumber = StringField("身份证号", validators=[DataRequired(),Length(18),Regexp('^[0-9](X|x){0,1}',message=u'请输入正确的身份证号')])
BIDnumber = StringField("标书号", validators=[DataRequired(),Length(8),Regexp('^[0-9]',message=u'请输入正确的标书号')])
BIDpassword = StringField("标书密码", validators=[DataRequired(),Length(4),Regexp('^[0-9]',message=u'请输入正确的标书密码')])
action_user = SelectField('拍手选择:', coerce=int)
    # Submit button
submit = SubmitField('创建标书号')
def __init__(self, user, *args, **kwargs):
super(BID_dataForm, self).__init__(*args, **kwargs)
self.action_user.choices = [(user.id, user.username)
for user in User.query.order_by(User.username).all()]
self.user = user
class BID_actionForm(Form):
diff_choices=[(i*100+400,i*100+400) for i in range(12)]
refer_time_choices=[(i+40,i+40) for i in range(16)]
bid_time_choices=[(i+54,i+54) for i in range(2)]
delay_time_choices=[(i*0.1,i*0.1) for i in range(10)]
ahead_price_choices=[(i*100,i*100) for i in range(4)]
    diff = SelectField(u"相差价格", coerce=int, choices=diff_choices)  # price difference at the reference time
    refer_time = SelectField(u"参考价格时间", coerce=int, choices=refer_time_choices, default=50)  # reference time
    bid_time = SelectField(u"出价时间", coerce=int, choices=bid_time_choices, default=55)  # bidding deadline
    delay_time = SelectField(u"出价延迟", coerce=float, choices=delay_time_choices)  # bid delay time, 0.1~0.9
    ahead_price = SelectField(u"出价提前", coerce=int, choices=ahead_price_choices, default=100)  # bid lead price
action_user = SelectField('拍手选择:', coerce=int)
    # Submit button
submit = SubmitField('创建策略')
def __init__(self, user, *args, **kwargs):
super(BID_actionForm, self).__init__(*args, **kwargs)
self.action_user.choices = [(user.id, user.username)
for user in User.query.order_by(User.username).all()]
self.user = user
class Edit_BID_dataForm(Form):
# IDnumber = SelectField("设备类型", choices=[('手持机', '手持机'), ('脚扣', '脚扣')])
IDnumber = StringField("身份证号", validators=[DataRequired(),Length(18),Regexp('^[0-9](X|x){0,1}',message=u'请输入正确的身份证号')])
BIDnumber = StringField("标书号", validators=[DataRequired(),Length(8),Regexp('^[0-9]',message=u'请输入正确的标书号')])
BIDpassword = StringField("标书密码", validators=[DataRequired(),Length(4),Regexp('^[0-9]',message=u'请输入正确的标书密码')])
action_user = SelectField('拍手选择:', coerce=int)
    # Submit button
submit = SubmitField('提交修改')
delete = SubmitField('删除')
def __init__(self, user, *args, **kwargs):
super(Edit_BID_dataForm, self).__init__(*args, **kwargs)
self.action_user.choices = [(user.id, user.username)
for user in User.query.order_by(User.username).all()]
self.user = user
class Edit_BID_actionForm(Form):
diff_choices=[(i*100+400,i*100+400) for i in range(12)]
refer_time_choices=[(i+40,i+40) for i in range(16)]
bid_time_choices=[(i+54,i+54) for i in range(2)]
delay_time_choices=[(i*0.1,i*0.1) for i in range(10)]
ahead_price_choices=[(i*100,i*100) for i in range(4)]
    diff = SelectField(u"相差价格", coerce=int, choices=diff_choices)  # price difference at the reference time
    refer_time = SelectField(u"参考价格时间", coerce=int, choices=refer_time_choices)  # reference time
    bid_time = SelectField(u"出价时间", coerce=int, choices=bid_time_choices)  # bidding deadline
    delay_time = SelectField(u"出价延迟", coerce=float, choices=delay_time_choices)  # bid delay time, 0.1~0.9
    ahead_price = SelectField(u"出价提前", coerce=int, choices=ahead_price_choices)  # bid lead price
action_user = SelectField('拍手选择:', coerce=int)
    # Submit button
submit = SubmitField(u'提交修改')
delete = SubmitField(u'删除策略')
def __init__(self, user, *args, **kwargs):
super(Edit_BID_actionForm, self).__init__(*args, **kwargs)
self.action_user.choices = [(user.id, user.username)
for user in User.query.order_by(User.username).all()]
self.user = user
### File upload
class FileForm(Form):
file1=FileField('第一次出价')
file2=FileField('最后一次出价')
file3=FileField('结果')
file4=FileField('出价视频')
submit=SubmitField('Submit')
### Query
class InquiryForm(Form):
keyword=StringField('内容')
submit=SubmitField('查询')
#------------------------------------------ deactivated
class EditProfileForm(Form):
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
class EditProfileAdminForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
username = StringField('Username', validators=[
DataRequired(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
confirmed = BooleanField('Confirmed')
role = SelectField('Role', coerce=int)
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name)
for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if field.data != self.user.email and \
User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if field.data != self.user.username and \
User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class CommentForm(Form):
body = StringField('Enter your comment', validators=[DataRequired()])
submit = SubmitField('Submit')
#### modified
class BulletinForm(Form):
dt=StringField('时间')
price=StringField('价格',validators=[DataRequired()])
names = FieldList(StringField('名称'), label='物品列表', min_entries=1)
| 2.421875
| 2
|
python/dataset.py
|
francois-rozet/adopptrs
| 11
|
12778865
|
#!/usr/bin/env python
"""
PyTorch datasets and data augmenters
"""
###########
# Imports #
###########
import cv2
import numpy as np
import os
import random
import torch
from PIL import Image, ImageFilter
from torch.utils import data
from torchvision import transforms
#############
# Functions #
#############
def to_pil(tensor):
'''Converts a tensor to a PIL image.'''
return transforms.functional.to_pil_image(tensor)
def to_tensor(pic):
'''Converts a PIL image to a tensor.'''
return transforms.functional.to_tensor(pic)
def to_mask(shape, polygons):
'''Builds a mask based on polygon annotations.'''
contours = [np.array(p, dtype=int) for p in polygons]
mask = np.zeros(shape, dtype=np.uint8)
cv2.drawContours(mask, contours, -1, color=255, thickness=-1)
return Image.fromarray(mask)
def to_contours(mask):
'''Converts a mask into OpenCV contours.'''
mask = np.array(mask)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
return contours
def clusterize(polygons, size):
'''Clusterize polygons.'''
clusters = {}
for polygon in polygons:
temp = np.array(polygon).astype(int)
xmin = np.amin(temp[:, 0]) // size
xmax = np.amax(temp[:, 0]) // size
ymin = np.amin(temp[:, 1]) // size
ymax = np.amax(temp[:, 1]) // size
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
key = x * size, y * size
                if key not in clusters:
clusters[key] = []
clusters[key].append(polygon)
return clusters
###########
# Classes #
###########
class VIADataset(data.IterableDataset):
'''Iterable VIA dataset.'''
def __init__(self, via, path='./', size=256, shuffle=False, shift=0, full=False, alt=0):
self.via = {}
self.masks = {}
self.clusters = {}
self.size = size
for key, polygons in via.items():
imagename = os.path.join(path, key)
if os.path.exists(imagename):
image = Image.open(imagename)
self.via[imagename] = polygons
self.masks[imagename] = to_mask((image.height, image.width), polygons)
if self.size is not None:
self.clusters[imagename] = clusterize(polygons, self.size)
self.shuffle = shuffle # random order
self.shift = shift # random shift
self.full = full # all sub-images
self.alt = alt # alternate
def __len__(self):
if self.size is None:
return len(self.via)
elif self.full:
s = 0
for imagename in self.via:
image = Image.open(imagename)
s += (image.width // self.size) * (image.height // self.size)
return s
else:
return sum(map(len, self.clusters.values())) * (1 + self.alt)
def __iter__(self):
images = random.sample(
self.via.keys(),
len(self.via)
) if self.shuffle else self.via.keys()
for imagename in images:
image = Image.open(imagename).convert('RGB')
mask = self.masks[imagename]
if self.size is None:
yield image, mask
elif self.full:
for left in np.arange(0, image.width, self.size):
for upper in np.arange(0, image.height, self.size):
box = (left, upper, left + self.size, upper + self.size)
yield image.crop(box), mask.crop(box)
else:
clusters = list(self.clusters[imagename].keys())
if self.shuffle:
random.shuffle(clusters)
for left, upper in clusters:
# Shift
if self.shift > 0:
left += random.randint(-self.shift, self.shift)
upper += random.randint(-self.shift, self.shift)
# Out of bounds
left = min(left, image.width - self.size)
upper = min(upper, image.height - self.size)
box = (left, upper, left + self.size, upper + self.size)
yield image.crop(box), mask.crop(box)
# Alternate with random images
for _ in range(self.alt):
left = random.randrange(image.width - self.size)
upper = random.randrange(image.height - self.size)
box = (left, upper, left + self.size, upper + self.size)
yield image.crop(box), mask.crop(box)
class RandomChoice(data.IterableDataset):
'''Apply a randomly picked transformation to each pair (input, target).'''
def __init__(self, dataset, transforms, input_only=False):
super().__init__()
self.dataset = dataset
self.transforms = transforms
self.input_only = input_only
def __len__(self):
return len(self.dataset)
def __iter__(self):
for input, target in self.dataset:
f = random.choice(self.transforms)
yield f(input), target if self.input_only else f(target)
class ColorJitter(RandomChoice):
'''Color jitter.'''
def __init__(self, dataset, brightness=0.25, contrast=0.33, saturation=0.33, hue=0):
super().__init__(
dataset=dataset,
transforms=[transforms.ColorJitter(brightness, contrast, saturation, hue)],
input_only=True
)
class RandomFilter(RandomChoice):
'''Random image filter.'''
def __init__(self, dataset):
super().__init__(
dataset=dataset,
transforms=[
lambda x: x,
lambda x: x.filter(ImageFilter.BLUR),
lambda x: x.filter(ImageFilter.DETAIL),
lambda x: x.filter(ImageFilter.EDGE_ENHANCE),
lambda x: x.filter(ImageFilter.SMOOTH),
lambda x: x.filter(ImageFilter.SHARPEN)
],
input_only=True
)
class RandomTranspose(RandomChoice):
'''Random image transpose.'''
def __init__(self, dataset):
super().__init__(
dataset=dataset,
transforms=[
lambda x: x,
lambda x: x.transpose(Image.FLIP_LEFT_RIGHT),
lambda x: x.transpose(Image.FLIP_TOP_BOTTOM),
lambda x: x.transpose(Image.ROTATE_90),
lambda x: x.transpose(Image.ROTATE_180),
lambda x: x.transpose(Image.ROTATE_270),
lambda x: x.transpose(Image.TRANSPOSE)
],
input_only=False
)
class Scale(RandomChoice):
'''Scale image.'''
def __init__(self, dataset, scale):
super().__init__(
dataset=dataset,
transforms=[lambda x: x.resize(
(int(x.width * scale), int(x.height * scale))
)],
input_only=False
)
class ToTensor(RandomChoice):
'''To Tensor.'''
def __init__(self, dataset):
super().__init__(
dataset=dataset,
transforms=[to_tensor],
input_only=False
)
########
# Main #
########
if __name__ == '__main__':
# Imports
import argparse
import json
import via as VIA
# Arguments
parser = argparse.ArgumentParser(description='Format California annotations to the VIA format')
parser.add_argument('-e', '--ext', default='.tif', help='extension of the images')
parser.add_argument('-o', '--output', default='../products/json/california.json', help='output VIA file')
parser.add_argument('-p', '--path', default='../resources/california/', help='path to California resources')
args = parser.parse_args()
# Polygons
with open(os.path.join(args.path, 'SolarArrayPolygons.json'), 'r') as f:
panels = json.load(f)['polygons']
# VGG Image Annotations
via = {}
for panel in panels:
filename = panel['image_name'] + args.ext
polygon = panel['polygon_vertices_pixels']
## Skip dots and lines
        if len(polygon) <= 3:
continue
## Add polygon
if filename not in via:
via[filename] = []
via[filename].append(polygon)
# Save
VIA.dump(via, args.output, path=args.path)
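    # A self-contained sketch of feeding VIADataset with a synthetic image
    # (illustrative only; real annotations come from VIA JSON files):
    #     import tempfile
    #     tmp = tempfile.mkdtemp()
    #     Image.new('RGB', (512, 512)).save(os.path.join(tmp, 'img.png'))
    #     via = {'img.png': [[(100, 100), (200, 100), (200, 200), (100, 200)]]}
    #     ds = ToTensor(RandomTranspose(VIADataset(via, path=tmp, size=256)))
    #     for image, mask in ds:
    #         print(image.shape, mask.shape)  # [3, 256, 256], [1, 256, 256]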
| 2.984375
| 3
|
store/views/my_order.py
|
UniqueQueue/ethereum-store
| 0
|
12778866
|
import logging
import random
from django.conf import settings
from django.db import IntegrityError
from django.db.models import Q
from rest_access_policy import AccessPolicy
from rest_framework import viewsets
from store.const import ORDER_IDS_SESSION_PARAM_NAME
from store.models import Order
from store.serializers import MyOrderSerializer
log = logging.getLogger(__name__)
class MyOrderAccessPolicy(AccessPolicy):
statements = [
{
"action": ["list", "retrieve"],
"principal": ["*"],
"effect": "allow",
"condition": "can_view_my_order",
},
{
"action": ["create", "update", "partial_update", "destroy"],
"principal": ["*"],
"effect": "allow",
"condition": "can_moderate_my_order",
},
]
@staticmethod
def can_view_my_order(request, view, action) -> bool:
return request.user.has_perm('store.view_my_order')
@staticmethod
def can_moderate_my_order(request, view, action) -> bool:
return request.user.has_perm('store.moderate_my_order')
@classmethod
def scope_queryset(cls, request, view, action, qs):
if request.user.is_anonymous:
qs = qs.filter(user=None) # anonymous is allowed to see anonymous orders only
if action == 'list':
order_ids = request.session.get(ORDER_IDS_SESSION_PARAM_NAME, [])
qs = qs.filter(id__in=order_ids)
else:
if action == 'list':
order_ids = request.session.get(ORDER_IDS_SESSION_PARAM_NAME, [])
qs = qs.filter(Q(user=request.user) | Q(id__in=order_ids, user=None))
else:
qs = qs.filter(Q(user=request.user) | Q(user=None))
return qs
class MyOrderView(viewsets.mixins.CreateModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.UpdateModelMixin,
viewsets.mixins.ListModelMixin,
viewsets.GenericViewSet):
permission_classes = (MyOrderAccessPolicy,)
queryset = Order.objects
serializer_class = MyOrderSerializer
filterset_fields = ['status']
ordering = ['id']
@property
def access_policy(self):
return self.permission_classes[0]
def get_queryset(self):
return self.access_policy.scope_queryset(
self.request, self, self.action, super().get_queryset()
)
def perform_create(self, serializer):
if not self.request.user.is_anonymous:
return super().perform_create(serializer)
# it is allowed to anyone to access an order created by an anonymous user
# sequential id generation is vulnerable to pickup attacks
# if one knows his ID, he can guess which one should be next
# let's make it harder to guess
for _ in range(settings.ANONYMOUS_ORDER_ID_GENERATION_ITERATIONS):
try:
random.seed()
serializer.validated_data['id'] = random.randint(*settings.ANONYMOUS_ORDER_ID_GENERATION_RANGE)
super().perform_create(serializer)
break
except IntegrityError:
pass
else:
            log.error('Unable to generate anonymous Order-ID.')
            raise IntegrityError('Unable to generate anonymous Order-ID.')
self.request.session.setdefault(ORDER_IDS_SESSION_PARAM_NAME, []).append(serializer.instance.id)
self.request.session.modified = True
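
# A wiring sketch: the viewset would be exposed through a standard DRF router in
# urls.py (the route and basename below are assumptions):
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'my-orders', MyOrderView, basename='my-order')
#     urlpatterns = router.urls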
| 2.046875
| 2
|
src/compas_plotters/artists/polylineartist.py
|
mattiskoh/compas
| 0
|
12778867
|
from typing import Literal, Tuple, List
from matplotlib.lines import Line2D
from compas.geometry import Polyline
from compas_plotters.artists import Artist
Color = Tuple[float, float, float]
class PolylineArtist(Artist):
"""Artist for COMPAS polylines."""
zorder: int = 1000
def __init__(self,
polyline: Polyline,
draw_points: bool = True,
linewidth: float = 1.0,
linestyle: Literal['solid', 'dotted', 'dashed', 'dashdot'] = 'solid',
color: Color = (0, 0, 0)):
super(PolylineArtist, self).__init__(polyline)
self._mpl_line = None
self._point_artists = []
self.draw_points = draw_points
self.polyline = polyline
self.linewidth = linewidth
self.linestyle = linestyle
self.color = color
@property
def data(self) -> List[List[float]]:
return [point[:2] for point in self.polyline.points]
def draw(self) -> None:
x, y, _ = zip(* self.polyline.points)
line2d = Line2D(x, y,
linewidth=self.linewidth,
linestyle=self.linestyle,
color=self.color,
zorder=self.zorder)
self._mpl_line = self.plotter.axes.add_line(line2d)
if self.draw_points:
for point in self.polyline:
self._point_artists.append(self.plotter.add(point))
def redraw(self) -> None:
x, y, _ = zip(* self.polyline.points)
self._mpl_line.set_xdata(x)
self._mpl_line.set_ydata(y)
self._mpl_line.set_color(self.color)
        self._mpl_line.set_linewidth(self.linewidth)
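
# A usage sketch with compas_plotters (the Plotter API differs between COMPAS
# versions, so treat this as approximate):
#     from compas_plotters import Plotter
#     plotter = Plotter()
#     plotter.add(Polyline([[0, 0, 0], [1, 1, 0], [2, 0, 0]]), linewidth=2.0)
#     plotter.show()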
| 2.96875
| 3
|
src/Program/Python/Testing/verifyOutputTest.py
|
smiths/swhs
| 2
|
12778868
|
import sys
sys.path.insert(0, '.')
import unittest
import load_params
import warnings
import verify_output
class TestVerifyOutput(unittest.TestCase):
def setUp(self):
self.params = load_params.load_params('test.in')
self.time = [0, 10, 20, 30]
self.tempW = [40, 42, 44, 46]
self.tempP = [40, 41.9, 43.8, 45.7]
def test_VO1(self):
eW = [0, 1000, 2000, 19800]
eP = [0, 1000, 2000, 5400]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
            assert len(w) == 0
def test_VO2(self):
eW = [0, 1000, 2000, 19800]
eP = [0, 1000, 2000, 3000]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the PCM output' +
' and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
def test_VO3(self):
eW = [0, 1000, 2000, 3000]
eP = [0, 1000, 2000, 5400]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the water ' +
'output and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
def test_VO4(self):
eW = [0, 1000, 2000, 3000]
eP = [0, 1000, 2000, 3000]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert issubclass(w[1].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the water ' +
'output and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the PCM output' +
' and the expected output based on the law of conservation of energy.\n') in str(w[1].message)
class VerifyOutputSuite:
def suite(self):
suite = unittest.TestLoader().loadTestsFromTestCase(TestVerifyOutput)
return suite
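
# A minimal runner sketch using the standard unittest API:
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(VerifyOutputSuite().suite())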
| 2.765625
| 3
|
pyquil/quil.py
|
JansenZhao/GPNN
| 0
|
12778869
|
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Module for creating and defining Quil programs.
"""
import warnings
from itertools import count
from math import pi
import numpy as np
from six import string_types
from pyquil._parser.PyQuilListener import run_parser
from pyquil.kraus import _check_kraus_ops, _create_kraus_pragmas
from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit
from .gates import MEASURE, STANDARD_GATES, H
from .quilbase import (DefGate, Gate, Measurement, Pragma, AbstractInstruction, Qubit,
Jump, Label, JumpConditional, JumpTarget, JumpUnless, JumpWhen, Addr)
class Program(object):
def __init__(self, *instructions):
self._defined_gates = []
# Implementation note: the key difference between the private _instructions and the public instructions
# property below is that the private _instructions list may contain placeholder values
self._instructions = []
# Performance optimization: as stated above _instructions may contain placeholder values so the program must
# first be synthesized. _synthesized_instructions is simply a cache on the result of the _synthesize() method.
# It is marked as None whenever new instructions are added.
self._synthesized_instructions = None
self.inst(*instructions)
@property
def defined_gates(self):
"""
A list of defined gates on the program.
"""
return self._defined_gates
@property
def instructions(self):
"""
Fill in any placeholders and return a list of quil AbstractInstructions.
"""
if self._synthesized_instructions is None:
self._synthesized_instructions = self._synthesize()
return self._synthesized_instructions
def inst(self, *instructions):
"""
Mutates the Program object by appending new instructions.
This function accepts a number of different valid forms, e.g.
>>> p = Program()
>>> p.inst(H(0)) # A single instruction
>>> p.inst(H(0), H(1)) # Multiple instructions
>>> p.inst([H(0), H(1)]) # A list of instructions
>>> p.inst(("H", 1)) # A tuple representing an instruction
>>> p.inst("H 0") # A string representing an instruction
>>> q = Program()
>>> p.inst(q) # Another program
It can also be chained:
>>> p = Program()
>>> p.inst(H(0)).inst(H(1))
:param instructions: A list of Instruction objects, e.g. Gates
:return: self for method chaining
"""
for instruction in instructions:
if isinstance(instruction, list):
self.inst(*instruction)
elif isinstance(instruction, tuple):
if len(instruction) == 0:
raise ValueError("tuple should have at least one element")
elif len(instruction) == 1:
self.inst(instruction[0])
else:
op = instruction[0]
if op == "MEASURE":
if len(instruction) == 2:
self.measure(instruction[1])
else:
self.measure(instruction[1], instruction[2])
else:
params = []
possible_params = instruction[1]
rest = instruction[2:]
if isinstance(possible_params, list):
params = possible_params
else:
rest = [possible_params] + list(rest)
self.gate(op, params, rest)
elif isinstance(instruction, string_types):
self.inst(run_parser(instruction.strip()))
elif isinstance(instruction, Program):
if id(self) == id(instruction):
raise ValueError("Nesting a program inside itself is not supported")
for defgate in instruction._defined_gates:
self.inst(defgate)
for instr in instruction._instructions:
self.inst(instr)
# Implementation note: these two base cases are the only ones which modify the program
elif isinstance(instruction, DefGate):
defined_gate_names = [gate.name for gate in self._defined_gates]
if instruction.name in defined_gate_names:
warnings.warn("Gate {} has already been defined in this program".format(instruction.name))
self._defined_gates.append(instruction)
elif isinstance(instruction, AbstractInstruction):
self._instructions.append(instruction)
self._synthesized_instructions = None
else:
raise TypeError("Invalid instruction: {}".format(instruction))
return self
def gate(self, name, params, qubits):
"""
Add a gate to the program.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param string name: The name of the gate.
:param list params: Parameters to send to the gate.
:param list qubits: Qubits that the gate operates on.
:return: The Program instance
:rtype: Program
"""
return self.inst(Gate(name, params, [unpack_qubit(q) for q in qubits]))
def defgate(self, name, matrix, parameters=None):
"""
Define a new static gate.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param string name: The name of the gate.
:param array-like matrix: List of lists or Numpy 2d array.
:param list parameters: list of parameters that are used in this gate
:return: The Program instance.
:rtype: Program
"""
return self.inst(DefGate(name, matrix, parameters))
def define_noisy_gate(self, name, qubit_indices, kraus_ops):
"""
Overload a static ideal gate with a noisy one defined in terms of a Kraus map.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param str name: The name of the gate.
:param tuple|list qubit_indices: The qubits it acts on.
:param tuple|list kraus_ops: The Kraus operators.
:return: The Program instance
:rtype: Program
"""
kraus_ops = [np.asarray(k, dtype=np.complex128) for k in kraus_ops]
_check_kraus_ops(len(qubit_indices), kraus_ops)
return self.inst(_create_kraus_pragmas(name, tuple(qubit_indices), kraus_ops))
def no_noise(self):
"""
Prevent a noisy gate definition from being applied to the immediately following Gate
instruction.
:return: Program
"""
return self.inst(Pragma("NO-NOISE"))
def measure(self, qubit_index, classical_reg=None):
"""
Measures a qubit at qubit_index and puts the result in classical_reg
:param int qubit_index: The address of the qubit to measure.
:param int classical_reg: The address of the classical bit to store the result.
:returns: The Quil Program with the appropriate measure instruction appended, e.g.
MEASURE 0 [1]
:rtype: Program
"""
return self.inst(MEASURE(qubit_index, classical_reg))
def measure_all(self, *qubit_reg_pairs):
"""
Measures many qubits into their specified classical bits, in the order
they were entered.
:param Tuple qubit_reg_pairs: Tuples of qubit indices paired with classical bits.
:return: The Quil Program with the appropriate measure instructions appended, e.g.
.. code::
MEASURE 0 [1]
MEASURE 1 [2]
MEASURE 2 [3]
:rtype: Program
"""
for qubit_index, classical_reg in qubit_reg_pairs:
self.inst(MEASURE(qubit_index, classical_reg))
return self
def while_do(self, classical_reg, q_program):
"""
While a classical register at index classical_reg is 1, loop q_program
Equivalent to the following construction:
.. code::
WHILE [c]:
instr...
=>
LABEL @START
JUMP-UNLESS @END [c]
instr...
JUMP @START
LABEL @END
:param int classical_reg: The classical register to check
:param Program q_program: The Quil program to loop.
:return: The Quil Program with the loop instructions added.
:rtype: Program
"""
label_start = LabelPlaceholder("START")
label_end = LabelPlaceholder("END")
self.inst(JumpTarget(label_start))
self.inst(JumpUnless(target=label_end, condition=Addr(classical_reg)))
self.inst(q_program)
self.inst(Jump(label_start))
self.inst(JumpTarget(label_end))
return self
def if_then(self, classical_reg, if_program, else_program=None):
"""
If the classical register at index classical reg is 1, run if_program, else run
else_program.
Equivalent to the following construction:
.. code::
IF [c]:
instrA...
ELSE:
instrB...
=>
JUMP-WHEN @THEN [c]
instrB...
JUMP @END
LABEL @THEN
instrA...
LABEL @END
:param int classical_reg: The classical register to check as the condition
:param Program if_program: A Quil program to execute if classical_reg is 1
:param Program else_program: A Quil program to execute if classical_reg is 0. This
argument is optional and defaults to an empty Program.
:returns: The Quil Program with the branching instructions added.
:rtype: Program
"""
else_program = else_program if else_program is not None else Program()
label_then = LabelPlaceholder("THEN")
label_end = LabelPlaceholder("END")
self.inst(JumpWhen(target=label_then, condition=Addr(classical_reg)))
self.inst(else_program)
self.inst(Jump(label_end))
self.inst(JumpTarget(label_then))
self.inst(if_program)
self.inst(JumpTarget(label_end))
return self
def alloc(self):
"""
Get a new qubit.
        :return: A qubit placeholder, which is assigned a concrete index at synthesis time.
        :rtype: QubitPlaceholder
"""
return QubitPlaceholder()
def out(self):
"""
Converts the Quil program to a readable string.
:return: String form of a program
:rtype: string
"""
s = ""
for dg in self._defined_gates:
s += dg.out()
s += "\n"
for instr in self.instructions:
s += instr.out() + "\n"
return s
def get_qubits(self):
"""
Returns all of the qubit indices used in this program, including gate applications and
allocated qubits. e.g.
>>> p = Program()
>>> p.inst(("H", 1))
>>> p.get_qubits()
{1}
>>> q = p.alloc()
>>> p.inst(H(q))
>>> len(p.get_qubits())
2
:return: A set of all the qubit indices used in this program
:rtype: set
"""
qubits = set()
for instr in self.instructions:
if isinstance(instr, Gate):
qubits |= {q.index for q in instr.qubits}
elif isinstance(instr, Measurement):
qubits.add(instr.qubit.index)
return qubits
def is_protoquil(self):
"""
Protoquil programs may only contain gates, no classical instructions and no jumps.
:return: True if the Program is Protoquil, False otherwise
"""
for instr in self._instructions:
if not isinstance(instr, Gate):
return False
return True
def pop(self):
"""
Pops off the last instruction.
:return: The instruction that was popped.
:rtype: tuple
"""
res = self._instructions.pop()
self._synthesized_instructions = None
return res
def dagger(self, inv_dict=None, suffix="-INV"):
"""
Creates the conjugate transpose of the Quil program. The program must not
contain any irreversible actions (measurement, control flow, qubit allocation).
:return: The Quil program's inverse
:rtype: Program
"""
if not self.is_protoquil():
raise ValueError("Program must be valid Protoquil")
daggered = Program()
for gate in self._defined_gates:
if inv_dict is None or gate.name not in inv_dict:
daggered.defgate(gate.name + suffix, gate.matrix.T.conj())
for gate in reversed(self._instructions):
if gate.name in STANDARD_GATES:
if gate.name == "S":
daggered.inst(STANDARD_GATES["PHASE"](-pi / 2, *gate.qubits))
elif gate.name == "T":
daggered.inst(STANDARD_GATES["RZ"](pi / 4, *gate.qubits))
elif gate.name == "ISWAP":
daggered.inst(STANDARD_GATES["PSWAP"](pi / 2, *gate.qubits))
else:
negated_params = list(map(lambda x: -1 * x, gate.params))
daggered.inst(STANDARD_GATES[gate.name](*(negated_params + gate.qubits)))
else:
if inv_dict is None or gate.name not in inv_dict:
gate_inv_name = gate.name + suffix
else:
gate_inv_name = inv_dict[gate.name]
daggered.inst(tuple([gate_inv_name] + gate.qubits))
return daggered
def _synthesize(self):
"""
Takes a program which may contain placeholders and assigns them all defined values.
For qubit placeholders:
1. We look through the program to find all the known indexes of qubits and add them to a set
2. We create a mapping from undefined qubits to their newly assigned index
3. For every qubit placeholder in the program, if it's not already been assigned then look through the set of
known indexes and find the lowest available one
For label placeholders:
1. Start a counter at 1
2. For every label placeholder in the program, replace it with a defined label using the counter and increment
the counter
:return: List of AbstractInstructions with all placeholders removed
"""
used_indexes = set()
for instr in self._instructions:
if isinstance(instr, Gate):
for q in instr.qubits:
if not isinstance(q, QubitPlaceholder):
used_indexes.add(q.index)
elif isinstance(instr, Measurement):
if not isinstance(instr.qubit, QubitPlaceholder):
used_indexes.add(instr.qubit.index)
def find_available_index():
# Just do a linear search.
for i in count(start=0, step=1):
if i not in used_indexes:
return i
qubit_mapping = dict()
def remap_qubit(qubit):
if not isinstance(qubit, QubitPlaceholder):
return qubit
if id(qubit) in qubit_mapping:
return qubit_mapping[id(qubit)]
else:
available_index = find_available_index()
used_indexes.add(available_index)
remapped_qubit = Qubit(available_index)
qubit_mapping[id(qubit)] = remapped_qubit
return remapped_qubit
label_mapping = dict()
label_counter = 1
def remap_label(placeholder):
if id(placeholder) in label_mapping:
return label_mapping[id(placeholder)]
else:
label = Label(placeholder.prefix + str(label_counter))
label_mapping[id(placeholder)] = label
return label
result = []
for instr in self._instructions:
# Remap qubits on Gate and Measurement instructions
if isinstance(instr, Gate):
remapped_qubits = [remap_qubit(q) for q in instr.qubits]
result.append(Gate(instr.name, instr.params, remapped_qubits))
elif isinstance(instr, Measurement):
result.append(Measurement(remap_qubit(instr.qubit), instr.classical_reg))
# Remap any label placeholders on jump or target instructions
elif isinstance(instr, Jump) and isinstance(instr.target, LabelPlaceholder):
result.append(Jump(remap_label(instr.target)))
label_counter += 1
elif isinstance(instr, JumpTarget) and isinstance(instr.label, LabelPlaceholder):
result.append(JumpTarget(remap_label(instr.label)))
label_counter += 1
elif isinstance(instr, JumpConditional) and isinstance(instr.target, LabelPlaceholder):
new_label = remap_label(instr.target)
if isinstance(instr, JumpWhen):
result.append(JumpWhen(new_label, instr.condition))
elif isinstance(instr, JumpUnless):
result.append(JumpUnless(new_label, instr.condition))
else:
raise TypeError("Encountered a JumpConditional that wasn't JumpWhen or JumpUnless: {} {}"
.format(type(instr), instr))
label_counter += 1
# Otherwise simply add it to the result
else:
result.append(instr)
return result
def __add__(self, other):
"""
Concatenate two programs together, returning a new one.
:param Program other: Another program or instruction to concatenate to this one.
:return: A newly concatenated program.
:rtype: Program
"""
p = Program()
p.inst(self)
p.inst(other)
return p
def __getitem__(self, index):
"""
Allows indexing into the program to get an action.
:param index: The action at the specified index.
:return:
"""
return self.instructions[index]
def __iter__(self):
"""
Allow built in iteration through a program's instructions, e.g. [a for a in Program(X(0))]
:return:
"""
return self.instructions.__iter__()
def __eq__(self, other):
return isinstance(other, self.__class__) and self.out() == other.out()
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self._instructions)
def __str__(self):
return self.out()
def merge_programs(prog_list):
"""
Merges a list of pyQuil programs into a single one by appending them in sequence
:param list prog_list: A list of pyquil programs
:return: a single pyQuil program
:rtype: Program
"""
return sum(prog_list, Program())
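# Minimal usage sketch (illustrative only; H and CNOT are assumed to come from
# pyquil.gates, which this module does not import itself):
#
#     from pyquil.gates import H, CNOT
#     bell = Program(H(0), CNOT(0, 1)).measure_all((0, 0), (1, 1))
#     print(bell.out())   # H 0 / CNOT 0 1 / MEASURE 0 [0] / MEASURE 1 [1]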
| 1.78125
| 2
|
src/GL/sim/gql_ml_cv.py
|
kylmcgr/RL-RNN-SURF
| 2
|
12778870
|
<gh_stars>1-10
# Evaluates GQL in terms of cross-validation. Note that this file only evaluates the model;
# it requires the trained models produced by 'fit/gql_ml_cv.py'.
from actionflow.qrl.gql import GQL
from actionflow.qrl.opt_ml import OptML
from actionflow.qrl.simulate import Simulator
from BD.data.data_reader import DataReader
import tensorflow as tf
from BD.util.paths import Paths
def evaluate_BD_CV():
data = DataReader.read_BD()
base_input_folder = Paths.rest_path + 'archive/beh/gql-ml-cv/'
base_output_folder = Paths.local_path + 'BD/evals/gql-ml-cv-evals/'
model_iters = ['model-final']
folds = {'Healthy': ['fold' + str(x) for x in range(0, 34)],
'Bipolar': ['fold' + str(x) for x in range(0, 33)],
'Depression': ['fold' + str(x) for x in range(0, 34)]
}
tf.reset_default_graph()
worker = GQL.get_instance(2, 2, {})
worker.set_params(OptML.get_variables(worker.get_params()))
def test_and_save(sess, test, output_folder):
return OptML.test_and_save("", output_folder, None, sess, test, worker)
Simulator.evaluate_CV(base_input_folder, base_output_folder, test_and_save, data, folds,
model_iters,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], random_tie=True)
if __name__ == '__main__':
evaluate_BD_CV()
| 2.265625
| 2
|
currint/tests/test_amount.py
|
valentin-eb/currint
| 15
|
12778871
|
<reponame>valentin-eb/currint<filename>currint/tests/test_amount.py
# encoding: utf8
from __future__ import unicode_literals
import six
from decimal import Decimal
from unittest import TestCase
from ..currency import currencies, Currency
from ..amount import Amount, _ZeroAmount
class AmountTests(TestCase):
def test_equality(self):
self.assertEqual(
Amount(currencies["GBP"], 132),
Amount(currencies["GBP"], 132),
)
self.assertNotEqual(
Amount(currencies["GBP"], 132),
Amount(currencies["USD"], 132),
)
self.assertNotEqual(
Amount(currencies["GBP"], 132),
Amount(currencies["GBP"], 99),
)
self.assertNotEqual(
Amount(currencies["GBP"], 132),
object(),
)
def test_add(self):
self.assertEqual(
Amount(currencies["GBP"], 132) + Amount(currencies["GBP"], 100),
Amount(currencies["GBP"], 232),
)
with self.assertRaises(ValueError):
Amount(currencies["GBP"], 132) + Amount(currencies["USD"], 100)
def test_subtract(self):
self.assertEqual(
Amount(currencies["GBP"], 132) - Amount(currencies["GBP"], 100),
Amount(currencies["GBP"], 32),
)
with self.assertRaises(ValueError):
Amount(currencies["GBP"], 132) - Amount(currencies["USD"], 100)
def test_comparison(self):
self.assertLess(
Amount(currencies["GBP"], 10),
Amount(currencies["GBP"], 20),
)
self.assertGreater(
Amount(currencies["GBP"], 20),
Amount(currencies["GBP"], 10),
)
self.assertLessEqual(
Amount(currencies["GBP"], 20),
Amount(currencies["GBP"], 20),
)
self.assertGreaterEqual(
Amount(currencies["GBP"], 20),
Amount(currencies["GBP"], 20),
)
def test_format(self):
self.assertEqual(
six.text_type(Amount(currencies["USD"], 132)),
"1.32 USD",
)
self.assertEqual(
six.text_type(Amount(currencies["USD"], -132)),
"-1.32 USD",
)
self.assertEqual(
six.text_type(Amount(
Currency("GBP", "826", 2, 'Pound Sterling', prefix="£"),
132,
)),
"£1.32",
)
def test_bool(self):
self.assertTrue(Amount(currencies["USD"], 1))
self.assertTrue(Amount(currencies["USD"], -1))
self.assertFalse(Amount(currencies["USD"], 0))
self.assertTrue(Amount.from_code_and_major("USD", Decimal('0.01')))
self.assertTrue(Amount.from_code_and_major("USD", Decimal('-0.01')))
self.assertFalse(Amount.from_code_and_major("USD", Decimal('0.00')))
def test_apply_factor(self):
self.assertEqual(
Amount(currencies["GBP"], 150).apply_factor(2),
Amount(currencies["GBP"], 300),
)
self.assertEqual(
Amount(currencies["GBP"], 100).apply_factor(Decimal("1.004")),
Amount(currencies["GBP"], 100),
)
self.assertEqual(
Amount(currencies["GBP"], 100).apply_factor(Decimal("1.005")),
Amount(currencies["GBP"], 101),
)
with self.assertRaises(ValueError):
Amount(currencies["GBP"], 100).apply_factor(1.005)
def test_from_code_and_minor(self):
self.assertEqual(
Amount.from_code_and_minor("GBP", 300),
Amount(currencies["GBP"], 300),
)
self.assertEqual(
Amount.from_code_and_minor("gbp", 300),
Amount(currencies["GBP"], 300),
)
with self.assertRaises(ValueError):
Amount.from_code_and_minor("WAITWHAT", 100)
def test_from_code_and_major(self):
self.assertEqual(
Amount.from_code_and_major("GBP", "3.00"),
Amount(currencies["GBP"], 300),
)
self.assertEqual(
Amount.from_code_and_major("gbp", ".10"),
Amount(currencies["GBP"], 10),
)
self.assertEqual(
Amount.from_code_and_major("GBP", 10),
Amount(currencies["GBP"], 1000),
)
self.assertEqual(
Amount.from_code_and_major("GBP", Decimal("10.01")),
Amount(currencies["GBP"], 1001),
)
with self.assertRaises(ValueError):
Amount.from_code_and_major("WAITWHAT", 100)
with self.assertRaises(ValueError):
Amount.from_code_and_major("GBP", "12.432")
with self.assertRaises(ValueError):
Amount.from_code_and_major("GBP", "aaaaaaah")
def test_to_major_decimal(self):
self.assertEqual(
Amount(currencies["GBP"], 300).to_major_decimal(),
Decimal("3.00"),
)
self.assertEqual(
Amount(currencies["USD"], 3).to_major_decimal(),
Decimal("0.03"),
)
self.assertEqual(
Amount(currencies["GBP"], -425).to_major_decimal(),
Decimal("-4.25"),
)
self.assertEqual(
Amount(currencies["MRO"], 7).to_major_decimal(),
Decimal("1.4"), # It's written 1.2, but is 1.4 of the major unit
)
def test_convert_currency(self):
self.assertEqual(
Amount(currencies["GBP"], 300).convert_currency("USD", 1),
Amount(currencies["USD"], 300),
)
self.assertEqual(
Amount(currencies["GBP"], 300).convert_currency("USD", 1.5),
Amount(currencies["USD"], 450),
)
self.assertEqual(
Amount(currencies["GBP"], 300).convert_currency("CHF", 0.43215),
Amount(currencies["CHF"], 130),
)
self.assertEqual(
Amount(currencies["GBP"], 300).convert_currency("EUR", Decimal("0.91")),
Amount(currencies["EUR"], 273),
)
def test_integral_division(self):
self.assertEqual(
Amount(currencies["GBP"], 300).integral_division(1),
Amount(currencies["GBP"], 300),
)
self.assertEqual(
Amount(currencies["GBP"], 300).integral_division(3),
Amount(currencies["GBP"], 100),
)
with self.assertRaises(ValueError):
Amount(currencies["GBP"], 300).integral_division(2.2)
with self.assertRaises(ValueError):
Amount(currencies["GBP"], 300).integral_division(301)
def test_divide_and_round(self):
self.assertEqual(
Amount(currencies["GBP"], 300).divide_and_round(1),
Amount(currencies["GBP"], 300),
)
self.assertEqual(
Amount(currencies["GBP"], 300).divide_and_round(3),
Amount(currencies["GBP"], 100),
)
self.assertEqual(
Amount(currencies["GBP"], 300).divide_and_round(4.5),
Amount(currencies["GBP"], 67),
)
self.assertEqual(
Amount(currencies["GBP"], 1).divide_and_round(2),
Amount(currencies["GBP"], 1),
)
class ZeroAmountTests(TestCase):
def setUp(self):
self.nonzero = Amount(currencies["GBP"], 300)
self.negative = Amount(currencies["GBP"], -50)
def test_singleton(self):
self.assertIs(Amount.ZERO, _ZeroAmount())
def test_simple_addition(self):
amt = Amount.ZERO + self.nonzero
self.assertEqual(amt.currency, self.nonzero.currency)
self.assertEqual(amt.value, self.nonzero.value)
def test_simple_raddition(self):
amt = self.nonzero + Amount.ZERO
self.assertEqual(amt.currency, self.nonzero.currency)
self.assertEqual(amt.value, self.nonzero.value)
def test_sum(self):
amt = sum([self.nonzero], Amount.ZERO)
self.assertEqual(amt.currency, self.nonzero.currency)
self.assertEqual(amt.value, self.nonzero.value)
def test_subr(self):
amt = self.nonzero - Amount.ZERO
self.assertEqual(amt.currency, self.nonzero.currency)
self.assertEqual(amt.value, self.nonzero.value)
def test_subl(self):
amt = Amount.ZERO - self.nonzero
self.assertEqual(amt.currency, self.nonzero.currency)
self.assertEqual(amt.value, -self.nonzero.value)
def test_sum_with_zeroes(self):
amt = sum([Amount.ZERO, self.nonzero, Amount.ZERO], Amount.ZERO)
self.assertEqual(amt.currency, self.nonzero.currency)
self.assertEqual(amt.value, self.nonzero.value)
def test_comparison(self):
self.assertGreater(self.nonzero, Amount.ZERO)
self.assertLess(Amount.ZERO, self.nonzero)
self.assertLess(self.negative, Amount.ZERO)
self.assertGreater(Amount.ZERO, self.negative)
self.assertEqual(Amount.ZERO, Amount(currencies["GBP"], 0))
self.assertEqual(Amount(currencies["GBP"], 0), Amount.ZERO)
self.assertGreaterEqual(Amount.ZERO, Amount(currencies["GBP"], 0))
self.assertGreaterEqual(Amount(currencies["GBP"], 0), Amount.ZERO)
self.assertLessEqual(Amount.ZERO, Amount(currencies["GBP"], 0))
self.assertLessEqual(Amount(currencies["GBP"], 0), Amount.ZERO)
self.assertNotEqual(Amount(currencies["GBP"], 100), Amount.ZERO)
self.assertNotEqual(Amount(currencies["GBP"], 100), "foobar")
self.assertNotEqual(Amount.ZERO, "foobar")
def test_to_major_decimal(self):
self.assertEqual(Amount.ZERO.to_major_decimal(), Decimal('0'))
def test_str(self):
try:
six.text_type(Amount.ZERO)
except:
self.fail("str(Amount.ZERO) raised an exception")
def test_repr(self):
try:
repr(Amount.ZERO)
except:
self.fail("repr(Amount.ZERO) raised an exception")
def test_forbidden_from_code_and_minor(self):
with self.assertRaises(NotImplementedError):
_ZeroAmount.from_code_and_minor('USD', 100)
def test_forbidden_from_code_and_major(self):
with self.assertRaises(NotImplementedError):
            _ZeroAmount.from_code_and_major('USD', Decimal('1.00'))
| 3.15625
| 3
|
setup.py
|
geotip/django-rest-params
| 0
|
12778872
|
# -*- coding: utf-8 -*-
from codecs import open # To use a consistent encoding
from os import path
from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-rest-params',
version='1.0.0',
description='Function decorator for Django REST Framework for specifying and constraining API parameters.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/cammsaul/django-rest-params',
# Author details
author='<NAME>',
author_email='<EMAIL>',
# Choose your license
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
],
keywords='rest,django,api,params,parameters,djangorestframework,decorator',
packages=find_packages(exclude=['tests']),
install_requires=['django', 'djangorestframework']
)
| 1.601563
| 2
|
Scripts/find_imposter.py
|
yogeshwaran01/Mini-Projects
| 4
|
12778873
|
"""
There is an array with some numbers.
All numbers are equal except for one (the imposter).
"""
def imposter(arr: list):
    """
    Return the element that appears exactly once in arr.
>>> imposter([1,2,1,1,1,1])
2
>>> imposter(["python", "java", "python", "python"])
'java'
"""
n = []
s = set(arr)
for e in s:
if arr.count(e) == 1:
n.append(e)
return n[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
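# An equivalent one-liner (illustrative), using collections.Counter:
#     from collections import Counter
#     def imposter(arr): return next(x for x, c in Counter(arr).items() if c == 1)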
| 3.578125
| 4
|
model/bifpn.py
|
sevakon/efficientdet
| 25
|
12778874
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.efficientnet.utils import MemoryEfficientSwish as Swish
from model.module import DepthWiseSeparableConvModule as DWSConv
from model.module import MaxPool2dSamePad
class BiFPN(nn.Module):
"""
BiFPN block.
Depending on its order, it either accepts
seven feature maps (if this block is the first block in FPN) or
otherwise five feature maps from the output of the previous BiFPN block
"""
EPS: float = 1e-04
REDUCTION_RATIO: int = 2
def __init__(self, n_channels):
super(BiFPN, self).__init__()
self.conv_4_td = DWSConv(n_channels, n_channels, relu=False)
self.conv_5_td = DWSConv(n_channels, n_channels, relu=False)
self.conv_6_td = DWSConv(n_channels, n_channels, relu=False)
self.weights_4_td = nn.Parameter(torch.ones(2))
self.weights_5_td = nn.Parameter(torch.ones(2))
self.weights_6_td = nn.Parameter(torch.ones(2))
self.conv_3_out = DWSConv(n_channels, n_channels, relu=False)
self.conv_4_out = DWSConv(n_channels, n_channels, relu=False)
self.conv_5_out = DWSConv(n_channels, n_channels, relu=False)
self.conv_6_out = DWSConv(n_channels, n_channels, relu=False)
self.conv_7_out = DWSConv(n_channels, n_channels, relu=False)
self.weights_3_out = nn.Parameter(torch.ones(2))
self.weights_4_out = nn.Parameter(torch.ones(3))
self.weights_5_out = nn.Parameter(torch.ones(3))
self.weights_6_out = nn.Parameter(torch.ones(3))
self.weights_7_out = nn.Parameter(torch.ones(2))
self.upsample = lambda x: F.interpolate(x, scale_factor=self.REDUCTION_RATIO)
self.downsample = MaxPool2dSamePad(self.REDUCTION_RATIO + 1, self.REDUCTION_RATIO)
self.act = Swish()
def forward(self, features):
if len(features) == 5:
p_3, p_4, p_5, p_6, p_7 = features
p_4_2, p_5_2 = None, None
else:
p_3, p_4, p_4_2, p_5, p_5_2, p_6, p_7 = features
# Top Down Path
p_6_td = self.conv_6_td(
self._fuse_features(
weights=self.weights_6_td,
features=[p_6, self.upsample(p_7)]
)
)
p_5_td = self.conv_5_td(
self._fuse_features(
weights=self.weights_5_td,
features=[p_5, self.upsample(p_6_td)]
)
)
p_4_td = self.conv_4_td(
self._fuse_features(
weights=self.weights_4_td,
features=[p_4, self.upsample(p_5_td)]
)
)
p_4_in = p_4 if p_4_2 is None else p_4_2
p_5_in = p_5 if p_5_2 is None else p_5_2
# Out
p_3_out = self.conv_3_out(
self._fuse_features(
weights=self.weights_3_out,
features=[p_3, self.upsample(p_4_td)]
)
)
p_4_out = self.conv_4_out(
self._fuse_features(
weights=self.weights_4_out,
features=[p_4_in, p_4_td, self.downsample(p_3_out)]
)
)
p_5_out = self.conv_5_out(
self._fuse_features(
weights=self.weights_5_out,
features=[p_5_in, p_5_td, self.downsample(p_4_out)]
)
)
p_6_out = self.conv_6_out(
self._fuse_features(
weights=self.weights_6_out,
features=[p_6, p_6_td, self.downsample(p_5_out)]
)
)
p_7_out = self.conv_7_out(
self._fuse_features(
weights=self.weights_7_out,
features=[p_7, self.downsample(p_6_out)]
)
)
return [p_3_out, p_4_out, p_5_out, p_6_out, p_7_out]
def _fuse_features(self, weights, features):
weights = F.relu(weights)
        num = sum([w * f for w, f in zip(weights, features)])
        denom = sum(weights) + self.EPS
        x = self.act(num / denom)
return x
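# Shape sketch (illustrative; the channel count and input resolutions are assumptions):
#     fpn = BiFPN(n_channels=64)
#     feats = [torch.randn(1, 64, s, s) for s in (64, 32, 16, 8, 4)]   # P3..P7
#     outs = fpn(feats)   # five maps with the same resolutions as the inputs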
| 2.546875
| 3
|
madminer/plotting/__init__.py
|
johannbrehmer/madminer
| 13
|
12778875
|
<reponame>johannbrehmer/madminer
from .distributions import plot_distributions, plot_histograms
from .morphing import (
plot_2d_morphing_basis,
plot_nd_morphing_basis_scatter,
plot_nd_morphing_basis_slices,
plot_1d_morphing_basis,
)
from .fisherinformation import (
plot_fisherinfo_barplot,
plot_fisher_information_contours_2d,
plot_distribution_of_information,
)
from .limits import plot_pvalue_limits
from .uncertainties import plot_systematics, plot_uncertainty
| 1.507813
| 2
|
tests/test_node.py
|
luminescence/pycrunchbase
| 67
|
12778876
|
from unittest import TestCase
import json
from pycrunchbase.resource.node import Node
from pycrunchbase.resource.utils import parse_date
class TestNode(Node):
KNOWN_PROPERTIES = ['property1', 'property2']
def _coerce_values(self):
# intentionally coerce bad values for test purposes
attr = 'property1'
if getattr(self, attr, None):
setattr(self, attr, parse_date(getattr(self, attr)))
data = {
"type": "TestNode",
"uuid": "uuid",
'properties': {
'property1': 'one',
'property2': 'two'
},
'relationships': {
'unknown': {
'paging': {},
'items': {}
}
},
}
class NodeTestCase(TestCase):
def test_node_creation_from_dict(self):
node = TestNode(data)
self.assertEqual(node.property1, 'one')
self.assertEqual(node.property2, 'two')
def test_node_creation_from_string(self):
node = TestNode(json.dumps(data))
self.assertEqual(node.property1, 'one')
self.assertEqual(node.property2, 'two')
| 3.078125
| 3
|
django_project/weather/urls.py
|
bbsoft0/weather
| 1
|
12778877
|
<filename>django_project/weather/urls.py
from django.urls import path,include
from . import views
urlpatterns = [
path('',views.index,name="home"),
path('about/',views.about,name="about"),
path('help/',views.help,name="help"),
path('delete/<city_name>/',views.delete_city,name="delete_city"),
]
| 2.0625
| 2
|
ib/utilities.py
|
dhsdshdhk/tchan
| 0
|
12778878
|
import re
import html
import string
from functools import partial
from PIL import Image
from os.path import split, splitext
import random
from django.core.files.uploadedfile import InMemoryUploadedFile
from ib.models import Post, File
from django.conf import settings
from os.path import join
import subprocess
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
def validate_text(text):
""" Returns True if text exists and is more than white spaces, False otherwise."""
return bool(text) and not text.isspace()
def remove_invalid_files(files, request):
""" Returns a list with only valid files, discarding files that aren't accepted.
'blob' is sent by Dropzone.js in posts without files. """
accepted = []
for f in [i for i in files if i.name != 'blob']:
if check_file(f):
accepted.append(f)
else:
messages.error(request, f"Upload error: {f} too large or extension not allowed.")
return accepted
def validate_post(text, files, embed, thread_id, request):
valid_text = validate_text(text)
if thread_id:
if valid_text or files or embed:
return True
else:
messages.error(request, "A post must have text, a file or a YouTube embed.")
else:
if valid_text and (files or embed):
return True
elif not valid_text:
messages.error(request, "A thread must have text.")
else:
messages.error(request, "A thread must have files or a YouTube embed.")
return False
def validate_embed(embed, request):
    """ Validates an embed string. If valid, returns (url, provider, id), else (None, None, None). """
    if not embed:
        return None, None, None
patterns = [
'((?:https?:\\/\\/)?(?:www\\.)?(?:youtu\\.be\\/|youtube\\.com\\/(?:embed\\/|v\\/|watch\\?v=|watch\\?.+&v=))((\\w|-){11})(?:\\S+)?)']
for p in patterns:
matches = re.findall(p, embed)
if matches:
return matches[0][0], "youtube", matches[0][1]
messages.error(request, 'Bad URL. Could not embed.')
return None, None, None
def prepare_text(text):
""" Escapes text, adds <span> tags for greentext, red text and spoiler text, rainbow text, big text and quotes. """
text = html.escape(text)
def spanify(m, klass):
"""Nobody expects the Spanish inquisition!
Insert span with desired class:
m - match object
klass - the class name"""
carriage_return = '\r' # In Firefox based browsers, a \r will appear at the end of a green text.
return f'<span class="{klass}">{m[1].replace(carriage_return, "")}</span>'
    regexes = [('greentext', r'^(&gt;.+)$'),  # > green line (the text is already HTML-escaped)
('redtext', r'==(.+?)=='), # == red text ==
('spoilertext', r'\*\*(.+?)\*\*'), # ** spoiler text **
('rainbowtext', r'%%(.+?)%%'), # %% rainbow text %%
('bigtext', r'##(.+?)##'), # ## big text ##
               ('boldtext', r"'''(.+?)'''"), ]  # ''' bold text '''
for name, p in regexes:
text = re.sub(p, partial(spanify, klass=name), text, flags=re.MULTILINE)
    quotes = set(re.findall(r'&lt;&lt;\d+', text))  # '<<' is escaped to '&lt;&lt;' (8 chars, hence q[8:])
for q in quotes:
try:
p = Post.objects.get(pk=int(q[8:]))
text = text.replace(q, f'<a class="quote" href="/{p.board}/thread/{get_thread(p)}/#{p.id}">{q}</a>')
except Post.DoesNotExist:
continue
p = '((?:https?:\\/\\/)?(?:www\\.)?(?:youtu\\.be\\/|youtube\\.com\\/(?:embed\\/|v\\/|watch\\?v=|watch\\?.+&v=))((\\w|-){11})(?:\\S+)?)'
text = re.sub(p, r'\1' + ' <a class="embedButton" data-from="youtube" data-id="' + r'\2' + '">[Embed]</a>', text)
    p = r'(https:\/\/www\.pornhub\.com\/view_video\.php\?viewkey=(\w*))'
text = re.sub(p, r'\1' + ' <a class="embedButton" data-from="pornhub" data-id="' + r'\2' + '">[Embed]</a>', text)
return text
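# Markup sketch (illustrative; input is escaped first, hence the &gt; entity):
#     prepare_text(">quoted")    -> '<span class="greentext">&gt;quoted</span>'
#     prepare_text("**hidden**") -> '<span class="spoilertext">hidden</span>'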
def create_post(post: dict, files, request, spoilers) -> tuple:
p = Post(**post)
p.save()
file_objs = []
for file in files:
original_name = file.name
ext = splitext(file.name)[1]
file.name = ''.join(random.choices(string.ascii_letters + string.digits, k=8)) + ext
file_entry = File(post=p, original_name=original_name,
spoiler=(original_name in spoilers) or ('all' in spoilers))
file_entry.file.name = file.name
write_file(file)
f = file_entry
f.mimetype = file.content_type
f.thumb = f.get_thumb()
f.size_str = f.get_size()
f.original_name_shortened = f.get_original_name_shortened()
if not (f.mimetype.startswith('audio/') or f.mimetype == 'application/epub+zip'):
create_thumb(f)
f.width, f.height = f.get_dimensions()
f.thumb_width, f.thumb_height = f.get_dimensions(thumb=True)
file_objs.append(file_entry)
if file_objs:
File.objects.bulk_create(file_objs)
return (post['thread'].id if post['thread'] else p.id, p)
def write_file(file: InMemoryUploadedFile):
with open(join(settings.MEDIA_ROOT, file.name), 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
def create_thumb(file):
full_file_path = file.file.path
ext = splitext(full_file_path)[1]
dir, fname = split(full_file_path)
fname = 't_' + fname
max_dimension = 256
scale_string = f"scale='trunc(min(1,min({max_dimension}/iw,{max_dimension}/ih))*iw/2)*2':'trunc(min(1,min({max_dimension}/iw,{max_dimension}/ih))*ih/2)*2'"
if ext.lower() in ['.mp4', '.webm']:
subprocess.run(
['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-itsoffset', '-1', '-i', full_file_path, '-vframes', '1',
'-filter:v', scale_string,
join(dir, fname).replace(ext, '.jpg')])
elif ext.lower() == '.gif':
subprocess.run(
['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-i', full_file_path, '-vf', scale_string, '-an',
join(dir, fname)])
elif ext.lower() == '.pdf':
subprocess.run(
['pdftoppm', '-jpeg', full_file_path, join(dir, fname).replace(ext, ''), '-scale-to', '256', '-singlefile'])
else:
im = Image.open(full_file_path)
im.thumbnail((max_dimension, max_dimension))
try:
im.save(join(dir, fname))
except OSError: # OSError might happen if it's a gif renamed as jpg
im.save(join(dir, fname), format='gif') # this will prevent server error, but the thumbnail will be static
def check_file(file: InMemoryUploadedFile):
return file.size <= 100 * 1e6 and file.name.lower().endswith(
('jpeg', 'jpg', 'gif', 'png', 'webp', 'webm', 'mp4', 'mp3', 'ogg', 'flac', 'opus', 'pdf', 'epub'))
def get_thread(post):
if post.thread:
return post.thread.id
else:
return post.id
| 2.234375
| 2
|
RNNS/model/baseLangRNN.py
|
CenIII/Text-style-transfer-DeleteRetrieve
| 0
|
12778879
|
<gh_stars>0
import torch.nn as nn
from .baseRNN import BaseRNN
import numpy as np
class baseLangRNN(BaseRNN):
def __init__(self, vocab_size, max_len, hidden_size,
input_dropout_p=0, dropout_p=0,
n_layers=1, bidirectional=False, rnn_cell='gru', variable_lengths=False,
embedding=None, update_embedding=False):
super(baseLangRNN, self).__init__(vocab_size, max_len, hidden_size,
input_dropout_p, dropout_p, n_layers, rnn_cell)
self.variable_lengths = variable_lengths
self.embedding = nn.Embedding(vocab_size, 300)
if embedding is not None:
self.embedding.weight = nn.Parameter(embedding)
self.embedding.weight.requires_grad = update_embedding
self.rnn = self.rnn_cell(300, hidden_size, n_layers,
batch_first=True, bidirectional=bidirectional, dropout=dropout_p)
self.decoder = nn.Linear(hidden_size,self.vocab_size)
def forward(self, input_var, input_lengths=None):
"""
Applies a multi-layer RNN to an input sequence.
Args:
input_var (batch, seq_len): tensor containing the features of the input sequence.
            input_lengths (np.ndarray of int, optional): the lengths of the sequences
              in the mini-batch (an array is required here, since it is negated for sorting)
Returns: output, hidden
- **output** (batch, seq_len, hidden_size): variable containing the encoded features of the input sequence
- **hidden** (num_layers * num_directions, batch, hidden_size): variable containing the features in the hidden state h
"""
# import pdb;pdb.set_trace()
inds = np.argsort(-input_lengths)
input_var = input_var[inds]
input_lengths = input_lengths[inds]
rev_inds = np.argsort(inds)
embedded = self.embedding(input_var)
embedded = self.input_dropout(embedded)
if self.variable_lengths:
embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
output, hidden = self.rnn(embedded)
if self.variable_lengths:
output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
output = output[rev_inds]
hidden = hidden[:,rev_inds]
# pdb.set_trace()
        output = self.decoder(output)  # torch.Size([batch, seq_len, vocab_size]) since batch_first=True
return output, hidden
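# Usage sketch (illustrative; the vocabulary size, lengths and token_ids tensor
# are assumptions):
#     model = baseLangRNN(vocab_size=10000, max_len=50, hidden_size=256,
#                         variable_lengths=True)
#     logits, hidden = model(token_ids, input_lengths=np.array([12, 7, 3]))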
| 2.59375
| 3
|
stream/tests.py
|
0xdc/estuary-app-livestream
| 0
|
12778880
|
from django.test import TestCase
# Create your tests here.
from .models import Stream
class StreamTests(TestCase):
pass
| 1.289063
| 1
|
question34.py
|
larkaa/project_euler
| 0
|
12778881
|
#!/usr/bin/env python3
# question 34: digit factorials
#145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
#Find the sum of all numbers which are equal to the sum of the factorial of their digits.
#Note: as 1! = 1 and 2! = 2 are not sums they are not included.
# idea: brute force
# hand-checking small cases: 3 != 3!, and similarly nothing works up to 13
# (apart from the excluded 1 and 2); from 14 on the factorial sums overshoot:
# 14 -> 1! + 4! = 25, nope
# 15 -> 1! + 5! = 121, already too high
def factorial(num):
if num==0: return 1
if num==1: return 1
else:
res=num
num-=1
while num>1:
res *= num
num-=1
return(res)
# to speed things up pre-calculate the factorials
# to be used in the following function
fact_list =[]
for i in range(10):
fact_list.append(factorial(i))
def digit_factorial(num):
num_str = str(num)
temp_sum = 0
for char in num_str:
temp_sum+= fact_list[int(char)]
return temp_sum
def find_numbers(limit):
res_list = []
for i in range(3,limit+1):
temp = digit_factorial(i)
if i == temp:
res_list.append(i)
return res_list
#print(digit_factorial(1546))
# 145 is the smallest!
# note that 9! = 362880, so a d-digit number has digit-factorial sum at most
# d*362880; since 8*362880 = 2903040 < 10000000, no solution has more than
# 7 digits, giving 7*9! = 2540160 as a rigorous search limit
# (the search below stops at 1000000, which is enough to find both solutions)
print(find_numbers(1000000))
# there are only two!
# [145, 40585]
# sum = 40730
| 3.890625
| 4
|
code/runner.py
|
rapidclock/simple-neural-network
| 0
|
12778882
|
<filename>code/runner.py
from data_prep import process_csv
from nn.model import NeuralNetwork
from nn.layers import InputLayer, Dense
from nn.loss import CrossEntropy
from nn.optimizer import SGD
from nn.activations import sigmoid, tanh
test_file = '../data/mnist_test.csv'
train_file = '../data/mnist_train.csv'
x_train, y_train = process_csv(train_file)
x_test, y_test = process_csv(test_file)
model = NeuralNetwork()
model.addLayer(InputLayer((1, 784)))
model.addLayer(Dense(neuron_count=300, activation=tanh()))
model.addLayer(Dense(neuron_count=10, activation=sigmoid()))
model.compile(loss=CrossEntropy(), optimizer=SGD(alpha=0.000006))
# fit on the training split; the test split stays held out for evaluation
train_loss, train_acc, val_loss, val_acc = model.fit(x_train, y_train, validation_set=True,
                                                     validation_split=0.1, epochs=1, batch_size=100)
| 2.96875
| 3
|
socialforcemodel/math.py
|
bazylip/socialforcemodel
| 2
|
12778883
|
def length_squared(vector):
""" Return the length squared of a vector. """
return vector[0]**2 + vector[1]**2
| 3.734375
| 4
|
hospital/admin.py
|
kurbster/HospitalManagement
| 1
|
12778884
|
from django.contrib import admin
from .models import *
# Register your models here.
class DoctorAdmin(admin.ModelAdmin):
pass
admin.site.register(Doctor, DoctorAdmin)
class HospitalStaffAdmin(admin.ModelAdmin):
pass
admin.site.register(HospitalStaff, HospitalStaffAdmin)
#insurance created by prem
class InsuranceAdmin(admin.ModelAdmin):
pass
admin.site.register(Insurance, InsuranceAdmin)
class PatientAdmin(admin.ModelAdmin):
pass
admin.site.register(Patient, PatientAdmin)
class AppointmentAdmin(admin.ModelAdmin):
pass
admin.site.register(Appointment, AppointmentAdmin)
class PatientDischargeDetailsAdmin(admin.ModelAdmin):
pass
admin.site.register(PatientDischargeDetails, PatientDischargeDetailsAdmin)
class Patient_LabTest_RecordsAdmin(admin.ModelAdmin):
pass
admin.site.register(Patient_LabTest_Records,Patient_LabTest_RecordsAdmin)
class LabTestsAdmin(admin.ModelAdmin):
pass
admin.site.register(LabTests,LabTestsAdmin)
class LabStaffAdmin(admin.ModelAdmin):
pass
admin.site.register(LabStaff,LabStaffAdmin)
class DiagnosisAdmin(admin.ModelAdmin):
pass
admin.site.register(Diagnosis, DiagnosisAdmin)
class PrescriptionAdmin(admin.ModelAdmin):
pass
admin.site.register(Prescription, PrescriptionAdmin)
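# Equivalent idiomatic form (illustrative): each pass-through admin above could
# instead be registered with the decorator syntax, e.g.
#     @admin.register(Doctor)
#     class DoctorAdmin(admin.ModelAdmin):
#         pass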
| 1.875
| 2
|
example/ex_orbit.py
|
NSLS-II/aphla
| 0
|
12778885
|
<filename>example/ex_orbit.py
import aphla as ap
import numpy as np
import matplotlib.pylab as plt
import time
print(ap.__path__)
ap.initNSLS2V1()
bpms = ap.getElements('BPM')
#trims = ap.getGroupMembers(['*', '[HV]COR'], op='intersection')
trims = ap.getElements('HCOR')[:30] + ap.getElements('VCOR')[-30:]
print "Bpms x Trims: (%d, %d)" % (len(bpms), len(trims) )
v0 = ap.getOrbit(spos=True)
k0 = []
for tr in trims:
#print tr
if u'x' in tr.fields(): k0.append([tr.sb, tr.x])
if u'y' in tr.fields(): k0.append([tr.sb, tr.y])
n1, n2 = 10, 20
ap.correctOrbit([e.name for e in bpms[n1:n2]], [e.name for e in trims],
scale=0.7, repeat=3)
#ap.correctOrbit(scale=0.7, repeat=9)
#Euclidian norm: ...
time.sleep(4)
v1 = ap.getOrbit(spos=True)
time.sleep(4)
v2 = ap.getOrbit(spos=True)
k1 = []
for tr in trims:
#print tr
if u'x' in tr.fields(): k1.append([tr.sb, tr.x])
if u'y' in tr.fields(): k1.append([tr.sb, tr.y])
# plotting
plt.clf()
fig = plt.figure(1, figsize=(12,9))
ax = fig.add_subplot(311)
ax.annotate("H orbit before/after correction", (0.03, 0.9),
xycoords='axes fraction')
ax.plot(v0[:,-1], v0[:,0], 'r-')
ax.plot(v1[:,-1], v1[:,0], 'g--')
ax.plot(v2[n1:n2,-1], v2[n1:n2,0], 'g-o')
#ax.legend()
ax = fig.add_subplot(312)
ax.annotate("V orbit before/after correction", (0.03, 0.9),
xycoords='axes fraction')
ax.plot(v0[:,-1], v0[:,1], 'r-', label='Y')
ax.plot(v1[:,-1], v1[:,1], 'g--', label='Y')
ax.plot(v2[n1:n2,-1], v2[n1:n2,1], 'g-o')
ax = fig.add_subplot(313)
k0, k1 = np.array(k0), np.array(k1)
ax.plot(k0[:,0], k0[:,1], 'r--v')
ax.plot(k1[:,0], k1[:,1], 'g-o')
plt.savefig("hla_tut_orbit_correct.png")
| 2.296875
| 2
|
core/utils.py
|
0xdia/BrainyBot
| 29
|
12778886
|
import discord
from collections.abc import Sequence
import json
import os
import requests
from types import SimpleNamespace
import sys
from core.errors import *
import base64
def loads_to_object(json_file):
    """
    Loads a JSON file into a Python object, exposing the dictionary
    keys as attributes.
    """
    with open(json_file, "r") as f:
        return json.loads(f.read(), object_hook=lambda d: SimpleNamespace(**d))
if not os.path.isfile("config.json"):
sys.exit("'config.json' not found! Please add it and try again.")
else:
config = loads_to_object("config.json")
async def getchannel(bot, id):
channel = bot.get_channel(id)
if not channel:
try:
channel = await bot.fetch_channel(id)
except discord.InvalidData:
channel = None
except discord.HTTPException:
channel = None
return channel
async def getuser(bot, id):
user = bot.get_user(id)
if not user:
user = await bot.fetch_user(id)
return user
async def getguild(bot, id):
guild = bot.get_guild(id)
if not guild:
guild = await bot.fetch_guild(id)
return guild
async def send_embed(context, title, description, color=int(config.EMBED_COLOR, 16)):
embed = discord.Embed(
title=title,
description=description,
color=color
)
await context.send(embed=embed)
def upload_file_to_github(file_path, file_name, repo_name, owner, branch_name, token):
url = "https://api.github.com/repos/"+owner+'/'+repo_name+"/contents/"+file_name
headers = {
"Authorization": "token " + token,
"Accept": "application/vnd.github.v3.raw",
"Content-Type": "application/json"
}
with open(file_path, "rb") as file:
data = {
"message": "Uploaded " + file_name + " to " + branch_name,
"content": base64.b64encode(file.read()).decode("utf-8")
}
response = requests.put(url, data=json.dumps(data), headers=headers)
if response.status_code == 201:
return response.json()["content"]["html_url"]
else:
return None
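# Usage sketch (illustrative; every argument value below is a placeholder):
#     url = upload_file_to_github("out/report.txt", "report.txt", "my-repo",
#                                 "my-user", "main", config.GITHUB_TOKEN)
#     if url is None:
#         ...  # upload failed (non-201 response)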
| 2.609375
| 3
|
dictionary/migrations/__init__.py
|
Sanquira/immortalfighters
| 0
|
12778887
|
# migrations.RunPython(race.initialize_races),
# migrations.RunPython(profession.init_professions),
# migrations.RunPython(spell.initialize_spell_directions),
# migrations.RunPython(skill.init_ranks_and_difficulty),
# migrations.RunPython(skill.init_skills),
# migrations.RunPython(beast.init_weakness),
# migrations.RunPython(sizes.init_creature_size),
# migrations.RunPython(beast.init_category)
| 1.453125
| 1
|
covid19_cases/hk_database/helperfunc.py
|
wtydavid99/COVID-19-cases
| 0
|
12778888
|
<reponame>wtydavid99/COVID-19-cases
import os
from datetime import date
import pandas as pd
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def get_cases_tdy_by_loc(driver, location):
"""Get_cases_tdy_by_loc returns a DataFrame that reports covid-19 cases today of the target location."""
# Locate the search button and inputs the target location
    search = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "input[type='search']"))
    )
search.clear()
search.send_keys(location)
search.send_keys(Keys.RETURN)
# Click the 'Now' button the get cases today
now_button = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.LINK_TEXT, "Now"))
)
now_button.click()
# Retrieve a DataFrame with covid-19 cases today of target location by parsing html
html = driver.page_source
df = pd.read_html(html)[0]
return df
def export(df, location, dir):
"""This function exports the scraped data into location.csv file.
If location.xlsx does not exist, append to the existing excel file otherwise."""
# replace first column by date
today = date.today()
d = today.strftime("%b-%d-%Y")
hk_df = df.iloc[[0]].copy()
hk_df.iloc[:, 0] = d
# name the file name by location
filename = f"{location}.csv"
export_path = os.path.join(dir, filename)
if not os.path.isfile(export_path):
hk_df.to_csv(export_path, index=False)
    elif d not in pd.read_csv(export_path).iloc[:, 0].values:  # hk_df's date column is always d, so check the file
hk_df.to_csv(export_path, mode="a", header=False, index=False)
return export_path
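# Usage sketch (illustrative; the dashboard URL and output directory are
# assumptions, and the driver would be created elsewhere, e.g. via
# selenium.webdriver.Chrome()):
#     df = get_cases_tdy_by_loc(driver, "Hong Kong")
#     export(df, "Hong Kong", "./data")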
| 3.0625
| 3
|
PCI_o_B/DAMfile.py
|
MatteoMilani95/PCI_o_Bpy
| 1
|
12778889
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 1 13:49:13 2021
@author: Matteo
"""
import numpy as np
import matplotlib.pyplot as plt
import PCI_o_B
from PCI_o_B import CIfile as CI
from PCI_o_B import G2file as g2
from PCI_o_B import SharedFunctions as sf
class DAM(g2.G2):
def __init__(self,FolderName,CI,nROI,tau):
super().__init__(FolderName,CI,nROI,tau)
self.n_intervals = 0
self.tauDAM= []
self.g2DAM = []
self.g2varDAM = []
def __str__(self):
        # TODO: rewrite this summary block
str_res = '\n|---------------|'
str_res += '\n| CIbead class: '
str_res += '\n|--------------------+--------------------|'
str_res += '\n| filelist : ' + str(self.ROIfilelist)
str_res += '\n| folder : ' + str(self.FolderName)
str_res += '\n| number of ROIs : ' + str(self.nROI)
str_res += '\n| ROIs size : ' + str(self.GetROIsize()) + ' px'
str_res += '\n| lag time : ' + str(self.lag)
str_res += '\n| x for theta(x)= 90° : ' + str(self.Center) + 'px'
str_res += '\n| Radius bead : ' + str(self.Center) +'px'
#str_res += '\n| Window of interest top : ' + str(self.GetWINDOWtop()) + ' px'
str_res += '\n|--------------------+--------------------|'
return str_res
def DAMCalculation(self,n_intervals):
self.n_intervals = n_intervals
l_intervals = int(len(self.CI[0]) / n_intervals )
time_list = []
for i in range(n_intervals):
time_list.append(i*l_intervals)
#calculation of the g2 for each roi for each interval
for i in range(n_intervals-1):
super().G2Calculation(time_list[i],time_list[i+1])
self.g2DAM.append(self.g2)
self.tauDAM.append(np.asarray(self.tau))
self.g2varDAM.append(self.g2var)
self.g2 = []
self.g2var = []
#self.tau = []
super().G2Calculation(time_list[-1],len(self.CI[0]))
self.g2DAM.append(self.g2)
self.g2varDAM.append(self.g2var)
self.tauDAM.append(np.asarray(self.tau))
'''
for i in range(n_intervals):
self.tauDAM[i].tolist()
print(type(self.tauDAM[i]))
print(len(self.tauDAM[i]))
'''
return
def DAMFitSingleDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitSingleDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime1 = []
self.decaytime1err = []
return
def DAMFitStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime1 = []
self.decaytime1err = []
return
def DAMFitDoubleDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitDoubleDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
def DAMFitSingleStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitSingleStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
def DAMFitDoubleStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitDoubleStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
| 2.28125
| 2
|
app/public/views.py
|
dev-johnlopez/astrix
| 0
|
12778890
|
# -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import (
Blueprint,
current_app,
flash,
redirect,
render_template,
request,
url_for,
)
from flask_login import login_required, login_user, logout_user
from app.extensions import login_manager
from app.public.forms import LoginForm, ContactForm
from app.user.forms import RegisterForm
from app.user.models import User
from app.utils import flash_errors, flash_success
from app.emails import send_new_contact_form_email
blueprint = Blueprint("public", __name__, static_folder="../static")
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route("/", methods=["GET", "POST"])
def home():
"""Home page."""
form = LoginForm(request.form)
current_app.logger.info("Hello from the home page!")
# Handle logging in
if request.method == "POST":
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", "success")
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route("/contact", methods=["GET", "POST"])
def contact():
"""Home page."""
form = ContactForm()
current_app.logger.info("Hello from the contact page!")
    # Handle the contact form submission
if request.method == "POST":
current_app.logger.info("POSTING!")
if form.validate_on_submit():
current_app.logger.info("SUCCESS!")
current_app.logger.info("SUCCESS!")
current_app.logger.info("SUCCESS!")
current_app.logger.info("SUCCESS!")
current_app.logger.info("SUCCESS!")
send_new_contact_form_email(form)
flash_success("We received your message!")
else:
flash_errors(form)
return render_template("public/contact.html", form=form)
@blueprint.route("/demo", methods=["GET", "POST"])
def demo():
return render_template("public/demo.html")
@blueprint.route("/logout/")
@login_required
def logout():
"""Logout."""
logout_user()
flash("You are logged out.", "info")
return redirect(url_for("public.home"))
@blueprint.route("/register/", methods=["GET", "POST"])
def register():
"""Register new user."""
form = RegisterForm(request.form)
if form.validate_on_submit():
User.create(
username=form.username.data,
email=form.email.data,
password=<PASSWORD>,
active=True,
)
flash("Thank you for registering. You can now log in.", "success")
return redirect(url_for("public.home"))
else:
flash_errors(form)
return render_template("public/register.html", form=form)
@blueprint.route("/about/")
def about():
"""About page."""
form = LoginForm(request.form)
return render_template("public/about.html", form=form)
| 2.390625
| 2
|
empower/managers/ranmanager/vbsp/vbshandler.py
|
joncnet/empower-runtime
| 0
|
12778891
|
<filename>empower/managers/ranmanager/vbsp/vbshandler.py
#!/usr/bin/env python3
#
# Copyright (c) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VBSP Handlers."""
import empower.managers.apimanager.apimanager as apimanager
from empower.core.etheraddress import EtherAddress
# pylint: disable=W0223
class VBSHandler(apimanager.EmpowerAPIHandler):
"""Handler for accessing VBSes."""
URLS = [r"/api/v1/vbses/?",
r"/api/v1/vbses/([a-zA-Z0-9:]*)/?"]
@apimanager.validate(max_args=1)
def get(self, *args, **kwargs):
"""List devices.
Args:
[0]: the device address (optional)
Example URLs:
GET /api/v1/vbses
[
{
"addr": "00:00:00:00:00:01",
"cells": {},
"connection": null,
"desc": "Ettus B210",
"last_seen": 0,
"last_seen_ts": "1970-01-01T01:00:00.000000Z",
"period": 0,
"state": "disconnected"
}
]
GET /api/v1/vbses/00:00:00:00:00:01
{
"addr": "00:00:00:00:00:01",
"cells": {},
"connection": null,
"desc": "Ettus B210",
"last_seen": 0,
"last_seen_ts": "1970-01-01T01:00:00.000000Z",
"period": 0,
"state": "disconnected"
}
"""
return self.service.devices \
if not args else self.service.devices[EtherAddress(args[0])]
@apimanager.validate(returncode=201, min_args=0, max_args=0)
def post(self, *args, **kwargs):
"""Add a new device.
Request:
version: protocol version (1.0)
addr: the device address (mandatory)
desc: a human readable description of the device (optional)
Example URLs:
POST /api/v1/vbses
{
"version":"1.0",
"addr": "00:00:00:00:00:01"
}
POST /api/v1/vbses
{
"version":"1.0",
"addr": "00:00:00:00:00:01",
"desc": "Ettus B210"
}
"""
addr = EtherAddress(kwargs['addr'])
if 'desc' in kwargs:
device = self.service.create(addr, kwargs['desc'])
else:
device = self.service.create(addr)
self.set_header("Location", "/api/v1/vbses/%s" % device.addr)
@apimanager.validate(returncode=204, min_args=0, max_args=1)
def delete(self, *args, **kwargs):
"""Delete one or all devices.
Args:
[0]: the device address
Example URLs:
DELETE /api/v1/vbses
DELETE /api/v1/vbses/00:00:00:00:00:01
"""
if args:
self.service.remove(EtherAddress(args[0]))
else:
self.service.remove_all()
| 1.914063
| 2
|
app/index/routes.py
|
lambda-science/IMPatienT
| 5
|
12778892
|
from app.index import bp
from flask import render_template
@bp.route("/")
def index():
"""View function for the Index page
Returns:
str: HTML template for the Index page
"""
return render_template("index.html")
| 2.34375
| 2
|
app/misc/inline_constructor/models/button.py
|
vitaliy-ukiru/math-bot
| 1
|
12778893
|
<reponame>vitaliy-ukiru/math-bot
__all__ = (
"Button",
)
from typing import Optional
from aiogram.types import InlineKeyboardButton
from .base import ButtonTypes, BaseObject
from app.keyboards import custom_gen_cb
from app.utils.exceptions import ConstructorException
def _format_button_type(obj_type: str) -> str:
"""
    Map the accepted (original) type names to the internal types.
    :param obj_type: The source type
    :return: The internal type
"""
_types = {
'switch_inline_query_current_chat': ButtonTypes.INLINE_CURRENT_CHAT,
'switch_inline_query': ButtonTypes.INLINE,
'inline_query_me_chat': ButtonTypes.INLINE_CURRENT_CHAT,
'inline_query': ButtonTypes.INLINE,
'callback_data': ButtonTypes.CALLBACK,
}
return _types.get(obj_type, obj_type)
def _generate_from_list(item: list) -> 'Button':
"""
    Build a button from a list of parameters.
    :param item: The parameter list
    :return: A new Button instance
"""
data = {'text': item[0]}
if len(item) > 1:
type_ = _format_button_type(item[1])
else:
type_ = ButtonTypes.CALLBACK
if len(item) > 2:
param_value = item[2]
else:
param_value = None
if type_ not in ButtonTypes.all():
raise ConstructorException(f'Invalid button type <{type_}>')
    # The `callback` and `url` types have the same names as their fields, so we
    # use the dict.get syntax, where the second argument is returned when the
    # key is not found. Otherwise the dict would look like {'callback': 'callback'}
param_key = {ButtonTypes.INLINE: 'inline_query',
ButtonTypes.INLINE_CURRENT_CHAT: 'inline_query_me_chat'
}.get(type_, type_)
data['button_type'] = type_
data[param_key] = param_value if param_value else 'default'
return Button(**data)
class Button(BaseObject):
"""
Описывает объект кнопки
"""
text: str
button_type: ButtonTypes = ButtonTypes.CALLBACK
callback: str = None
url: str = None
inline_query: str = None
inline_query_me_chat: str = None
def __init__(self, text, button_type, callback=None, url=None, inline_query=None,
inline_query_me_chat=None):
if callback is not None and callback.startswith('gen:'):
callback = callback.removeprefix('gen:')
self.text = text
self.button_type = button_type
self.callback = callback
self.url = url
self.inline_query = inline_query
self.inline_query_me_chat = inline_query_me_chat
super(Button, self).__init__(
text=text, callback=callback, url=url, inline_query=inline_query,
inline_query_me_chat=inline_query_me_chat
)
@property
def _param_key(self) -> str:
""" Возвращает именование поля кнопки с данными """
return {
ButtonTypes.INLINE: 'inline_query',
ButtonTypes.INLINE_CURRENT_CHAT: 'inline_query_me_chat'
}.get(self.button_type, self.button_type)
@property
def payload_parameter(self) -> str:
""" Возвращает полезную нагрузку кнопки. """
return getattr(self, self._param_key)
@property
def tg_callback(self) -> Optional[str]:
""" Сгенерированный CallbackData для бота """
if self.callback is not None:
return custom_gen_cb.new(value=self.callback)
else:
return None
def to_constructor(self) -> str:
""" Генерация кнопки в формате конструктора, с поддерживаемыми сокращениями """
obj_type = self.button_type
obj_payload = self.payload_parameter
        if obj_type == ButtonTypes.CALLBACK and obj_payload == "default":  # to support shorthands
return self.text
return ' / '.join((self.text, obj_type, obj_payload))
def to_python(self) -> dict:
""" Преобразование кнопки в стандартный объект словаря """
result = super(Button, self).to_python()
        if self.callback == "default":  # to support shorthands
del result["callback"]
return result
def to_api(self) -> InlineKeyboardButton:
""" Генерация кнопки как объект телеграма """
return InlineKeyboardButton(
text=self.text,
callback_data=self.tg_callback,
url=self.url,
switch_inline_query=self.inline_query,
switch_inline_query_current_chat=self.inline_query_me_chat
)
@classmethod
def from_list(cls, data_of_button: list) -> 'Button':
"""
Генерирует кнопку из списка
:param data_of_button: Информация о кнопке в виде списка
:return: Экземпляр кнопки
:rtype: :obj:`Button`
"""
return _generate_from_list(data_of_button)
@classmethod
    def from_json(cls, data: Union[dict, list[dict]]) -> Union[list['Button'], 'Button']:
"""
Генерация кнопки из JSON формата
:param data: Информация в раскодированном JSON
:return: Экземпляр кнопки
:rtype: :obj:`Button`
"""
if len(data) not in range(1, 3):
raise ConstructorException(f"Invalid keys count: {len(data)} in {data!r}")
if len(data) == 1:
return cls.from_list([data['text']])
data = data.copy()
text = data.pop('text')
payload_key = tuple(data.keys())[0]
return cls.from_list([text, _format_button_type(payload_key), data[payload_key]])
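# A minimal usage sketch (values are hypothetical; it assumes ButtonTypes
# members are plain strings and that `custom_gen_cb` is registered in the
# surrounding bot so the generated callback resolves):
#
#   btn = Button.from_list(["Press me", "callback_data", "my-action"])
#   markup_button = btn.to_api()   # aiogram InlineKeyboardButton
#   line = btn.to_constructor()    # e.g. "Press me / callback / my-action"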
| 2.296875
| 2
|
withNoise/SynthGAN_Noise.py
|
lelynn/RF_GANsynth
| 0
|
12778894
|
<filename>withNoise/SynthGAN_Noise.py
import torch.nn as nn
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import torch.utils.data as dataset
from tqdm import tqdm
import model_file
import module_Noise as module
import RF_module as RF
import torchvision.transforms as transforms
import os
# =================================================
# D O U B L E C H E C K B E F O R E R U N :
# =================================================
load_epoch = True
epoch_loaded = 966
old_runname = 'RFSynthGAN_Noise_gl_0.5'
runname = 'RFSynthGAN_Noise_gl_0.05'
images_set = 'synthetic'
# =================================================
manualSeed = 999
random.seed(manualSeed)
torch.manual_seed(manualSeed)
device = 1
cuda0 = torch.device(f'cuda:{device}')
batch_size = 32
all_image_size = 96
num_epochs = 1000
lr = 0.0002
# Beta1 hyperparam for Adam optimizers
beta1 = 0.8
vgg_beta = 1
G_beta = 0.05
# -----
# Models
# -----
in_channels=192
netG = model_file.ResblocksDeconv(in_channels, (all_image_size,all_image_size))
if load_epoch:
netG.load_state_dict(torch.load(f'{old_runname}/netG_epochs_{epoch_loaded}.model'))
else:
netG.apply(module.weights_init)
netD = module.Discriminator().to(device)
if load_epoch:
netD.load_state_dict(torch.load(f'{old_runname}/netD_epochs_{epoch_loaded}.model', map_location='cpu'))
else:
netD.apply(module.weights_init)
if __name__ == '__main__':
if device >= 0:
netG.cuda(device)
netD.cuda(device)
lossFunction = nn.BCELoss()
vgg_lossFunction = module.VGGLoss(device)
if in_channels == 3:
inputtype = 'V1_V4'
if in_channels == 192:
inputtype = 'all_channels'
# -----
# RF gaus maps
# ------
gaus = module.load_gausdata(size= '96')
seen_images = module.load_ydata(None, size='96')
seen_images_torch = torch.from_numpy(seen_images)
# ------
# Training
# ------
dot_numbers_train = np.load(f'training/training_{images_set[:5]}191final.npy')
training_iterator = module.make_iterator_unique(dot_numbers_train, 'training', batch_size, shuffle = True)
# ------
# Testing
# ------
dot_numbers_test = np.load(f'testing/testing_{images_set[:5]}191final.npy')
testing_iterator = module.make_iterator_unique(dot_numbers_test, 'testing', batch_size, shuffle = False)
real_label = 1
fake_label = 0
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
hori_means, verti_means, std_avg = RF.extract_means_std()
confidence_mask = RF.make_confidence_mask(hori_means, verti_means, std_avg, size = 96)
confidence_mask = torch.from_numpy(confidence_mask.astype('float32')).to(cuda0)
# Losses to append for TRAINING:
G_vgg_losses_train=[]
G_losses_train=[]
vgg_losses_train=[]
D_losses_train=[]
D_losses_real_train=[]
D_losses_fake_train=[]
# Losses to append for TESTING:
G_vgg_losses_test=[]
G_losses_test=[]
vgg_losses_test=[]
D_losses_test=[]
D_losses_real_test=[]
D_losses_fake_test=[]
iters = 0
for epoch in range(num_epochs):
netG.train(True)
G_vgg_loss_train = 0
G_loss_train = 0
vgg_loss_train = 0
D_loss_train = 0
D_loss_real_train = 0
D_loss_fake_train = 0
for dot_number, img_indices in tqdm(training_iterator, total=len(training_iterator)):
# -----
# Inputs
# -----
gaus_expand_to_batch = gaus.expand([len(img_indices), 191, all_image_size, all_image_size])
weight_images = dot_number[:,:,np.newaxis, np.newaxis].expand([len(img_indices), 191, all_image_size, all_image_size])
fixed_noise = torch.randn(len(img_indices), 1, 96, 96)
            # Expand the dot number to the same shape as gaus, then multiply
            # it with gaus_expand_to_batch.
inputs = module.select_type_inputs(inputtype, gaus_expand_to_batch, weight_images, fixed_noise)
inputs = inputs.to(cuda0)
# -----
# Targets
# -----
target_batch = seen_images_torch[img_indices]
target_batch = target_batch.transpose(3,1).transpose(2,3)
target_batch = target_batch.to(cuda0)
target_batch *= confidence_mask.expand_as(target_batch)
# ==================================================================
# D I S C R I M I N A T O R| Maximizing log(D(x)) + log(1 - D(G(z)))
# ==================================================================
netD.zero_grad()
netG.zero_grad()
# -------------------------
# Train discr. on REAL img
# -------------------------
b_size = target_batch.size(0)
label = torch.full((b_size,), real_label, device=device)
            label.fill_(real_label)  # real labels for the discriminator's real batch
outputDreal = netD(target_batch).view(-1)
errD_real = lossFunction(outputDreal, label)
# -------------------------
# Train discr. on FAKE img
# -------------------------
outputGfake = netG(inputs)
outputGfake *= confidence_mask.expand_as(outputGfake)
label = torch.full((b_size,), fake_label, device=device)
label.fill_(fake_label)
outputDfake = netD(outputGfake.detach()).view(-1)
errD_fake = lossFunction(outputDfake, label)
# Add the gradients from the all-real and all-fake batches
errD = errD_real + errD_fake
errD.backward()
optimizerD.step()
# ==================================================================
# G E N E R A T O R| maximize log(D(G(z)))
# ==================================================================
# ------------------------------------------------------------------
# Train generator to fool the discriminator and learn target images
# ------------------------------------------------------------------
netG.zero_grad()
label = torch.full((b_size,), real_label, device=device)
label.fill_(real_label)
# ------------------------------------------------------------------
# a forward pass through the generator
# ------------------------------------------------------------------
outputGfake = netG(inputs)
outputGfake *= confidence_mask.expand_as(outputGfake)
outputDfake = netD(outputGfake).view(-1)
# ------------------------------------------------------------------
            # Fake images (as judged by the discriminator) should become more real
# ------------------------------------------------------------------
errG = lossFunction(outputDfake, label)
# ------------------------------------------------------------------
# WHILE using vgg loss to generate closer to target.
# ------------------------------------------------------------------
vgg_loss = vgg_lossFunction(outputGfake, target_batch)
# ------------------------------------------------------------------
# Combine both losses: with a beta value for vgg
# ------------------------------------------------------------------
errG_vgg = (errG* G_beta) + (vgg_loss * vgg_beta)
errG_vgg.backward()
optimizerG.step()
G_vgg_loss_train += errG_vgg.sum().item()
G_loss_train += errG.sum().item()
vgg_loss_train += vgg_loss.sum().item()
D_loss_train += errD.sum().item()
D_loss_real_train += errD_real.sum().item()
D_loss_fake_train += errD_fake.sum().item()
G_vgg_losses_train.append(G_vgg_loss_train/len(training_iterator.sampler))
G_losses_train.append(G_loss_train/len(training_iterator.sampler))
vgg_losses_train.append(vgg_loss_train/len(training_iterator.sampler))
D_losses_train.append(D_loss_train/len(training_iterator.sampler))
D_losses_real_train.append(D_loss_real_train/len(training_iterator.sampler))
D_losses_fake_train.append(D_loss_fake_train/len(training_iterator.sampler))
# ------------------
# TESTING
# ------------------
with torch.no_grad():
netG.train(False)
netG.eval()
G_vgg_loss_test = 0
G_loss_test = 0
vgg_loss_test = 0
D_loss_test = 0
D_loss_real_test = 0
D_loss_fake_test = 0
for dot_number, img_indices in tqdm(testing_iterator, total=len(testing_iterator)):
# -----
# Inputs
# -----
gaus_expand_to_batch = gaus.expand([len(img_indices), 191, all_image_size, all_image_size])
weight_images = dot_number[:,:,np.newaxis, np.newaxis].expand([len(img_indices), 191, all_image_size, all_image_size])
fixed_noise = torch.randn(len(img_indices), 1, 96, 96)
                # Expand the dot number to the same shape as gaus, then multiply
                # it with gaus_expand_to_batch.
inputs = module.select_type_inputs(inputtype, gaus_expand_to_batch, weight_images, fixed_noise)
inputs = inputs.to(cuda0)
# -----
# Targets
# -----
target_batch = seen_images_torch[img_indices]
target_batch = target_batch.transpose(3,1).transpose(2,3)
target_batch = target_batch.to(cuda0)
target_batch *= confidence_mask.expand_as(target_batch)
# ==================================================================
# D I S C R I M I N A T O R| testing
# ==================================================================
# -------------------------
# TEST discr. on REAL img
# -------------------------
b_size = target_batch.size(0)
label = torch.full((b_size,), real_label, device=device)
                label.fill_(real_label)  # real labels for the discriminator's real batch
outputDreal = netD(target_batch).view(-1)
errD_real = lossFunction(outputDreal, label)
# -------------------------
# TEST discr. on FAKE img
# -------------------------
outputGfake = netG(inputs)
outputGfake *= confidence_mask.expand_as(outputGfake)
label = torch.full((b_size,), fake_label, device=device)
label.fill_(fake_label)
outputDfake = netD(outputGfake.detach()).view(-1)
errD_fake = lossFunction(outputDfake, label)
# Add the gradients from the all-real and all-fake batches
errD = errD_real + errD_fake
# ==================================================================
# G E N E R A T O R| testing
# ==================================================================
# ------------------------------------------------------------------
# TESTING generator: does it fool discrim?
# ------------------------------------------------------------------
label = torch.full((b_size,), real_label, device=device)
label.fill_(real_label)
# ------------------------------------------------------------------
# a forward pass through the generator
# ------------------------------------------------------------------
outputGfake = netG(inputs)
outputGfake *= confidence_mask.expand_as(outputGfake)
outputDfake = netD(outputGfake).view(-1)
# ------------------------------------------------------------------
                # Fake images (as judged by the discriminator) should become more real
# ------------------------------------------------------------------
errG = lossFunction(outputDfake, label)
# ------------------------------------------------------------------
# WHILE using vgg loss to generate closer to target.
# ------------------------------------------------------------------
vgg_loss = vgg_lossFunction(outputGfake, target_batch)
# ------------------------------------------------------------------
# Combine both losses: with a beta value for vgg
# ------------------------------------------------------------------
errG_vgg = (errG * G_beta) + (vgg_loss * vgg_beta)
G_vgg_loss_test += errG_vgg.sum().item()
G_loss_test += errG.sum().item()
vgg_loss_test += vgg_loss.sum().item()
D_loss_test += errD.sum().item()
D_loss_real_test += errD_real.sum().item()
D_loss_fake_test += errD_fake.sum().item()
G_vgg_losses_test.append(G_vgg_loss_test/len(testing_iterator.sampler))
G_losses_test.append(G_loss_test/len(testing_iterator.sampler))
vgg_losses_test.append(vgg_loss_test/len(testing_iterator.sampler))
D_losses_test.append(D_loss_test/len(testing_iterator.sampler))
D_losses_real_test.append(D_loss_real_test/len(testing_iterator.sampler))
D_losses_fake_test.append(D_loss_fake_test/len(testing_iterator.sampler))
# ===================
# S A V I N G: losses
# ===================
# ------------------------------------------------------------------
# TRAINING
# ------------------------------------------------------------------
os.makedirs(runname, exist_ok=True)
np.save(f'{runname}/G_vgg_loss_train', np.array(G_vgg_losses_train))
np.save(f'{runname}/Gloss_train', np.array(G_losses_train))
np.save(f'{runname}/vggloss_train', np.array(vgg_losses_train))
np.save(f'{runname}/Dloss_train', np.array(D_losses_train))
np.save(f'{runname}/Dloss_real_train', np.array(D_losses_real_train))
np.save(f'{runname}/Dloss_fake_train', np.array(D_losses_fake_train))
# ------------------------------------------------------------------
# TESTING
# ------------------------------------------------------------------
np.save(f'{runname}/G_vgg_loss_test', np.array(G_vgg_losses_test))
np.save(f'{runname}/Gloss_test', np.array(G_losses_test))
np.save(f'{runname}/vggloss_test', np.array(vgg_losses_test))
np.save(f'{runname}/Dloss_test', np.array(D_losses_test))
np.save(f'{runname}/Dloss_real_test', np.array(D_losses_real_test))
np.save(f'{runname}/Dloss_fake_test', np.array(D_losses_fake_test))
if load_epoch:
torch.save(netG.state_dict(), f'{runname}/netG_epochs_{epoch+epoch_loaded+1}.model')
torch.save(netD.state_dict(), f'{runname}/netD_epochs_{epoch + epoch_loaded + 1}.model')
print('epochs: ', epoch+epoch_loaded+1)
else:
torch.save(netG.state_dict(), f'{runname}/netG_epochs_{epoch}.model')
torch.save(netD.state_dict(), f'{runname}/netD_epochs_{epoch}.model')
print('epochs: ', epoch)
torch.save(netG.state_dict(), f'{runname}/netG_final.model')
torch.save(netD.state_dict(), f'{runname}/netD_final.model')
| 2.09375
| 2
|
house/house.py
|
devksingh4/imsa-csi-python
| 0
|
12778895
|
<filename>house/house.py
# <NAME>, House, 4/9/2020
from graphics import GraphWin, Rectangle, Point, Polygon, Text
import time # allows me to sleep the program
def main():
win = GraphWin("House", 600, 600)
win.setCoords(0,0,600,600)
Text(Point(300,10),"5 Click House").draw(win)
# Draw the main house
p1 = win.getMouse()
p2 = win.getMouse()
Rectangle(p1, p2).draw(win)
# Draw the door
    con = abs(p1.x - p2.x) / 5  # one fifth of the house width
p3 = win.getMouse()
d1 = Point(p3.x + con / 2, p3.y)
d2 = Point(p3.x - con / 2, p1.y)
Rectangle(d1, d2).draw(win)
# Draw the window
p4 = win.getMouse()
w1 = Point(p4.x - con / 4, p4.y + con / 4)
w2 = Point(p4.x + con / 4, p4.y - con / 4)
Rectangle(w1, w2).draw(win)
p5 = win.getMouse()
Polygon(p2, Point(p1.x, p2.y), p5).draw(win)
    Text(Point(300,590),"I hope you liked my house!!").draw(win)
time.sleep(10) # sleep the thread for 10 seconds
main()
| 3.265625
| 3
|
Desafios/Mundo 2/ex051.py
|
ZaikoXander/Python
| 0
|
12778896
|
<filename>Desafios/Mundo 2/ex051.py<gh_stars>0
print('\033[1;97m-' * 23)
print('| \033[91m10 TERMOS DE UMA PA \033[97m|')
print('-' * 23)
ptermo = int(input('Primeiro termo: '))
razao = int(input('Razão: '))
decimo = ptermo + (10 - 1) * razao
print()
for c in range(ptermo, decimo + razao, razao):
print('\033[97m{}'.format(c), end=' \033[91m➝ ')
print('ACABOU')
| 3.5625
| 4
|
src/masonite/managers/__init__.py
|
Abeautifulsnow/masonite
| 95
|
12778897
|
<filename>src/masonite/managers/__init__.py
from .Manager import Manager
from .AuthManager import AuthManager
from .BroadcastManager import BroadcastManager
from .CacheManager import CacheManager
from .MailManager import MailManager
from .QueueManager import QueueManager
from .SessionManager import SessionManager
from .StorageManager import StorageManager
from .UploadManager import UploadManager
| 1.25
| 1
|
logger.py
|
ranihorev/arxiv-sanity-preserver
| 81
|
12778898
|
<filename>logger.py
import logging
from logging.config import dictConfig
def logger_config(path='', info_filename='info.log', num_backups=5):
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
handlers = {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "f"
},
"info_file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "f",
"filename": path + info_filename,
"maxBytes": 10485760,
"backupCount": num_backups,
"encoding": "utf8"
}
}
loggers = {
"elasticsearch": {
"level": "WARNING",
"propagate": "no"
},
"urllib3": {
"level": "WARNING",
"propagate": "no"
},
"tweepy": {
"level": "WARNING",
"propagate": "no"
},
"prawcore": {
"level": "WARNING",
"propagate": "no"
},
"requests": {
"level": "WARNING",
"propagate": "no"
},
}
    root_handlers = ["console", "info_file_handler"]
logging_config = dict(
version=1,
disable_existing_loggers=False,
formatters={
'f': {'format': '%(asctime)s - %(name)-12s %(levelname)-8s %(message)s'},
'syslog_f': {}
},
handlers=handlers,
loggers=loggers,
root={
"level": "DEBUG",
"handlers": root_hanlders
}
)
dictConfig(logging_config)
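# A minimal usage sketch (the target directory must already exist, since the
# rotating file handler opens `path + info_filename` eagerly):
#
#   logger_config(path='logs/', info_filename='app.log')
#   logging.getLogger(__name__).info('logging configured')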
| 2.5625
| 3
|
ext2/fs/bgdt.py
|
mrfalcone/pyext2
| 1
|
12778899
|
<filename>ext2/fs/bgdt.py
#!/usr/bin/env python
"""
Defines internal classes for the block group descriptor table used by the ext2 module.
"""
__license__ = "BSD"
__copyright__ = "Copyright 2013, <NAME>"
from struct import pack,unpack_from
from math import ceil
from time import time
from ..error import FilesystemError
class _BGDTEntry(object):
"""Models an entry in the block group descriptor table. For internal use only."""
@property
def blockBitmapLocation(self):
"""Gets the block id of the block bitmap for this block group."""
return self._blockBitmapBid
@property
def inodeBitmapLocation(self):
"""Gets the block id of the inode bitmap for this block group."""
return self._inodeBitmapBid
@property
def inodeTableLocation(self):
"""Gets the block id of the inode table for this block group."""
return self._inodeTableBid
@property
def numFreeBlocks(self):
"""Gets the number of free blocks."""
return self._numFreeBlocks
@numFreeBlocks.setter
def numFreeBlocks(self, value):
"""Sets the number of free blocks."""
self._numFreeBlocks = value
self.__writeData(12, pack("<H", self._numFreeBlocks))
@property
def numFreeInodes(self):
"""Gets the number of free inodes."""
return self._numFreeInodes
@numFreeInodes.setter
def numFreeInodes(self, value):
"""Sets the number of free inodes."""
self._numFreeInodes = value
self.__writeData(14, pack("<H", self._numFreeInodes))
@property
def numInodesAsDirs(self):
"""Gets the number of inodes used as directories."""
return self._numInodesAsDirs
@numInodesAsDirs.setter
def numInodesAsDirs(self, value):
"""Sets the number of inodes used as directories."""
self._numInodesAsDirs = value
self.__writeData(16, pack("<H", self._numInodesAsDirs))
def __init__(self, startPos, device, superblock, fields):
"""Creates a new BGDT entry from the given fields."""
self._superblock = superblock
self._device = device
self._startPos = startPos
self._blockBitmapBid = fields[0]
self._inodeBitmapBid = fields[1]
self._inodeTableBid = fields[2]
self._numFreeBlocks = fields[3]
self._numFreeInodes = fields[4]
self._numInodesAsDirs = fields[5]
def __writeData(self, offset, byteString):
"""Writes the specified string of bytes at the specified offset (from the start of the bgdt entry bytes)
on the device."""
for groupId in self._superblock.copyLocations:
groupStart = groupId * self._superblock.numBlocksPerGroup * self._superblock.blockSize
tableStart = groupStart + (self._superblock.blockSize * (self._superblock.firstDataBlockId + 1))
self._device.write(tableStart + self._startPos + offset, byteString)
if not self._superblock._saveCopies:
break
self._superblock.timeLastWrite = int(time())
class _BGDT(object):
"""Models the block group descriptor table for an Ext2 filesystem, storing information about
each block group. For internal use only."""
@property
def entries(self):
"""Gets the list of BGDT entries. Indexes are block group ids."""
return self._entries
@classmethod
def new(cls, bgNumCopy, superblock, device):
"""Creates a new BGDT at the specified block group number, along with bitmaps,
and returns the new object."""
startPos = (bgNumCopy * superblock.numBlocksPerGroup + superblock.firstDataBlockId + 1) * superblock.blockSize
numBgdtBlocks = int(ceil(float(superblock.numBlockGroups * 32) / superblock.blockSize))
inodeTableBlocks = int(ceil(float(superblock.numInodesPerGroup * superblock.inodeSize) / superblock.blockSize))
bgdtBytes = ""
for bgroupNum in range(superblock.numBlockGroups):
bgroupStartBid = bgroupNum * superblock.numBlocksPerGroup + superblock.firstDataBlockId
blockBitmapLocation = bgroupStartBid
inodeBitmapLocation = bgroupStartBid + 1
inodeTableLocation = bgroupStartBid + 2
numInodesAsDirs = 0
numUsedBlocks = 2 + inodeTableBlocks
if bgroupNum in superblock.copyLocations: # account for superblock and bgdt blocks
numUsedBlocks += (1 + numBgdtBlocks)
blockBitmapLocation += (1 + numBgdtBlocks)
inodeBitmapLocation += (1 + numBgdtBlocks)
inodeTableLocation += (1 + numBgdtBlocks)
numUsedInodes = 0
if bgroupNum == 0:
numUsedInodes += (superblock.firstInode - 1)
numFreeInodes = superblock.numInodesPerGroup - numUsedInodes
if bgroupNum != superblock.numBlockGroups - 1: # if not the final block group
numTotalBlocksInGroup = superblock.numBlocksPerGroup
else:
numTotalBlocksInGroup = superblock.numBlocks - bgroupStartBid
numFreeBlocks = numTotalBlocksInGroup - numUsedBlocks
if numFreeBlocks < 0:
raise FilesystemError("Not enough blocks specified.")
# if this is the first copy of the BGDT being written, also write new bitmaps
if bgNumCopy == 0:
fmt = ["B"] * superblock.blockSize
blockBitmap = [0] * superblock.blockSize
bitmapIndex = 0
for i in range(numUsedBlocks):
blockBitmap[bitmapIndex] <<= 1
blockBitmap[bitmapIndex] |= 1
if (i+1) % 8 == 0:
bitmapIndex += 1
# write end padding
padBitIndex = numTotalBlocksInGroup
          while padBitIndex < superblock.blockSize * 8:  # bitmap holds blockSize bytes = blockSize*8 bits
            blockBitmap[padBitIndex >> 3] |= (1 << (padBitIndex & 0x07))  # byte index = bit index // 8
padBitIndex += 1
blockBitmapBytes = "".join(map(pack, fmt, blockBitmap))
device.write(blockBitmapLocation * superblock.blockSize, blockBitmapBytes)
inodeBitmap = [0] * superblock.blockSize
bitmapIndex = 0
for i in range(numUsedInodes):
inodeBitmap[bitmapIndex] <<= 1
inodeBitmap[bitmapIndex] |= 1
if (i+1) % 8 == 0:
bitmapIndex += 1
inodeBitmapBytes = "".join(map(pack, fmt, inodeBitmap))
device.write(inodeBitmapLocation * superblock.blockSize, inodeBitmapBytes)
entryBytes = pack("<3I3H", blockBitmapLocation, inodeBitmapLocation, inodeTableLocation,
numFreeBlocks, numFreeInodes, numInodesAsDirs)
zeros = [0] * 14
fmt = ["B"] * 14
bgdtBytes = "{0}{1}{2}".format(bgdtBytes, entryBytes, "".join(map(pack, fmt, zeros)))
device.write(startPos, bgdtBytes)
return cls(bgdtBytes, superblock, device)
@classmethod
def read(cls, groupId, superblock, device):
"""Reads a BDGT at the specified group number and returns the new object."""
startPos = (groupId * superblock.numBlocksPerGroup + superblock.firstDataBlockId + 1) * superblock.blockSize
tableSize = superblock.numBlockGroups * 32
bgdtBytes = device.read(startPos, tableSize)
if len(bgdtBytes) < tableSize:
raise FilesystemError("Invalid block group descriptor table.")
return cls(bgdtBytes, superblock, device)
def __init__(self, bgdtBytes, superblock, device):
"""Constructs a new BGDT from the given byte array."""
self._entries = []
for i in range(superblock.numBlockGroups):
startPos = i * 32
fields = unpack_from("<3I3H", bgdtBytes, startPos)
self._entries.append(_BGDTEntry(startPos, device, superblock, fields))
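# A minimal usage sketch (`device` and `superblock` stand for the ext2
# module's own device/superblock objects; how they are obtained is outside
# this file, so the names here are assumptions):
#
#   bgdt = _BGDT.read(0, superblock, device)
#   for group_id, entry in enumerate(bgdt.entries):
#       print(group_id, entry.numFreeBlocks, entry.numFreeInodes)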
| 2.25
| 2
|
scripts/transform_wiki_to_openapi.py
|
will7200/go-crypto-sync
| 4
|
12778900
|
<reponame>will7200/go-crypto-sync
import os
import re
from ruamel.yaml import YAML
import pandas as pd
from io import StringIO
from urllib.parse import urlparse, parse_qs
import json
pd.options.display.max_columns = 7
pd.options.display.width = 200
example = """
# Acquire Market Statistics
* Request description: Acquire real-time market data
* Request type: GET
* Signature required: No
* Request Url: [https://api.coinex.com/v1/market/ticker?market=BCHBTC](https://api.coinex.com/v1/market/ticker?market=BCHBTC)
* Request parameter:
| name | type | required | description |
| :--- | :--- | :--- | :--- |
| market | String | Yes | See<API invocation description·market> |
* Return value description:
| name | type | description |
| :--- | :--- | :--- |
| date | String | server time when returning |
| last | String | latest price |
| buy | String | buy 1 |
| buy_amount | String | buy 1 amount|
| sell | String | sell 1 |
| sell_amount | String | sell 1 amount|
| open | String | 24H open price |
| high | String | 24H highest price |
| low | String | 24H lowest price |
| vol | String | 24H volume |
* Example:
```
# Request
GET https://api.coinex.com/v1/market/ticker?market=bchbtc
# Response
{
"code": 0,
"data": {
"date": 1513865441609, # server time when returning
"ticker": {
"buy": "10.00", # buy 1
"buy_amount": "10.00", # buy 1 amount
"open": "10", # highest price
"high": "10", # highest price
"last": "10.00", # latest price
"low": "10", # lowest price
"sell": "10.00", # sell 1
"sell_amount": "0.78", # sell 1 amount
"vol": "110" # 24H volume
}
},
"message": "Ok"
}
```
# Acquire All Market Data
* Request description: acquire all market data
* Request type: GET
* Signature required: No
* Request Url:[https://api.coinex.com/v1/market/ticker/all](https://api.coinex.com/v1/market/ticker/all)
* Request parameter:
None
* Return value description:
| name | type | description |
| :--- | :--- | :--- |
| date | String | server time when returning |
| buy | String | buy 1 |
| buy_amount | String | buy 1 amount|
| high | String | 24H highest price |
| last | String | latest price |
| low | String | 24H lowest price |
| sell | String | sell 1 |
| sell_amount | String | sell 1 amount|
| vol | String | 24H volumn |
* Example:
```
# Request
GET https://api.coinex.com/v1/market/ticker/all
# Response
{
"code": 0,
"data": {
"date": 1513865441609,
"ticker": {
"BCHBTC": {
"buy": "0.222",
"buy_amount": "0.1",
"open": "0.2322211",
"high": "0.2322211",
"last": "0.2322211",
"low": "0.222",
"sell": "0.3522211",
"sell_amount": "0.11",
"vol": "2.01430624"
},
}
},
"message": "Ok"
}
```
"""
api_regex = re.compile(r'[0-9]+([a-z]+)\_api', re.IGNORECASE | re.MULTILINE)
operation_regex = re.compile(r'^\#\ (.*)', re.MULTILINE)
details_regex = re.compile(
r'^\#\ (?P<operation_name>[A-Za-z \-_]+)\W+'
r'Request description:\W{0,1}(?P<description>[\w\W]+?)\W+'
r'Request type\:\W(?P<operation_type>[\w]+)\W+'
r'(Signature required:\W(?P<signature_required>[\w]+)\W+)?'
r'(Rate limit\W+(?P<rate_limit>.*)\W+)?'
r'(Request Header:\W(?P<request_header>[\w\W\<\>\\]+)\W+)?'
r'Request Url\:\[(?P<request_url>https\:\/\/[\w\W]*?)\]\(https://[\w\W]+?\)\W+'
r'(Request parameter:\W+?(?P<request_parameters>[\| \w\n\:\-\.\<\>\_]+|None)\W+?(?P<request_parameter_notes>[\w\W]*?)?\W+)?'
r'(Return value description:\W+(?P<response_details>\|[\w\W]+\|)?\W+)?'
r'Example\W+Request\W+(?P<request_example>.*)\W+'
r'(Request\.Body(?P<request_body_example>[\W\w]+)\W+)?'
r'Response\W+(?P<response_example>\{[\W\w]+\})\W+')
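# The `example` text above doubles as a quick self-test for the regex; a
# minimal sketch (note that spacing quirks such as "Request Url: [" with a
# space may keep some blocks from matching):
#
#   for m in details_regex.finditer(example):
#       print(m.group('operation_name'), m.group('operation_type'))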
api_endpoints = []
for root, dirs, files in os.walk("../tmp/coinex_exchange_api.wiki", topdown=False):
if '.git' in root:
continue
api_group = api_regex.findall(root)
if len(api_group) == 0 or api_group[0] == 'rest':
continue
group = []
for file in files:
with open(os.path.join(root, file), encoding='utf-8') as f:
data = f.read().strip('\n').replace('\u0008', '').replace(u'\xb7', ' ')
operations = list(
filter(lambda x: x != 'Request' and x != 'Response' and x != 'Request.Body', operation_regex.findall(data)))
if len(operations) == 1:
d = [m.groupdict() for m in details_regex.finditer(data)]
group.append(d)
if len(d) == 0:
raise Exception('error')
else:
sections = [data]
for operation in operations[::-1]:
section = sections.pop(0).split(f'{operation}', 1)
sections = [section[0]] + [f'# {operation}' + section[1]] + sections
sections = list(filter(lambda x: x != '', sections))
for section in sections:
if section == '# ':
continue
d = [m.groupdict() for m in details_regex.finditer(section)]
group.append(d)
api_endpoints.append((api_group, group))
dfs = []
for group_name, endpoints in api_endpoints:
for endpoint in endpoints:
df = pd.DataFrame.from_dict(endpoint)
df['group_name'] = group_name
dfs.append(df)
# combine all endpoint tables into a single frame
df = pd.concat(dfs)
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
from ruamel.yaml.representer import LiteralScalarString
yaml = YAML()
def str_presenter(dumper, data):
"""For handling multiline strings"""
    if len(data) > 72:  # emit long strings as literal block scalars
return dumper.represent_scalar("tag:yaml.org,2002:str", LiteralScalarString(data), style="|")
return dumper.represent_scalar("tag:yaml.org,2002:str", data)
yaml.representer.add_representer(str, str_presenter)
@yaml.register_class
class Dict(dotdict):
def get(self, key, default=lambda: Dict()):
val = dict.get(self, key)
if val is None and default is not None:
self[key] = default()
return dict.__getitem__(self, key)
@classmethod
def to_yaml(cls, representer, data):
if data is None:
            return representer.represent_none(data)
return representer.represent_dict(dict(data))
def set(self, value):
self = Dict(value)
base = yaml.load("""
openapi: 3.0.2
info:
title: CoinEx API
description: |
Open and simple, CoinEx API makes sure that you can build your own trading tools to achieve a more effective trading strategy. CoinEx API is now available for these features:
version: 2021-05-29
servers:
- url: https://api.coinex.com/v1
description: Coinex Production Server
""")
openapi = Dict(base)
paths = openapi.get('paths')
components = openapi.get('components')
components.get('parameters')['CX-ACCESS-SIGN'] = yaml.load("""
CX-ACCESS-SIGN:
in: header
name: authorization
required: true
description: |
Signature is required for Account API and trading API related interfaces.
The signature data is placed in the authorization header of the HTTP header and authorization is the signature result string.
No signature is required for market API related interfaces.
Use 32-bit MD5 Algorithm Signature
Use MD5 algorithm to encrypt the signature string, convert encrypted result to uppercase, get signature data and put signature data in HTTP Header - authorization.
schema:
type: string
x-go-default-value: "auto"
x-go-default: true
""")['CX-ACCESS-SIGN']
components.get('schemas')['UnknownResponse'] = {
'type' : 'object',
'description': 'Unknown Response'
}
for group_name, endpoints in api_endpoints:
for endpoint in [dotdict(l2) for l1 in endpoints for l2 in l1]:
path = endpoint.request_url.replace('https://api.coinex.com/v1', '').split('?')[0]
methods = paths.get(path)
method = endpoint.operation_type.strip().lower()
if method in methods:
continue
dmethod = methods.get(method)
dmethod.summary = endpoint.operation_name
dmethod.description = endpoint.description.strip(
'*').strip() + (f'\nRate Limit: {endpoint.rate_limit}' if endpoint.rate_limit else '')
dmethod.tags = list(group_name)
dmethod.operationId = ''.join(
[x.title() for x in endpoint.operation_name.replace('-', ' ').split(' ')])
parameters = dmethod.get('parameters', default=lambda: [])
if endpoint.signature_required and endpoint.signature_required.strip() == 'Yes':
parameters.append({
'$ref': '#/components/parameters/CX-ACCESS-SIGN'
})
types_lookup = {
'String' : ('string', {}),
'Integer': ('integer', {'format': 'int64'}),
'Long' : ('number', {'format': 'double'}),
'Array' : ('array', lambda: {'items': Dict({'type': 'object'})}),
'Object' : ('object', {}),
'bool' : ('boolean', {})
}
if endpoint.request_parameters:
parameters_table = pd.read_table(
StringIO(endpoint.request_parameters), sep="|", header=0, skipinitialspace=True
).dropna(axis=1, how='all').iloc[1:]
parameters_table.columns = list(map(lambda x: x.strip(), parameters_table.columns))
if endpoint.request_body_example is None:
parsed_url = urlparse(endpoint.request_example.split(' ', 1)[1])
query_dict = parse_qs(parsed_url.query)
for index, row in parameters_table.iterrows():
_type, format = types_lookup[row.type.strip().replace('Interger', 'Integer')]
parameters.append({
'in' : 'query',
'name' : row.get('name').strip(),
'required': True if 'yes' in row.required.lower() else False,
'schema' : {
'type': _type,
**format
},
})
if pd.notna(row.description):
parameters[-1]['description'] = row.description.strip()
example = query_dict.get(row.get('name').strip(), None)
if example:
parameters[-1]['example'] = example[0]
else:
dmethod.get('requestBody').get('content')['application/json'] = {
'schema': {'$ref': f'#/components/schemas/{dmethod.operationId}Request'},
}
components.get('schemas')[f'{dmethod.operationId}Request'] = Dict({
'type' : 'object',
'example': LiteralScalarString(endpoint.request_body_example.strip().strip('#'))
})
requestBody = components.get('schemas')[f'{dmethod.operationId}Request'].get('properties')
for index, row in parameters_table.iterrows():
key = row.get('name').strip()
_type, format = types_lookup[row.type.strip().replace('Interger', 'Integer')]
requestBody[key] = {
'type': _type,
**format
}
if pd.notna(row.required) and 'yes' in row.required.lower():
components.get('schemas')[f'{dmethod.operationId}Request'].get('required',
default=lambda: []).append(key)
if pd.notna(row.description):
requestBody[key]['description'] = row.description.strip()
responses = dmethod.get('responses')
responses['200'] = Dict({
'description': 'response info',
})
if endpoint.response_details:
key = f'{dmethod.operationId}Response'
responses.get('200').get('content')['application/json'] = Dict({
'schema': {
'type' : 'object',
'properties': {
'code' : {
'type' : 'integer',
'format': 'int64'
},
'message': {
'type': 'string'
},
'data' : {'$ref': f'#/components/schemas/{key}'}
}
}
})
response_table = pd.read_table(
StringIO(endpoint.response_details), sep="|", header=0, skipinitialspace=True
).dropna(axis=1, how='all').iloc[1:]
response_table.columns = list(map(lambda x: x.strip(), response_table.columns))
components.get('schemas')[key] = Dict({
'type' : 'object',
'description': f'{dmethod.operationId} Response Value',
})
if endpoint.response_example:
try:
components.get('schemas')[key]['example'] = json.loads(endpoint.response_example.strip().strip('#'))['data']
except json.decoder.JSONDecodeError:
components.get('schemas')[key]['example'] = LiteralScalarString(
endpoint.response_example.strip().strip('#'))
responseBody = components.get('schemas')[key].get('properties')
for index, row in response_table.iterrows():
if '[' in row.get('name'):
continue
name = row.get('name').strip().replace('\\', '')
_type, format2 = types_lookup[row.get('type', 'Object').strip().replace('Interger', 'Integer')]
responseBody[name] = {
'type': _type,
}
try:
responseBody[name].update(format2)
except TypeError:
responseBody[name].update(format2())
if pd.notna(row.description):
responseBody[name]['description'] = row.description.strip()
else:
responses.get('200').get('content')['application/json'] = Dict({
'schema': {
'type' : 'object',
'properties': {
'code' : {
'type' : 'integer',
'format': 'int64'
},
'message': {
'type': 'string'
},
'data' : {
'$ref': f'#/components/schemas/UnknownResponse'
}
}
}
})
with open('../api/coinex.yml', 'w', encoding='utf-8') as f:
yaml.dump(openapi, f)
| 2.6875
| 3
|
projectwo/servicetwo/test_servicetwo.py
|
ayonadee/prizegenerator
| 0
|
12778901
|
<reponame>ayonadee/prizegenerator
from unittest.mock import patch
from flask import url_for
from flask_testing import TestCase
from app import app
import requests
class TestBase(TestCase):
def create_app(self):
return app
class TestViews(TestBase):
def test_get_randomnumber(self):
response = self.client.get(url_for('randomnum'))
self.assertEqual(response.status_code, 200)
class TestNumber(TestBase):
def test_len(self):
response = self.client.get(url_for('randomnum'))
self.assertEqual(len(response.data), 4)
def test_number(self):
with patch('random.randint') as i:
i.return_value = 1
response = self.client.get(url_for('randomnum'))
self.assertIn(b'1111',response.data)
| 2.734375
| 3
|
apps/trade/urls.py
|
shao-169/SLTP
| 0
|
12778902
|
# _*_ encoding:utf-8 _*_
from django.conf.urls import url
from .views import *
__author__ = 'YZF'
__date__ = '2018/4/5,20:20'
urlpatterns =[
url(r'^list/',FightListView.as_view(),name='fight_list'),
url(r'^class/(?P<fight_id>.*)/', FightDetailView.as_view(), name='fight_class'),
url(r'^addcart/',AddCartView.as_view(), name='fight_addcart'),
url(r'^removecart/',DeleteCartFightView.as_view(), name='fight_remove'),
url(r'^cart/',CartView.as_view(), name='fight_cart'),
url(r'^order/', OrderDetailView.as_view(), name='fight_order'),
url(r'^comfirm/', ConfirmOrderView.as_view(), name='fight_confirm'),
url(r'^myorder/', MyOrderDetailView.as_view(), name='myorder'),
url(r'^gopay/(?P<order_id>.*)/', PayFightDetailView.as_view(), name='fight_gopay'),
url(r"^pay/" ,AliPayView.as_view(),name="fight_alipay"),
]
| 1.578125
| 2
|
bot/components/token.py
|
fossabot/jdan734-bot
| 0
|
12778903
|
import telebot
import os
import json
if "TOKEN" in os.environ:
bot = telebot.TeleBot(os.environ["TOKEN"])
heroku = True
else:
with open("../token2.json") as token:
heroku = False
bot = telebot.TeleBot(json.loads(token.read())["token"])
| 2.421875
| 2
|
tsundoku/blueprints/api/__init__.py
|
fossabot/Tsundoku
| 0
|
12778904
|
<filename>tsundoku/blueprints/api/__init__.py
from .routes import api_blueprint
| 1.203125
| 1
|
external/iotivity/iotivity_1.2-rel/build_common/iotivityconfig/compiler/default_configuration.py
|
SenthilKumarGS/TizenRT
| 1,433
|
12778905
|
# ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
from configuration import Configuration
# Default (very simple) compiler configuration
class DefaultConfiguration(Configuration):
def __init__(self, context):
Configuration.__init__(self, context)
# ------------------------------------------------------------
    # Return test program to be used when checking for basic C99
    # support.
# ------------------------------------------------------------
def _c99_test_program(self):
return """
// Some headers found in C99.
#include <stdbool.h>
#include <stdint.h>
int main()
{
struct foo
{
bool b; // C99 type
int i;
uint64_t q; // C99 type
};
// Designated initializer.
struct foo bar = { .b = false, .q = UINT64_MAX };
// Implicitly initialized field.
return bar.i != 0;
}
"""
# --------------------------------------------------------------
# Get list of flags that could potentially enable C99 support.
#
# The default configuration assumes that no flag is needed to
# enable C99 support.
# --------------------------------------------------------------
def _c99_flags(self):
return []
# ------------------------------------------------------------
# Return test program to be used when checking for basic C++11
# support.
# ------------------------------------------------------------
def _cxx11_test_program(self):
return """
int main()
{
int x = 3210;
auto f = [x](){
return x;
};
return f() != x;
}
"""
# --------------------------------------------------------------
# Get list of flags that could potentially enable C++11 support.
#
# The default configuration assumes that no flag is needed to
# enable C++11 support.
# --------------------------------------------------------------
def _cxx11_flags(self):
return []
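# A minimal sketch of how a concrete configuration is expected to specialize
# these hooks (the class name and the -std flags are illustrative, not part
# of this file):
#
#   class GccLikeConfiguration(DefaultConfiguration):
#       def _c99_flags(self):
#           return ['-std=c99']
#
#       def _cxx11_flags(self):
#           return ['-std=c++11']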
| 2.03125
| 2
|
sktime_dl/classification/_lstmfcn.py
|
Sugam10/sktime-dl
| 0
|
12778906
|
__author__ = "<NAME>"
import numpy as np
from tensorflow import keras
from sktime_dl.classification._classifier import BaseDeepClassifier
from sktime_dl.networks._lstmfcn import LSTMFCNNetwork
from sktime_dl.utils import check_and_clean_data, \
check_and_clean_validation_data
from sktime_dl.utils import check_is_fitted
from sklearn.utils import check_random_state
class LSTMFCNClassifier(BaseDeepClassifier, LSTMFCNNetwork):
"""
Implementation of LSTMFCNClassifier from Karim et al (2019). [1]_
Overview:
Combines an LSTM arm with a CNN arm. Optionally uses an attention mechanism in the LSTM which the
author indicates provides improved performance.
Parameters
----------
nb_epochs: int, default=1500
the number of epochs to train the model
    batch_size: int, default=8
        the number of samples per gradient update.
kernel_sizes: list of ints, default=[8, 5, 3]
specifying the length of the 1D convolution windows
filter_sizes: int, list of ints, default=[128, 256, 128]
size of filter for each conv layer
num_cells: int, default=8
output dimension for LSTM layer
dropout: float, default=0.8
controls dropout rate of LSTM layer
attention: boolean, default=False
If True, uses custom attention LSTM layer
    callbacks: keras callbacks, default=ReduceLROnPlateau
Keras callbacks to use such as learning rate reduction or saving best model based on validation error
random_state: int,
seed to any needed random actions
verbose: boolean,
whether to output extra information
model_name: string,
the name of this model for printing and file writing purposes
model_save_directory: string,
if not None; location to save the trained keras model in hdf5 format
Attributes
----------
nb_classes : int
Number of classes. Extracted from the data.
References
----------
@article{Karim_2019,
title={Multivariate LSTM-FCNs for time series classification},
volume={116},
ISSN={0893-6080},
url={http://dx.doi.org/10.1016/j.neunet.2019.04.014},
DOI={10.1016/j.neunet.2019.04.014},
journal={Neural Networks},
publisher={Elsevier BV},
author={<NAME> and <NAME> and <NAME> and <NAME>},
year={2019},
month={Aug},
pages={237–245}
}
Example
-------
from sktime_dl.classification import LSTMFCNClassifier
from sktime.datasets import load_italy_power_demand
X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
clf = LSTMFCNClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
"""
def __init__(
self,
nb_epochs=1500,
batch_size=8,
kernel_sizes=[8, 5, 3],
filter_sizes=[128, 256, 128],
num_cells=8,
dropout=0.8,
attention=False,
callbacks=[],
random_state=0,
verbose=False,
model_name="lstmfcn",
model_save_directory=None,
):
super(LSTMFCNClassifier, self).__init__(
model_name=model_name, model_save_directory=model_save_directory
)
        self.verbose = verbose
        self._is_fitted = False
        # calculated in fit
        self.classes_ = None
        self.nb_classes = -1
        self.input_shape = None
        self.model = None
        self.history = None
        # predefined
        self.nb_epochs = nb_epochs
        self.batch_size = batch_size
        self.kernel_sizes = kernel_sizes
        self.filter_sizes = filter_sizes
        self.NUM_CELLS = num_cells
        self.dropout = dropout
        self.attention = attention
        self.callbacks = callbacks
        self.random_state = random_state
def build_model(self, input_shape, nb_classes, **kwargs):
"""
Construct a compiled, un-trained, keras model that is ready for
        training
        Parameters
        ----------
input_shape : tuple
The shape of the data fed into the input layer
nb_classes: int
The number of classes, which shall become the size of the output
layer
Returns
-------
output : a compiled Keras Model
"""
input_layers, output_layer = self.build_network(input_shape, **kwargs)
output_layer = keras.layers.Dense(nb_classes, activation="softmax")(
output_layer
)
model = keras.models.Model(inputs=input_layers, outputs=output_layer)
model.compile(
loss="categorical_crossentropy",
optimizer='adam',
metrics=["accuracy"],
)
# file_path = self.output_directory + 'best_model.hdf5'
# model_checkpoint = keras.callbacks.ModelCheckpoint(
# filepath=file_path, monitor='val_loss',
# save_best_only=True)
# self.callbacks = [model_checkpoint]
        if not self.callbacks:
            # `callbacks` defaults to an empty list; fall back to the
            # learning-rate reduction documented in the class docstring
            reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.7,
                                                          patience=50, min_lr=0.0001)
            self.callbacks = [reduce_lr]
return model
def fit(self, X, y, input_checks=True, validation_X=None,
validation_y=None, **kwargs):
"""
        Fit the classifier on the training set (X, y)
        Parameters
        ----------
X : a nested pd.Dataframe, or (if input_checks=False) array-like of
shape = (n_instances, series_length, n_dimensions)
The training input samples. If a 2D array-like is passed,
n_dimensions is assumed to be 1.
y : array-like, shape = [n_instances]
The training data class labels.
input_checks : boolean
whether to check the X and y parameters
validation_X : a nested pd.Dataframe, or array-like of shape =
(n_instances, series_length, n_dimensions)
The validation samples. If a 2D array-like is passed,
n_dimensions is assumed to be 1.
Unless strictly defined by the user via callbacks (such as
EarlyStopping), the presence or state of the validation
data does not alter training in any way. Predictions at each epoch
are stored in the model's fit history.
validation_y : array-like, shape = [n_instances]
The validation class labels.
Returns
-------
self : object
"""
self.random_state = check_random_state(self.random_state)
X = check_and_clean_data(X, y, input_checks=input_checks)
y_onehot = self.convert_y(y)
validation_data = \
check_and_clean_validation_data(validation_X, validation_y,
self.label_encoder,
self.onehot_encoder)
# ignore the number of instances, X.shape[0],
# just want the shape of each instance
self.input_shape = X.shape[1:]
if validation_data is not None:
validation_data = (
validation_data[0],
validation_data[1]
)
self.model = self.build_model(self.input_shape, self.nb_classes)
if self.verbose:
self.model.summary()
self.history = self.model.fit(
X,
y_onehot,
batch_size=self.batch_size,
epochs=self.nb_epochs,
verbose=self.verbose,
            validation_data=validation_data,
callbacks=self.callbacks,
)
self.save_trained_model()
self._is_fitted = True
return self
def predict_proba(self, X, input_checks=True, **kwargs):
"""
Find probability estimates for each class for all cases in X.
Parameters
----------
X : a nested pd.Dataframe, or (if input_checks=False) array-like of
shape = (n_instances, series_length, n_dimensions)
The training input samples. If a 2D array-like is passed,
n_dimensions is assumed to be 1.
input_checks: boolean
whether to check the X parameter
Returns
-------
output : array of shape = [n_instances, n_classes] of probabilities
"""
check_is_fitted(self)
X = check_and_clean_data(X, input_checks=input_checks)
probs = self.model.predict(X, **kwargs)
# check if binary classification
if probs.shape[1] == 1:
# first column is probability of class 0 and second is of class 1
probs = np.hstack([1 - probs, probs])
return probs
| 2.328125
| 2
|
aplatam/console/train.py
|
fossabot/ap-latam
| 31
|
12778907
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Train a detection model from an already prepared dataset.
"""
import argparse
import logging
import os
import random
import sys
import warnings
import rasterio
from aplatam import __version__
from aplatam.build_trainset import CnnTrainsetBuilder
from aplatam.train_classifier import train
from aplatam.util import all_raster_files
__author__ = "<NAME>"
__copyright__ = __author__
__license__ = "new-bsd"
_logger = logging.getLogger(__name__)
# Number of bands that all rasters must have
BAND_COUNT = 4
# Default output model filename
DEFAULT_MODEL_FILENAME = 'model.h5'
def parse_args(args):
"""
Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=('Prepare a dataset from a set of preprocessed rasters '
'and a vector file of polygons and train a detection '
'model.'))
# Mandatory arguments
parser.add_argument(
'rasters_dir', help='directory containing raster images')
parser.add_argument('vector', help='vector file of training polygons')
parser.add_argument(
'output_dir', help='directory of output training dataset')
# Options
parser.add_argument(
'-o',
'--output-model',
default=None,
help=('filename for output model. '
'Default: OUTPUT_DIR/model.h5'))
parser.add_argument(
'--seed', type=int, help='seed number for the random number generator')
parser.add_argument("--size", type=int, default=256, help="window size")
parser.add_argument(
"--step-size",
type=int,
default=128,
help="step size for sliding window")
parser.add_argument(
"--buffer-size",
type=int,
default=0,
help=
"if buffer_size > 0, polygons are expanded with a fixed-sized buffer")
parser.add_argument(
"--rasters-contour",
help="path to rasters contour vector file (optional)")
parser.add_argument(
"--rescale-intensity",
dest='rescale_intensity',
default=True,
action='store_true',
help="rescale intensity")
parser.add_argument(
"--no-rescale-intensity",
dest='rescale_intensity',
action='store_false',
help="do not rescale intensity")
parser.add_argument(
"--lower-cut",
type=int,
default=2,
help=
"lower cut of percentiles for cumulative count in intensity rescaling")
parser.add_argument(
"--upper-cut",
type=int,
default=98,
help=
"upper cut of percentiles for cumulative count in intensity rescaling")
parser.add_argument(
"--block-size", type=int, default=1, help="block size multiplier")
parser.add_argument(
"--test-size",
type=float,
default=0.25,
help=("proportion of the dataset to include in the test split. "
"Float number between 0.0 and 1.0"))
parser.add_argument(
"--balancing-multiplier",
type=float,
default=1.0,
help=
"proportion of false samples w.r.t true samples (e.g. 1.0 = 50%% true, 50%% false)"
)
parser.add_argument(
"--trainable-layers",
type=int,
default=5,
help="number of upper layers of ResNet-50 to retrain")
parser.add_argument("--batch-size", type=int, default=5, help="Batch size")
parser.add_argument(
"--epochs", type=int, default=20, help="number of epochs to run")
parser.add_argument(
'--version',
action='version',
version='aplatam {ver}'.format(ver=__version__))
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_args(args)
def setup_logging(loglevel):
"""
Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(
level=loglevel,
stream=sys.stdout,
format=logformat,
datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
"""
Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
# Set default output model path, if not set
if args.output_model:
output_model = args.output_model
else:
output_model = os.path.join(args.output_dir, DEFAULT_MODEL_FILENAME)
opts = dict(
size=args.size,
step_size=args.step_size,
buffer_size=args.buffer_size,
rescale_intensity=args.rescale_intensity,
lower_cut=args.lower_cut,
upper_cut=args.upper_cut,
block_size=args.block_size,
test_size=args.test_size,
balancing_multiplier=args.balancing_multiplier,
rasters_contour=args.rasters_contour)
_logger.info('Options: %s', opts)
# Set seed number
if args.seed:
_logger.info('Seed: %d', args.seed)
random.seed(args.seed)
_logger.info('Collect all rasters from %s', args.rasters_dir)
rasters = all_raster_files(args.rasters_dir)
validate_rasters_band_count(rasters)
if not os.path.exists(args.output_dir):
builder = CnnTrainsetBuilder(rasters, args.vector, **opts)
builder.build(args.output_dir)
# Train and save model
train(
output_model,
args.output_dir,
trainable_layers=args.trainable_layers,
batch_size=args.batch_size,
epochs=args.epochs,
size=args.size)
_logger.info('Done')
def validate_rasters_band_count(rasters):
"""Validate all rasters have at least 3 bands
Returns True if they are all valid.
Otherwise it raises a RuntimeError.
"""
_logger.debug('Validate rasters band count')
for raster_path in rasters:
count = get_raster_band_count(raster_path)
if count < 3:
raise RuntimeError(
'Raster {} has {} bands, but should have 3 (true color RGB)'.
format(raster_path, count))
        if count > 3:
warnings.warn(
('Raster {} has more than 3 bands ({}). '
'Going to assume the first 3 bands are RGB...').format(
raster_path, count))
return True
def get_raster_band_count(raster_path):
"""Return band count of +raster_path+"""
with rasterio.open(raster_path) as dataset:
return dataset.count
def run():
"""Entry point for console_scripts"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
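# A minimal invocation sketch (paths are hypothetical; assumes the package is
# importable so the module can be run with -m):
#
#   python -m aplatam.console.train rasters/ buildings.geojson dataset_out/ \
#       --size 256 --step-size 128 --epochs 20 -v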
| 2.78125
| 3
|
minjector/providers/classprovider.py
|
MichaelSchneeberger/minjector
| 0
|
12778908
|
<filename>minjector/providers/classprovider.py
from minjector.providers.providerbase import ProviderBase
from minjector.readermonad.reader import Reader
from minjector.readermonad.readermonadop import ReaderMonadOp
from minjector.core.variableenvironment import VariableEnvironment
class ClassProvider(ProviderBase):
def __init__(self, cls):
self._cls = cls
def get(self, key):
def brackets(env: VariableEnvironment):
cls = self._cls
instance = cls.__new__(cls)
init = cls.__init__
func = init(instance)
if isinstance(func, Reader):
new_env = func((key, env))
else:
new_env = env.add_object(key, instance)
return new_env
var_provider = ReaderMonadOp.local(brackets)
return var_provider
| 2.578125
| 3
|
apps/approval/migrations/0001_initial.py
|
Kpaubert/onlineweb4
| 32
|
12778909
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="Approval",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"created",
models.DateTimeField(auto_now_add=True, verbose_name="opprettet"),
),
(
"processed",
models.BooleanField(
default=False, verbose_name="behandlet", editable=False
),
),
(
"processed_date",
models.DateTimeField(
null=True, verbose_name="behandlet dato", blank=True
),
),
(
"approved",
models.BooleanField(
default=False, verbose_name="godkjent", editable=False
),
),
("message", models.TextField(verbose_name="melding")),
],
options={},
bases=(models.Model,),
),
migrations.CreateModel(
name="MembershipApproval",
fields=[
(
"approval_ptr",
models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to="approval.Approval",
on_delete=models.CASCADE,
),
),
(
"new_expiry_date",
models.DateField(
null=True, verbose_name="ny utl\xf8psdato", blank=True
),
),
(
"field_of_study",
models.SmallIntegerField(
default=0,
verbose_name="studieretning",
choices=[
(0, "Gjest"),
(1, "Bachelor i Informatikk (BIT)"),
(10, "Software (SW)"),
(11, "Informasjonsforvaltning (DIF)"),
(12, "Komplekse Datasystemer (KDS)"),
(13, "Spillteknologi (SPT)"),
(14, "Intelligente Systemer (IRS)"),
(15, "Helseinformatikk (MSMEDTEK)"),
(30, "<NAME>"),
(80, "PhD"),
(90, "International"),
(100, "<NAME>"),
],
),
),
(
"started_date",
models.DateField(
null=True, verbose_name="startet dato", blank=True
),
),
],
options={
"verbose_name": "medlemskapss\xf8knad",
"verbose_name_plural": "medlemskapss\xf8knader",
"permissions": (
("view_membershipapproval", "View membership approval"),
),
},
bases=("approval.approval",),
),
]
| 1.726563
| 2
|
alf/environments/suite_safety_gym.py
|
hnyu/entropy_reward
| 0
|
12778910
|
# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Suite for loading OpenAI `Safety Gym <https://openai.com/blog/safety-gym/>`_ environments.
**NOTE**: Mujoco requires a separate installation
(gym >= 0.10 and mujoco >= 1.50).
Follow the instructions at:
https://github.com/openai/mujoco-py
Several general facts about the provided benchmark environments:
1. All have distance-based dense rewards (can be customized to be sparse).
2. All have continual goals: after reaching a goal, the goal is reset but the
layout keeps the same until timeout (can be customized to not reset goals).
3. Layouts are randomized before episodes begin.
4. Costs are indicator binaries (0 or 1). Every positive cost will be binarized
to 1. Thus the total cost will be 1 if any component cost is positive.
5. level 0 has no constraints; level 1 has some unsafe elements; level 2 has
very dense unsafe elements.
See https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L97
for a complete list of default configurations.
"""
try:
import mujoco_py
import safety_gym
except ImportError:
mujoco_py = None
safety_gym = None
import numpy as np
import copy
import gym
import alf
from alf.environments import suite_gym
from alf.environments.alf_wrappers import NonEpisodicAgent
def is_available():
"""Check if both ``mujoco_py`` and ``safety_gym`` have been installed."""
return (mujoco_py is not None and safety_gym is not None)
class VisionObservationWrapper(gym.ObservationWrapper):
"""If the observation is a dict and it contains a key 'vision',
    return a uint8 RGB image in [0,255] and a flat vector containing any other
info."""
def __init__(self, env):
super().__init__(env)
self._vision = False
if (isinstance(self.observation_space, gym.spaces.Dict)
and 'vision' in self.observation_space.spaces):
self._vision = True
observation_space = {}
observation_space['vision'] = self.observation_space['vision']
self.obs_flat_size = sum([
np.prod(i.shape)
for (k, i) in self.observation_space.spaces.items()
if k != 'vision'
])
observation_space['robot'] = gym.spaces.Box(
-np.inf, np.inf, (self.obs_flat_size, ), dtype=np.float32)
self.observation_space = gym.spaces.Dict(observation_space)
def observation(self, observation):
if self._vision:
obs = {"vision": observation["vision"]}
flat_obs = np.zeros(self.obs_flat_size)
offset = 0
for k in sorted(observation.keys()):
if k == 'vision':
continue
k_size = np.prod(observation[k].shape)
flat_obs[offset:offset + k_size] = observation[k].flat
offset += k_size
obs['robot'] = flat_obs
return obs
return observation
class CompleteEnvInfo(gym.Wrapper):
"""Always set the complete set of information so that the env info has a
fixed shape (no matter whether some event occurs or not), which is required
by ALF.
The current safety gym env only adds a key to env info when the corresponding
event is triggered, see:
https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L1242
"""
def __init__(self, env, env_name):
super().__init__(env)
# env info keys are retrieved from:
# https://github.com/openai/safety-gym/blob/master/safety_gym/envs/engine.py
self._env_info_keys = [
'cost_exception',
'goal_met',
'cost' # this is the summed overall cost
]
if not self._is_level0_env(env_name):
# for level 1 and 2 envs, there are constraints cost info
self._env_info_keys += [
'cost_vases_contact', 'cost_pillars', 'cost_buttons',
'cost_gremlins', 'cost_vases_displace', 'cost_vases_velocity',
'cost_hazards'
]
self._default_env_info = self._generate_default_env_info()
def _is_level0_env(self, env_name):
return "0-v" in env_name
def _generate_default_env_info(self):
env_info = {}
for key in self._env_info_keys:
if key == "goal_met":
env_info[key] = False
else:
env_info[key] = np.float32(0.)
return env_info
def step(self, action):
"""Take a step through the environment the returns the complete set of
env info, regardless of whether the corresponding event is enabled or not.
"""
env_info = copy.copy(self._default_env_info)
obs, reward, done, info = self.env.step(action)
env_info.update(info)
return obs, reward, done, env_info
class VectorReward(gym.Wrapper):
"""This wrapper makes the env returns a reward vector of length 3. The three
dimensions are:
1. distance-improvement reward indicating the delta smaller distances of
agent<->box and box<->goal for "push" tasks, or agent<->goal for
"goal"/"button" tasks.
2. negative binary cost where -1 means that at least one constraint has been
violated at the current time step (constraints vary depending on env
configurations).
3. a success indicator where 1 means the goal is met at the current step
All rewards are the higher the better.
"""
REWARD_DIMENSION = 2
def __init__(self, env, sparse_reward):
super().__init__(env)
self._reward_space = gym.spaces.Box(
low=-float('inf'),
high=float('inf'),
shape=[self.REWARD_DIMENSION])
self._sparse_reward = sparse_reward
def step(self, action):
"""Take one step through the environment and obtains several rewards.
Args:
action (np.array):
Returns:
tuple:
- obs (np.array): a flattened observation vector that contains
all enabled sensors' data
- rewards (np.array): a reward vector of length ``REWARD_DIMENSION``.
See the class docstring for their meanings.
- done (bool): whether the episode has ended
- info (dict): a dict of additional env information
"""
obs, reward, done, info = self.env.step(action)
# Get the second and third reward from ``info``
cost_reward = -info["cost"]
success_reward = float(info["goal_met"])
if self._sparse_reward:
reward = success_reward
return obs, np.array([reward, cost_reward],
dtype=np.float32), done, info
@property
def reward_space(self):
return self._reward_space
@alf.configurable(blacklist=['env'])
class RGBRenderWrapper(gym.Wrapper):
"""A ``metadata`` field should've been defined in the original safety gym env;
otherwise video recording will be disabled. See
https://github.com/openai/gym/blob/master/gym/wrappers/monitoring/video_recorder.py#L41
Also the original env needs a ``camera_id`` if "rgb_array" mode is used for
rendering, which is incompatible with our ``ALFEnvironment`` interfaces.
Here we wrap ``render()`` with a customizable camera mode.
"""
_metadata = {'render.modes': ["rgb_array", "human"]}
def __init__(self, env, width=800, height=800, camera_mode="fixedfar"):
"""
Args:
width (int): the width of rgb image
            height (int): the height of rgb image
camera_mode (str): one of ('fixednear', 'fixedfar', 'fixedtop', 'vision', 'track', 'top')
"""
super().__init__(env)
# self.metadata will first inherit subclass's metadata
self.metadata.update(self._metadata)
self._width = width
self._height = height
self._camera_mode = camera_mode
def render(self, mode="human"):
camera_id = self.unwrapped.model.camera_name2id(self._camera_mode)
render_kwargs = dict(mode=mode, camera_id=camera_id)
if self._width is not None:
render_kwargs["width"] = self._width
if self._height is not None:
render_kwargs["height"] = self._height
return self.env.render(**render_kwargs)
@alf.configurable
class EpisodicWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
def step(self, action):
obs, reward, done, info = self.env.step(action)
if info["goal_met"]:
done = True
#print("xy: [%s,%s]" % (info['xy'][0], info['xy'][1]))
return obs, reward, done, info
def reset(self):
#print("xy: reset")
return self.env.reset()
@alf.configurable
def load(environment_name,
env_id=None,
discount=1.0,
max_episode_steps=None,
unconstrained=False,
sparse_reward=False,
episodic=False,
gym_env_wrappers=(),
alf_env_wrappers=()):
"""Loads the selected environment and wraps it with the specified wrappers.
Note that by default a ``TimeLimit`` wrapper is used to limit episode lengths
to the default benchmarks defined by the registered environments.
Args:
environment_name: Name for the environment to load.
env_id: A scalar ``Tensor`` of the environment ID of the time step.
discount: Discount to use for the environment.
        max_episode_steps: If None, ``max_episode_steps`` will be set to
            the environment's default step limit minus 1. If 0, no
            ``TimeLimit`` wrapper will be used.
unconstrained (bool): if True, the suite will be used just as an
unconstrained environment. The reward will always be scalar without
including constraints.
sparse_reward (bool): If True, only give reward when reaching a goal.
        episodic (bool): whether to terminate the episode when a goal is achieved.
Note that if True, both ``EpisodicWrapper`` and ``NonEpisodicAgent``
wrapper will be used to simulate an infinite horizon even though the
success rate is computed on per-goal basis. This is for approximating
an average constraint reward objective. ``EpisodicWrapper`` first
returns ``done=True`` to signal the end of an episode, and ``NonEpisodicAgent``
replaces ``discount=0`` with ``discount=1``.
gym_env_wrappers: Iterable with references to wrapper classes to use
directly on the gym environment.
alf_env_wrappers: Iterable with references to wrapper classes to use on
the torch environment.
Returns:
AlfEnvironment:
"""
# We can directly make the env here because none of the safety gym tasks
# is registered with a ``max_episode_steps`` argument (the
# ``gym.wrappers.time_limit.TimeLimit`` won't be applied). But each task
# will inherently manage the time limit through ``env.num_steps``.
env = gym.make(environment_name)
# fill all env info with default values
env = CompleteEnvInfo(env, environment_name)
# make vector reward
if not unconstrained:
env = VectorReward(env, sparse_reward)
env = RGBRenderWrapper(env)
if episodic:
env = EpisodicWrapper(env)
alf_env_wrappers = alf_env_wrappers + (NonEpisodicAgent, )
env = VisionObservationWrapper(env)
    # Subtract 1 from the original env's max steps here, because the
    # underlying gym env outputs ``done=True`` when reaching the time limit
    # ``env.num_steps`` (before the ``AlfGymWrapper``), which is incorrect:
    # https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L1302
if max_episode_steps is None:
max_episode_steps = env.num_steps - 1
max_episode_steps = min(env.num_steps - 1, max_episode_steps)
return suite_gym.wrap_env(
env,
env_id=env_id,
discount=discount,
max_episode_steps=max_episode_steps,
gym_env_wrappers=gym_env_wrappers,
alf_env_wrappers=alf_env_wrappers)
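if __name__ == "__main__":
    # A minimal usage sketch. Assumptions: mujoco_py and safety_gym are
    # installed, and "Safexp-PointGoal1-v0" is one of the registered
    # benchmark env names (any registered safety gym env works here).
    if is_available():
        example_env = load("Safexp-PointGoal1-v0")
        time_step = example_env.reset()
        # With the default wrappers the reward is a length-2 vector:
        # [task reward, negative binary constraint cost].
        print(time_step.reward)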
| 1.625
| 2
|
Recursion/sum_of_digits.py
|
eferroni/Data-Structure-and-Algorithms
| 0
|
12778911
|
"""
How to find the sum of digits of a positive integer number using recursion?
"""
def sum_of_digits(n):
    assert n >= 0 and int(n) == n, "n must be a non-negative integer"
if n < 10:
return n
return sum_of_digits(n // 10) + n % 10
print(sum_of_digits(123))  # prints 6; a non-integer such as 0.6 would fail the assert
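# Worked trace of the recursion above:
#   sum_of_digits(123)
#     = sum_of_digits(12) + 3
#     = (sum_of_digits(1) + 2) + 3
#     = (1 + 2) + 3
#     = 6
assert sum_of_digits(123) == 6
assert sum_of_digits(0) == 0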
| 4.25
| 4
|
tests/test_clusterStructures/__init__.py
|
alekLukanen/pyDist
| 5
|
12778912
|
<reponame>alekLukanen/pyDist
#import tests.test_clusterStructures.star
from tests.test_clusterStructures.star import *
| 0.914063
| 1
|
tests/test_util.py
|
magicalyak/blinkpy
| 272
|
12778913
|
<gh_stars>100-1000
"""Test various api functions."""
import unittest
from unittest import mock
import time
from blinkpy.helpers.util import json_load, Throttle, time_to_seconds, gen_uid
class TestUtil(unittest.TestCase):
"""Test the helpers/util module."""
def setUp(self):
"""Initialize the blink module."""
def tearDown(self):
"""Tear down blink module."""
def test_throttle(self):
"""Test the throttle decorator."""
calls = []
@Throttle(seconds=5)
def test_throttle():
calls.append(1)
now = int(time.time())
now_plus_four = now + 4
now_plus_six = now + 6
test_throttle()
self.assertEqual(1, len(calls))
# Call again, still shouldn't fire
test_throttle()
self.assertEqual(1, len(calls))
# Call with force
test_throttle(force=True)
self.assertEqual(2, len(calls))
# Call without throttle, shouldn't fire
test_throttle()
self.assertEqual(2, len(calls))
# Fake time as 4 seconds from now
with mock.patch("time.time", return_value=now_plus_four):
test_throttle()
self.assertEqual(2, len(calls))
# Fake time as 6 seconds from now
with mock.patch("time.time", return_value=now_plus_six):
test_throttle()
self.assertEqual(3, len(calls))
def test_throttle_per_instance(self):
"""Test that throttle is done once per instance of class."""
class Tester:
"""A tester class for throttling."""
def test(self):
"""Test the throttle."""
return True
tester = Tester()
throttled = Throttle(seconds=1)(tester.test)
self.assertEqual(throttled(), True)
self.assertEqual(throttled(), None)
def test_throttle_multiple_objects(self):
"""Test that function is throttled even if called by multiple objects."""
@Throttle(seconds=5)
def test_throttle_method():
return True
class Tester:
"""A tester class for throttling."""
def test(self):
"""Test function for throttle."""
return test_throttle_method()
tester1 = Tester()
tester2 = Tester()
self.assertEqual(tester1.test(), True)
self.assertEqual(tester2.test(), None)
def test_throttle_on_two_methods(self):
"""Test that throttle works for multiple methods."""
class Tester:
"""A tester class for throttling."""
@Throttle(seconds=3)
def test1(self):
"""Test function for throttle."""
return True
@Throttle(seconds=5)
def test2(self):
"""Test function for throttle."""
return True
tester = Tester()
now = time.time()
now_plus_4 = now + 4
now_plus_6 = now + 6
self.assertEqual(tester.test1(), True)
self.assertEqual(tester.test2(), True)
self.assertEqual(tester.test1(), None)
self.assertEqual(tester.test2(), None)
with mock.patch("time.time", return_value=now_plus_4):
self.assertEqual(tester.test1(), True)
self.assertEqual(tester.test2(), None)
with mock.patch("time.time", return_value=now_plus_6):
self.assertEqual(tester.test1(), None)
self.assertEqual(tester.test2(), True)
def test_time_to_seconds(self):
"""Test time to seconds conversion."""
correct_time = "1970-01-01T00:00:05+00:00"
wrong_time = "1/1/1970 00:00:03"
self.assertEqual(time_to_seconds(correct_time), 5)
self.assertFalse(time_to_seconds(wrong_time))
def test_json_load_bad_data(self):
"""Check that bad file is handled."""
self.assertEqual(json_load("fake.file"), None)
with mock.patch("builtins.open", mock.mock_open(read_data="")):
self.assertEqual(json_load("fake.file"), None)
def test_gen_uid(self):
"""Test gen_uid formatting."""
val1 = gen_uid(8)
val2 = gen_uid(8, uid_format=True)
self.assertEqual(len(val1), 16)
self.assertTrue(val2.startswith("BlinkCamera_"))
val2_cut = val2.split("_")
val2_split = val2_cut[1].split("-")
self.assertEqual(len(val2_split[0]), 8)
self.assertEqual(len(val2_split[1]), 4)
self.assertEqual(len(val2_split[2]), 4)
self.assertEqual(len(val2_split[3]), 4)
self.assertEqual(len(val2_split[4]), 12)
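# Below is a minimal sketch (an illustration, NOT blinkpy's actual
# implementation) of a decorator satisfying the contract the tests above
# exercise: calls made within ``seconds`` of the last successful call
# return None, and ``force=True`` bypasses the throttle. State lives on
# the decorator instance, so each decorated function is throttled
# independently.
class SketchThrottle:
    """Illustrative throttle decorator; see blinkpy.helpers.util.Throttle."""

    def __init__(self, seconds=10):
        self.seconds = seconds
        self.last_call = None

    def __call__(self, method):
        def wrapper(*args, force=False, **kwargs):
            now = time.time()
            # Fire if forced, never called before, or enough time elapsed.
            if force or self.last_call is None or now - self.last_call > self.seconds:
                self.last_call = now
                return method(*args, **kwargs)
            return None

        return wrapper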
| 2.65625
| 3
|
rename.py
|
isjeffcom/Emotion-Surveillance
| 1
|
12778914
|
<filename>rename.py
# !/usr/bin/python
import os
for root, dirs, files in os.walk("./mn/", topdown=True):
for name in dirs:
path = os.path.join(root, name)
al = os.listdir(path)
        i = 0
        # Keep at most 11 files per directory: rename the first 11 to
        # "<n>_img.jpg" and delete the rest.
        for file in al:
            i = i + 1
            old = path + '/' + file
            new = path + '/' + str(i) + '_img' + '.jpg'
if i > 11:
os.unlink(old)
print('Delete: ' + old)
else:
os.rename(old, new)
print('Rename: ' + old + ' - ' + new)
| 3.34375
| 3
|
src/bots/cogs/admin.py
|
cyork95/KronaBotFam
| 1
|
12778915
|
import discord
from discord.ext import commands
class Admin(commands.Cog):
def __init__(self, client):
self.client = client
    @commands.command(help="Deletes the specified number of messages. Default is 2.")
@commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount_to_delete: int = 2):  # annotated so discord.py converts the argument to int
await ctx.message.delete()
await ctx.channel.purge(limit=amount_to_delete)
@clear.error
async def clear_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a number?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def kick(self, ctx, member: discord.Member, *, reason=None):
await ctx.message.delete()
await member.kick(reason=reason)
await ctx.send(f'Kicked {member.mention} for {reason}')
@kick.error
async def kick_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def ban(self, ctx, member: discord.Member, *, reason=None):
await ctx.message.delete()
await member.ban(reason=reason)
await ctx.send(f'Banned {member.mention} for {reason}')
@ban.error
async def ban_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def unban(self, ctx, *, member):
await ctx.message.delete()
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split('#')
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.send(f'Unbanned {user.mention}#{user.discriminator}')
return
@unban.error
async def unban_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def mute(self, ctx, member: discord.Member = None):
"""Mute a member."""
await ctx.message.delete()
role = discord.utils.get(ctx.guild.roles, name="Muted")
await member.add_roles(role)
await ctx.send(member.mention + " You have been muted. Please reflect on what you said or did and come back "
"refreshed and ready to do better.")
@mute.error
async def mute_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def unmute(self, ctx, member: discord.Member = None):
"""Unmute a member."""
await ctx.message.delete()
role = discord.utils.get(ctx.guild.roles, name="Muted")
await member.remove_roles(role)
await ctx.send(member.mention + " You have been unmuted. Enjoy your new freedom!.")
@unmute.error
async def unmute_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def add_role(self, ctx, member: discord.Member, role=None):
await ctx.message.delete()
discord_role = discord.utils.get(ctx.guild.roles, name=role)
await member.add_roles(discord_role)
await ctx.send(member.mention + f' You have been added to the role: {role}. Enjoy your new role!')
@add_role.error
async def add_role_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user and a role?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_role(self, ctx, member: discord.Member, role=None):
await ctx.message.delete()
discord_role = discord.utils.get(ctx.guild.roles, name=role)
await member.remove_roles(discord_role)
await ctx.send(member.mention + f' You have been removed from the role: {role}.')
@remove_role.error
async def remove_role_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user and a role?')
await ctx.send(embed=embed)
def setup(client):
client.add_cog(Admin(client))
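# A usage sketch (assumption: the bot is started from ``src`` so this file
# resolves to the extension path "bots.cogs.admin"):
#
#     client.load_extension("bots.cogs.admin")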
| 2.703125
| 3
|
brunton_lab_to_nwb/nwbwidgets.py
|
catalystneuro/brunton-lab-to-nwb
| 1
|
12778916
|
<gh_stars>1-10
import numpy as np
import plotly.graph_objects as go
import pynwb
from ipywidgets import widgets, ValueWidget
from plotly.colors import DEFAULT_PLOTLY_COLORS
class ShowElectrodesWidget(ValueWidget, widgets.HBox):
def __init__(self, nwbobj: pynwb.base.DynamicTable, **kwargs):
super().__init__()
group_names = nwbobj.group_name[:]
ugroups, group_pos, counts = np.unique(group_names,
return_inverse=True,
return_counts=True)
self.fig = go.FigureWidget()
x = nwbobj.x[:]
y = nwbobj.y[:]
z = nwbobj.z[:]
for i, group in enumerate(ugroups):
inds = group_names == group
self.fig.add_trace(
go.Scatter3d(
x=x[inds], y=y[inds], z=z[inds],
surfacecolor=np.array(DEFAULT_PLOTLY_COLORS)[i % len(DEFAULT_PLOTLY_COLORS)],
mode='markers',
name=group
)
)
self.children = [self.fig]
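# A usage sketch (assumption: ``nwbfile`` is a pynwb.NWBFile whose
# electrodes table has x/y/z and group_name columns, as accessed above):
#
#     widget = ShowElectrodesWidget(nwbfile.electrodes)
#     display(widget)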
| 2.25
| 2
|
yetAnotherDudeApp/issueTracker/admin.py
|
grillazz/yet-another-dude-app
| 1
|
12778917
|
from django.contrib import admin
from django.db.models import Count
from .models import *
@admin.register(Status)
class StatusAdmin(admin.ModelAdmin):
list_display = ('code',)
@admin.register(Priority)
class PriorityAdmin(admin.ModelAdmin):
list_display = ('code',)
@admin.register(Issue)
class IssueAdmin(admin.ModelAdmin):
list_display = ('title', 'status', 'priority', 'submitter',
'submitted_date', 'modified_date')
list_filter = ('priority', 'status', 'submitted_date')
search_fields = ('title', 'description',)
@admin.register(IssueSummary)
class IssueSummaryAdmin(admin.ModelAdmin):
change_list_template = 'admin/issue_summary_change_list.html'
date_hierarchy = 'submitted_date'
list_filter = (
'priority',
)
def has_add_permission(self, request):
return False
def changelist_view(self, request, extra_context=None):
response = super().changelist_view(
request,
extra_context=extra_context,
)
try:
qs = response.context_data['cl'].queryset
except (AttributeError, KeyError):
return response
metrics = {
'total': Count('id'),
}
response.context_data['summary'] = list(
qs.values('priority__code').annotate(**metrics)
)
response.context_data['summary_total'] = dict(
qs.aggregate(**metrics)
)
return response
| 1.757813
| 2
|
keras_frcnn/reporting/GoogleSpreadsheetReporter.py
|
kwon-young/MusicObjectDetector
| 1
|
12778918
|
import traceback
from typing import List
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
MAX_NUMBER_OF_LINES = 400
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Training Reporter'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
credentials = tools.run_flow(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def append_result_to_spreadsheet(dataset_size: int = 140,
model_name: str = "vgg4",
configuration_name="many_anchor_box_ratios",
data_augmentation="",
early_stopping: int = 20,
reduction_patience: int = 8,
learning_rate_reduction_factor: float = 0.5,
optimizer: str = "Adadelta",
initial_learning_rate: float = 1.0,
non_max_suppression_overlap_threshold: float = 0.7,
non_max_suppression_max_boxes: int = 300,
validation_accuracy: float = "0.90",
validation_total_loss: float = "0.10",
best_loss_rpn_cls: float = 999.9,
best_loss_rpn_regr: float = 999.9,
best_loss_class_cls: float = 999.9,
best_loss_class_regr: float = 999.9,
date: str = "24.12.9999",
datasets: str = "muscima_pp",
execution_time_in_seconds: int = "0"):
""" Appends the provided results to the Google Spreadsheets document
https://docs.google.com/spreadsheets/d/1MT4CH9yJD_vM9nT8JgnfmzwAVIuRoQYEyv-5FHMjYVo/edit#gid=0
"""
try:
service, spreadsheet_id = get_service_and_spreadsheet_id()
first_empty_line = get_first_empty_line_fast(service, spreadsheet_id)
print("Uploading results to Google Spreadsheet and appending at first empty line {0}".format(first_empty_line))
data = [dataset_size, model_name, configuration_name, data_augmentation, early_stopping, reduction_patience,
learning_rate_reduction_factor, optimizer, initial_learning_rate, non_max_suppression_overlap_threshold,
non_max_suppression_max_boxes, validation_accuracy, validation_total_loss, best_loss_rpn_cls,
best_loss_rpn_regr, best_loss_class_cls, best_loss_class_regr, date, datasets,
execution_time_in_seconds]
write_into_spreadsheet(service, spreadsheet_id, data, first_empty_line)
except Exception as exception:
print("Error while uploading results to Google Spreadsheet: {0}".format(str(exception)))
traceback.print_exc()
def get_service_and_spreadsheet_id():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discovery_url = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discovery_url)
spreadsheet_id = '1MT4CH9yJD_vM9nT8JgnfmzwAVIuRoQYEyv-5FHMjYVo'
return service, spreadsheet_id
def write_into_spreadsheet(service, spreadsheet_id, row_data: List[str], line_number):
value_input_option = "RAW"
body = {
'values': [
row_data,
# Another row, currently not supported
]
}
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheet_id, range="Sheet1!A{0}:Z{0}".format(line_number),
valueInputOption=value_input_option, body=body).execute()
return result
def get_first_empty_line_fast(service, spreadsheet_id) -> int:
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range="Sheet1!A1:A{0}".format(MAX_NUMBER_OF_LINES)).execute()
values = result.get('values', [])
return len(values) + 1
if __name__ == '__main__':
append_result_to_spreadsheet()
| 2.875
| 3
|
wmt/flask/views/sims.py
|
mcflugen/wmt-rest
| 0
|
12778919
|
import os
from flask import Blueprint
from flask import json, url_for, current_app
from flask import g, request, abort, send_file
from flaskext.uploads import UploadSet
from ..utils import as_resource, as_collection
from ..db import sim as sim_db
sims_page = Blueprint('sims', __name__)
#STAGE_DIR = '/data/web/htdocs/wmt/api/dev/files/downloads'
def to_resource(sim):
#links = []
#for tag in tag_db.tags_with_model(model.id):
# link = dict(rel='collection/tags')
# if tag is not None:
# link['href'] = url_for('tags.tag', id=tag.id)
# else:
# link['href'] = None
# links.append(link)
return {
'_type': 'sim',
'id': sim.id,
'uuid': sim.uuid,
'href': '/api/sims/%d' % sim.id,
'created': sim.created,
'updated': sim.updated,
'owner': sim.owner or None,
#'links': links,
}
def to_collection(sims):
return [to_resource(sim) for sim in sims]
@sims_page.route('/', methods=['GET', 'POST', 'OPTIONS'])
def show():
if request.method == 'GET':
sort = request.args.get('sort', 'id')
order = request.args.get('order', 'asc')
sims = sim_db.all(sort=sort, order=order)
collection = [to_resource(sim) for sim in sims]
return as_collection(collection)
elif request.method == 'POST':
data = json.loads(request.data)
return as_resource(to_resource(
sim_db.add(data['name'], data['model'])))
@sims_page.route('/<int:id>', methods=['GET', 'PATCH', 'DELETE'])
def sim(id):
sim = sim_db.get(id) or abort(404)
if request.method == 'PATCH':
data = json.loads(request.data)
if set(data.keys()).issubset(['status', 'message']):
sim_db.update_status(id, **data) or abort(401)
else:
abort(400)
    elif request.method == 'DELETE':
        sim_db.remove(id)  # assumes sim_db.remove takes the sim id
return as_resource(to_resource(sim))
@sims_page.route('/<int:id>/status', methods=['GET', 'PATCH', 'PUT'])
def status(id):
if request.method in ['PATCH', 'PUT']:
data = json.loads(request.data)
keys = set(data.keys())
if request.method == 'PATCH' and not keys.issubset(['status',
'message']):
abort(400)
elif request.method == 'PUT' and keys != set(['status', 'message']):
abort(400)
        sim_db.update_status(id, **data)
sim = sim_db.get(id) or abort(404)
return as_resource({'status': sim.status,
'message': sim.message })
@sims_page.route('/<int:id>/files', methods=['GET'])
def files(id):
    import shutil
    import tempfile
    format = request.args.get('format', 'gztar')
    sim = sim_db.get(id) or abort(404)
    tmpdir = tempfile.mkdtemp(prefix='wmt', suffix='.d')
    try:
        archive = os.path.join(tmpdir, str(sim.uuid))
        name = shutil.make_archive(archive, format,
                                   current_app.config['STAGE_DIR'], sim.uuid)
        return send_file(name, attachment_filename=os.path.basename(name),
                         as_attachment=True)
    finally:
        shutil.rmtree(tmpdir)
@sims_page.route('/<int:id>/actions', methods=['POST'])
def actions(id):
if request.method == 'POST':
data = json.loads(request.data)
if data['action'] == 'start':
sim_db.start(id)
elif data['action'] == 'stop':
sim_db.stop(id)
else:
abort(400)
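# Example requests (a sketch; paths assume the blueprint is mounted under
# /api/sims, matching the hrefs produced by to_resource() above):
#
#   GET    /api/sims/?sort=id&order=asc                 -> list simulations
#   POST   /api/sims/   {"name": ..., "model": ...}     -> create a simulation
#   PATCH  /api/sims/1  {"status": ..., "message": ...} -> update status
#   POST   /api/sims/1/actions {"action": "start"}      -> start a simulation
#   GET    /api/sims/1/files?format=gztar               -> download staged files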
| 2.1875
| 2
|
mergesort/merge_sort_test.py
|
timpel/stanford-algs
| 0
|
12778920
|
<reponame>timpel/stanford-algs
import merge_sort
for n in (2 ** k for k in range(20)):
merge_sort.main(n, False)
| 2.28125
| 2
|
plugins/ts.py
|
lucasberti/telegrao-py
| 0
|
12778921
|
<gh_stars>0
# Borrowed / adapted from https://github.com/benediktschmitt/py-ts3/blob/master/ts3/examples/viewer.py
from pprint import pprint
from api import send_message
import ts3
import os
__all__ = ["ChannelTreeNode",
"view"]
result = ""
class ChannelTreeNode(object):
def __init__(self, info, parent, root, clients=None):
self.info = info
self.childs = list()
# Init a root channel
if root is None:
self.parent = None
self.clients = None
self.root = self
# Init a real channel
else:
self.parent = parent
self.root = root
self.clients = clients if clients is not None else list()
return None
@classmethod
def init_root(cls, info):
return cls(info, None, None, None)
def is_root(self):
return self.parent is None
def is_channel(self):
return self.parent is not None
@classmethod
def build_tree(cls, ts3conn, sid):
ts3conn.use(sid=sid, virtual=True)
resp = ts3conn.serverinfo()
serverinfo = resp.parsed[0]
resp = ts3conn.channellist()
channellist = resp.parsed
resp = ts3conn.clientlist()
clientlist = resp.parsed
# channel id -> clients
clientlist = {cid: [client for client in clientlist \
if client["cid"] == cid]
for cid in map(lambda e: e["cid"], channellist)}
root = cls.init_root(serverinfo)
for channel in channellist:
resp = ts3conn.channelinfo(cid=channel["cid"])
channelinfo = resp.parsed[0]
# This makes sure, that *cid* is in the dictionary.
channelinfo.update(channel)
channel = cls(
info=channelinfo, parent=root, root=root,
clients=clientlist[channel["cid"]])
root.insert(channel)
return root
def insert(self, channel):
self.root._insert(channel)
return None
def _insert(self, channel):
if self.is_root():
i = 0
while i < len(self.childs):
child = self.childs[i]
if channel.info["cid"] == child.info["pid"]:
channel.childs.append(child)
self.childs.pop(i)
else:
i += 1
# This is not the root and the channel is a direct child of this one.
elif channel.info["pid"] == self.info["cid"]:
self.childs.append(channel)
return True
# Try to insert the channel recursive.
for child in self.childs:
if child._insert(channel):
return True
# If we could not find a parent in the whole tree, assume, that the
# channel is a child of the root.
if self.is_root():
self.childs.append(channel)
return False
def generate_repr(self, indent=0):
global result
if self.is_root():
# print(" "*(indent*3) + "|-", self.info["virtualserver_name"])
result += str(" "*(indent*3)) + "|- " + self.info["virtualserver_name"] + "\n"
else:
#print(" "*(indent*3) + "|-", self.info["channel_name"])
result += str(" "*(indent*3)) + "|- " + self.info["channel_name"] + "\n"
for client in self.clients:
# Ignore query clients
if client["client_type"] == "1":
continue
# print(" "*(indent*3+3) + "->", client["client_nickname"])
result += str(" "*(indent*3+3)) + "-> " + client["client_nickname"] + "\n"
for child in self.childs:
child.generate_repr(indent=indent + 1)
return None
def view(ts3conn, sid=1):
global result
result = ""
tree = ChannelTreeNode.build_tree(ts3conn, sid)
tree.generate_repr()
return result
def on_msg_received(msg, matches):
with ts3.query.TS3Connection("localhost") as ts3conn:
ts3conn.login(client_login_name="serveradmin", client_login_password=os.environ["<PASSWORD>"])
message = view(ts3conn, sid=1)
send_message(msg["chat"]["id"], "```" + message + "```")
send_message("14160874", "ts")
# Main
# ------------------------------------------------
if __name__ == "__main__":
with ts3.query.TS3Connection("localhost") as ts3conn:
ts3conn.login(client_login_name="serveradmin", client_login_password=os.environ["<PASSWORD>"])
print(view(ts3conn, sid=1))
| 2.4375
| 2
|
a10sdk/core/aam/aam_authentication_relay_kerberos_instance.py
|
deepfield/a10sdk-python
| 16
|
12778922
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Instance(A10BaseClass):
"""Class Description::
Kerberos Authentication Relay.
Class instance supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param kerberos_account: {"description": "Specify the kerberos account name", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 127, "type": "string"}
:param name: {"description": "Specify Kerberos authentication relay name", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}
:param encrypted: {"optional": true, "type": "encrypted", "description": "Do NOT use this option manually. (This is an A10 reserved keyword.) (The ENCRYPTED secret string)", "format": "encrypted"}
:param kerberos_realm: {"description": "Specify the kerberos realm", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param kerberos_kdc_service_group: {"description": "Specify an authentication service group as multiple KDCs", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 127, "not": "kerberos-kdc", "type": "string", "$ref": "/axapi/v3/aam/authentication/service-group"}
:param timeout: {"description": "Specify timeout for kerberos transport, default is 10 seconds (The timeout, default is 10 seconds)", "format": "number", "default": 10, "optional": true, "maximum": 255, "minimum": 1, "type": "number"}
:param password: {"default": 0, "optional": true, "type": "number", "description": "Specify password of Kerberos password", "format": "flag"}
:param kerberos_kdc: {"description": "Specify the kerberos kdc ip or host name", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "not": "kerberos-kdc-service-group", "type": "string"}
:param port: {"description": "Specify The KDC port, default is 88", "format": "number", "default": 88, "optional": true, "maximum": 65535, "minimum": 1, "type": "number"}
:param secret_string: {"description": "The kerberos client password", "format": "password", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/aam/authentication/relay/kerberos/instance/{name}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "name"]
self.b_key = "instance"
self.a10_url="/axapi/v3/aam/authentication/relay/kerberos/instance/{name}"
self.DeviceProxy = ""
self.kerberos_account = ""
self.name = ""
self.encrypted = ""
self.kerberos_realm = ""
self.uuid = ""
self.kerberos_kdc_service_group = ""
self.timeout = ""
self.password = ""
self.kerberos_kdc = ""
self.port = ""
self.secret_string = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
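# A usage sketch (assumptions: ``device_proxy`` is a session object per
# ``common/device_proxy.py``, and CRUD operations are inherited from
# A10BaseClass as the docstring above states):
#
#     instance = Instance(name="relay1", kerberos_realm="EXAMPLE.COM",
#                         kerberos_kdc="kdc.example.com",
#                         DeviceProxy=device_proxy)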
| 2.015625
| 2
|
filem/filem/samples/load_xml.py
|
DmitryRyumin/pkgs
| 2
|
12778923
|
<reponame>DmitryRyumin/pkgs<filename>filem/filem/samples/load_xml.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Загрузка XML файла
python filem/samples/load_xml.py --file путь_к_файлу_XML [--no_clear_shell]
"""
# ######################################################################################################################
# Импорт необходимых инструментов
# ######################################################################################################################
import argparse # Парсинг аргументов и параметров командной строки
# Персональные
from trml.shell import Shell # Работа с Shell
from filem.xml import Xml # Работа с XML
# ######################################################################################################################
# Выполняем только в том случае, если файл запущен сам по себе
# ######################################################################################################################
def main():
# Построение аргументов командой строки
ap = argparse.ArgumentParser()
# Добавление аргументов в парсер командной строки
ap.add_argument('--file', required=True, help='Путь к файлу XML')
ap.add_argument('--no_clear_shell', action='store_false', help='Не очищать консоль перед выполнением')
args = vars(ap.parse_args()) # Преобразование списка аргументов командной строки в словарь
# Очистка консоли перед выполнением
if args['no_clear_shell'] is True:
Shell.clear() # Очистка консоли
_xml = Xml() # Работа с XML
data = _xml.load(args['file']) # Загрузка XML файла
# Данные не загружены
if data is None:
return None
_xml.recursive_data_display(data) # Рекурсивное отображение данные из словаря
print() # Разрыв
if __name__ == "__main__":
main()
| 2.0625
| 2
|
tests/musictree/test_accidentals.py
|
alexgorji/music_score
| 2
|
12778924
|
<gh_stars>1-10
import os
from quicktions import Fraction
from musicscore.musicstream.streamvoice import SimpleFormat
from musicscore.musictree.treechord import TreeChord
from musicscore.musictree.treechordflags3 import TreeChordFlag3
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from musicxmlunittest import XMLTestCase
from tests.score_templates.xml_test_score import TestScore
path = str(os.path.abspath(__file__).split('.')[0])
class Test(XMLTestCase):
def setUp(self):
self.score = TreeScoreTimewise()
self.score.add_measure()
self.score.add_part()
def test_1(self):
midis = [61, 61, 62, 60, 63, 64, 65, 61]
for midi in midis:
self.score.add_chord(1, 1, TreeChord(midi, quarter_duration=0.5))
self.score.get_measure(1).get_part(1)
self.score.finish()
result_path = path + '_test_1'
self.score.write(path=result_path)
TestScore().assert_template(result_path=result_path)
def test_2(self):
midis = [60.0, 60.5, 61.0, 62.5, 64.0, 66.0, 68.0, 69.5, 71.0, 71.5, 72.0, 71.5, 71.0, 69.5, 68.0, 66.0, 64.0,
62.5, 61.0, 60.5]
measure_number = 1
for midi in midis:
chord = TreeChord(midi, quarter_duration=0.5)
chord.add_lyric(midi)
self.score.add_chord(measure_number, 1, chord)
remaining_duration = self.score.get_measure(measure_number).get_part(1).get_staff(1).get_voice(
1).remaining_duration
if remaining_duration == 0:
self.score.add_measure()
measure_number += 1
self.score.accidental_mode = 'modern'
result_path = path + '_test_2'
self.score.write(path=result_path)
TestScore().assert_template(result_path=result_path)
def test_3(self):
midis = [(61.0, 63), 61.0, 0, 62.0, 61, 61, 61, (62, 61)]
measure_number = 1
for midi in midis:
chord = TreeChord(midi, quarter_duration=0.5)
chord.add_lyric([m.value for m in chord.midis])
self.score.add_chord(measure_number, 1, chord)
remaining_duration = self.score.get_measure(measure_number).get_part(1).get_staff(1).get_voice(
1).remaining_duration
if remaining_duration == 0:
self.score.add_measure()
measure_number += 1
self.score.accidental_mode = 'modern'
result_path = path + '_test_3'
self.score.write(path=result_path)
TestScore().assert_template(result_path=result_path)
def test_4(self):
simpleformat = SimpleFormat(midis=list(range(60, 68)))
voice = simpleformat.to_stream_voice(2)
voice.add_to_score(self.score)
xml_path = path + '_test_4.xml'
self.score.accidental_mode = 'modern'
self.score.write(xml_path)
self.assertCompareFiles(xml_path)
def test_5(self):
simpleformat = SimpleFormat(midis=[71.5, 71.5, 72, 72, 71.5, 71.5], quarter_durations=6 * [0.5])
voice = simpleformat.to_stream_voice(1)
voice.add_to_score(self.score)
result_path = path + '_test_5'
self.score.accidental_mode = 'normal'
self.score.write(result_path)
TestScore().assert_template(result_path=result_path)
def test_6(self):
midis = [51.5, 51.5, 50.5, 48.5, 49.5, 48.5, 50.0, 50.0, 49.5, 49.0]
durations = [Fraction(255, 56), Fraction(6525, 3136), Fraction(6075, 3136), Fraction(2475, 3136),
Fraction(2145, 3136), Fraction(2805, 3136), Fraction(1815, 3136), Fraction(65, 56),
Fraction(2015, 1568), Fraction(1625, 1568)]
simpleformat = SimpleFormat(midis=midis, quarter_durations=durations)
simpleformat.auto_clef()
voice = simpleformat.to_stream_voice(1)
voice.add_to_score(self.score)
result_path = path + '_test_6'
self.score.max_division = 7
self.score.accidental_mode = 'modern'
self.score.write(result_path)
TestScore().assert_template(result_path=result_path)
def test_7(self):
# todo update_accidental does not work ...
class TestFlag3(TreeChordFlag3):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def implement(self, chord):
split = chord.split(2, 2)
split[0].to_rest()
for ch in split:
ch.update_type()
ch.update_dot()
return split
xml_path = path + '_test_7.xml'
sf = SimpleFormat(midis=[61], quarter_durations=[4])
sf.to_stream_voice().add_to_score(self.score, part_number=1)
chord = sf.chords[0]
chord.add_flag(TestFlag3())
sf.to_stream_voice().add_to_score(self.score, part_number=2)
self.score.write(xml_path)
def test_8(self):
midis = [60 + factor * 0.5 for factor in range(0, 25)]
simple_format = SimpleFormat(midis=midis + midis[-1::-1][1:])
for index, chord in enumerate(simple_format.chords):
if index <= len(midis) - 1:
chord.midis[0].accidental.mode = 'sharp'
else:
chord.midis[0].accidental.mode = 'flat'
simple_format.to_stream_voice().add_to_score(self.score)
xml_path = path + '_test_8.xml'
self.score.write(xml_path)
TestScore().assert_template(xml_path)
def test_9(self):
midis = [60, 61, 62, 63, 64, 61, 62, 61]
simple_format = SimpleFormat(midis=midis)
simple_format.to_stream_voice().add_to_score(self.score, part_number=1)
for chord in simple_format.chords:
chord.midis[0].accidental.force_show = True
simple_format.to_stream_voice().add_to_score(self.score, part_number=2)
for chord in simple_format.chords:
chord.midis[0].accidental.force_hide = True
simple_format.to_stream_voice().add_to_score(self.score, part_number=3)
xml_path = path + '_test_9.xml'
self.score.write(xml_path)
TestScore().assert_template(xml_path)
# def test_10(self):
# v1 = SimpleFormat(quarter_durations=[2, 2], midis=[72, 73])
# v2 = SimpleFormat(quarter_durations=[2, 2], midis=[60, 61])
# v1.to_stream_voice(1).add_to_score(self.score)
# v2.to_stream_voice(2).add_to_score(self.score)
#
# xml_path = path + '_test_10.xml'
# self.score.write(xml_path)
# self.assertCompareFiles(xml_path)
| 2.484375
| 2
|
src/main/python/tweetGater/gater.py
|
bryaneaton/BurstyTwitterStreams
| 2
|
12778925
|
#!/usr/bin/python
import sys
import re
gatedTweetPath = sys.argv[1]
inputPath = sys.argv[2]
outputPath = sys.argv[3]
tweetIdRegEx = re.compile("[0-9]{18}")
gatedTweetSet = set()
with open(gatedTweetPath, "r") as f:
for l in f:
        gatedTweetSet.add(int(l))  # int() replaces Python 2's long()
# print gatedTweetSet
outputFile = open(outputPath, "w")
tweetIdIndex = None
with open(inputPath, "r") as f:
    firstLine = next(f)
firstLine = firstLine.replace("\t", " ")
arr = firstLine.split(" ")
for i, e in enumerate(arr):
# print i, e
if ( tweetIdRegEx.match(e) ):
tweetIdIndex = i
break
# print tweetIdIndex
with open(inputPath, "r") as f:
for l in f:
l = l.replace("\t", " ")
arr = l.split(" ")
        tweetId = int(arr[tweetIdIndex])
if ( tweetId in gatedTweetSet ):
outputFile.write(l)
outputFile.close()
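# Usage (grounded in the sys.argv reads above):
#   python gater.py <gated_tweet_ids_file> <input_tweets_file> <output_file>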
| 2.796875
| 3
|
sensu_plugin/__init__.py
|
tubular/sensu-plugin-python
| 35
|
12778926
|
"""This module provides helpers for writing Sensu plugins"""
from sensu_plugin.plugin import SensuPlugin
from sensu_plugin.check import SensuPluginCheck
from sensu_plugin.metric import SensuPluginMetricGeneric
from sensu_plugin.metric import SensuPluginMetricGraphite
from sensu_plugin.metric import SensuPluginMetricInfluxdb
from sensu_plugin.metric import SensuPluginMetricJSON
from sensu_plugin.metric import SensuPluginMetricStatsd
from sensu_plugin.handler import SensuHandler
import sensu_plugin.pushevent
| 1.398438
| 1
|
article/views/home.py
|
vyahello/newspaper-parser
| 0
|
12778927
|
<reponame>vyahello/newspaper-parser<filename>article/views/home.py
"""Contains API for home page views."""
from typing import Any
from flask import Response, render_template
from article import application
from article.status import HttpStatus
@application.route(rule="/")
@application.route(rule="/home")
@application.route(rule="/index")
def home() -> str:
"""Returns home page content."""
return render_template(template_name_or_list="home/index.html")
@application.errorhandler(code_or_exception=HttpStatus.NOT_FOUND.code) # noqa: U101
def not_found(_: Any, **kwargs: Any) -> Response: # noqa: U101
"""Returns page not found response."""
return Response(response="The page was not found", status=HttpStatus.NOT_FOUND.code, **kwargs)
| 2.671875
| 3
|
tests/test_match_simulation.py
|
pitzer42/mini-magic
| 0
|
12778928
|
import tests.scenarios as scenarios
from tests.api_test_case import APITestCase
from entities import Match, Player
import events
class TestHappyPath(APITestCase):
@classmethod
def setUpClass(cls):
scenarios.two_players()
def match_setup(self):
match_id = self.post_to_create_a_new_match()
self.post_player_1_setup(match_id)
self.post_player_2_prompt(match_id)
return match_id
def post_to_create_a_new_match(self):
response = self.assertPost201('/matches')
self.assertJson(response, '_id')
match_id = response.json()['_id']
self.assertGet200('/matches/' + match_id)
return match_id
def post_player_1_setup(self, match_id):
request_data = {'player_id': 1, 'deck_id': 1}
self.assertPost200('/matches/'+match_id+'/join', json=request_data)
response = self.assertGet200('/matches/'+match_id)
self.assertJson(response, 'players')
match = Match(response.json())
players_in_the_match = len(match.players)
self.assertEqual(players_in_the_match, 1)
last_event = match.log[-1]['name']
self.assertEqual(last_event, events.Setup)
def post_player_2_prompt(self, match_id):
request_data = {'player_id': 2, 'deck_id': 2}
self.assertPost200('/matches/'+match_id+'/join', json=request_data)
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
players_in_the_match = len(match.players)
self.assertEqual(players_in_the_match, 2)
last_event = match.log[-1]['name']
self.assertEqual(last_event, events.Prompt)
def test_simulated_match(self):
match_id = self.match_setup()
self.play_turn_1(match_id)
self.assertPost200('/matches/' + match_id + '/players/2/end_turn')
self.play_and_use_counter(match_id)
self.post_end_turn(match_id)
def play_turn_1(self, match_id):
self.post_play_card(match_id)
self.post_use_card_to_get_resources(match_id)
self.post_use_resources_to_play_a_card(match_id)
self.post_use_card_to_deal_damage(match_id)
self.post_end_turn(match_id)
def post_play_card(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_board = len(match.current_player().board)
previous_hand = len(match.players[0].hand)
self.assertPost200('/matches/' + match_id + '/players/1/play/1')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
board = len(match.current_player().board)
self.assertEqual(board, previous_board + 1)
cards_in_hand = len(match.players[0].hand)
self.assertEqual(cards_in_hand, previous_hand - 1)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_card_to_get_resources(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/use/1')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
resources = match.current_player().resources
self.assertGreater(resources.a, 0)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_resources_to_play_a_card(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_board = len(match.players[0].board)
self.assertPost200('/matches/' + match_id + '/players/1/play/1')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
resources = match.current_player().resources
self.assertEqual(resources.a, 0)
cards_in_the_board = len(match.players[0].board)
self.assertEqual(cards_in_the_board, previous_board + 1)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_card_to_deal_damage(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/use/2')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
enemy = match.players[1]
self.assertLess(enemy.hp, Player.INITIAL_HP)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_end_turn(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/end_turn')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
self.assertEqual(match.current_player_index, 1)
def play_and_use_counter(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_hp = match.players[1].hp
self.assertPost200('/matches/' + match_id + '/players/1/use/2')
self.assertPost200('/matches/' + match_id + '/players/2/play/1')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
self.assertPost200('/matches/' + match_id + '/players/2/use/1')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
hp = match.players[1].hp
self.assertEqual(len(match.stack), 0)
self.assertEqual(previous_hp, hp)
| 2.578125
| 3
|
bistiming/utils.py
|
candy02058912/bistiming
| 1
|
12778929
|
from __future__ import print_function, division, absolute_import, unicode_literals
import datetime
def div_timedelta_int(d, i):
d_us = d.microseconds + 1000000 * (d.seconds + 86400 * d.days)
return datetime.timedelta(microseconds=d_us / i)
def div_timedelta(d1, d2):
if isinstance(d2, int):
return div_timedelta_int(d1, d2)
d1_us = d1.microseconds + 1000000 * (d1.seconds + 86400 * d1.days)
d2_us = d2.microseconds + 1000000 * (d2.seconds + 86400 * d2.days)
return d1_us / d2_us
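if __name__ == "__main__":
    # A small self-check of the two division modes above.
    one_hour = datetime.timedelta(hours=1)
    half_hour = datetime.timedelta(minutes=30)
    assert div_timedelta(one_hour, half_hour) == 2.0
    assert div_timedelta(one_hour, 2) == half_hour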
| 2.59375
| 3
|
src/examples/tutorial/ascent_intro/python/ascent_scene_example1.py
|
srini009/ascent
| 0
|
12778930
|
<reponame>srini009/ascent
###############################################################################
# Copyright (c) Lawrence Livermore National Security, LLC and other Ascent
# Project developers. See top-level LICENSE AND COPYRIGHT files for dates and
# other details. No copyright assignment is required to contribute to Ascent.
###############################################################################
import conduit
import conduit.blueprint
import ascent
import numpy as np
from ascent_tutorial_py_utils import tutorial_tets_example
mesh = conduit.Node()
# (call helper to create example tet mesh as in blueprint example 2)
tutorial_tets_example(mesh)
# Use Ascent with multiple scenes to render different variables
a = ascent.Ascent()
a.open()
a.publish(mesh)
# setup actions
actions = conduit.Node()
add_act = actions.append()
add_act["action"] = "add_scenes"
# declare two scenes (s1 and s2) to render the dataset
scenes = add_act["scenes"]
# our first scene (named 's1') will render the field 'var1'
# to the file out_scene_ex1_render_var1.png
scenes["s1/plots/p1/type"] = "pseudocolor";
scenes["s1/plots/p1/field"] = "var1";
scenes["s1/image_name"] = "out_scene_ex1_render_var1";
# our second scene (named 's2') will render the field 'var2'
# to the file out_scene_ex1_render_var2.png
scenes["s2/plots/p1/type"] = "pseudocolor";
scenes["s2/plots/p1/field"] = "var2";
scenes["s2/image_name"] = "out_scene_ex1_render_var2";
# print our full actions tree
print(actions.to_yaml())
# execute the actions
a.execute(actions)
a.close()
| 2.78125
| 3
|
automator/browsers/bugs/report_selenium.py
|
JannisBush/xs-leaks-browser-web
| 0
|
12778931
|
<filename>automator/browsers/bugs/report_selenium.py
import os
from selenium import webdriver
grid_url = "http://localhost:4444/wd/hub"
def get_driver():
return webdriver.Remote(
command_executor=grid_url,
options=webdriver.ChromeOptions())
try:
driver = get_driver()
driver.get("http://127.0.0.1:8080")
except Exception as e:
print(e)
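# To run this reproduction (assumptions about the local setup):
#   1. a Selenium Grid hub listening on http://localhost:4444/wd/hub
#   2. a web server on http://127.0.0.1:8080 serving the page under test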
| 2.6875
| 3
|
build-android/build.py
|
Zenfone2-Dev/vulkan-validation-layers
| 0
|
12778932
|
<reponame>Zenfone2-Dev/vulkan-validation-layers
#!/usr/bin/env python
#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import multiprocessing
import os
import subprocess
import sys
THIS_DIR = os.path.realpath(os.path.dirname(__file__))
ALL_ARCHITECTURES = (
'arm',
'arm64',
'mips',
'mips64',
'x86',
'x86_64',
)
# According to vk_platform.h, armeabi is not supported for Vulkan
# so remove it from the abis list.
ALL_ABIS = (
'armeabi-v7a',
'arm64-v8a',
'mips',
'mips64',
'x86',
'x86_64',
)
def jobs_arg():
return '-j{}'.format(multiprocessing.cpu_count() * 2)
def arch_to_abis(arch):
return {
'arm': ['armeabi-v7a'],
'arm64': ['arm64-v8a'],
'mips': ['mips'],
'mips64': ['mips64'],
'x86': ['x86'],
'x86_64': ['x86_64'],
}[arch]
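# Quick reference for the mapping above, e.g.:
#   arch_to_abis('arm')    -> ['armeabi-v7a']
#   arch_to_abis('x86_64') -> ['x86_64']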
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__()
self.add_argument(
'--out-dir', help='Directory to place temporary build files.',
type=os.path.realpath, default=os.path.join(THIS_DIR, 'out'))
self.add_argument(
'--arch', choices=ALL_ARCHITECTURES,
help='Architectures to build. Builds all if not present.')
self.add_argument('--installdir', dest='installdir', required=True,
help='Installation directory. Required.')
# The default for --dist-dir has to be handled after parsing all
# arguments because the default is derived from --out-dir. This is
# handled in run().
self.add_argument(
'--dist-dir', help='Directory to place the packaged artifact.',
type=os.path.realpath)
def main():
print('Constructing Vulkan validation layer source...')
print('THIS_DIR: %s' % THIS_DIR)
parser = ArgParser()
args = parser.parse_args()
arches = ALL_ARCHITECTURES
if args.arch is not None:
arches = [args.arch]
# ensure directory exists.
if not os.path.isdir(args.installdir):
os.makedirs(args.installdir)
# Make paths absolute, and ensure directories exist.
installdir = os.path.abspath(args.installdir)
abis = []
for arch in arches:
abis.extend(arch_to_abis(arch))
build_cmd = [
'bash', THIS_DIR + '/android-generate.sh'
]
print('Generating generated layers...')
subprocess.check_call(build_cmd)
print('Generation finished')
if os.path.isdir('/buildbot/android-ndk'):
ndk_dir = '/buildbot/android-ndk'
    elif 'NDK_PATH' in os.environ and os.path.isdir(os.environ['NDK_PATH']):
        ndk_dir = os.environ['NDK_PATH']
else:
print('Error: No NDK environment found')
return
ndk_build = os.path.join(ndk_dir, 'ndk-build')
platforms_root = os.path.join(ndk_dir, 'platforms')
toolchains_root = os.path.join(ndk_dir, 'toolchains')
build_dir = THIS_DIR
print('installdir: %s' % installdir)
print('ndk_dir: %s' % ndk_dir)
print('ndk_build: %s' % ndk_build)
print('platforms_root: %s' % platforms_root)
compiler = 'clang'
stl = 'gnustl_static'
obj_out = os.path.join(THIS_DIR, stl, 'obj')
lib_out = os.path.join(THIS_DIR, 'jniLibs')
print('obj_out: %s' % obj_out)
print('lib_out: %s' % lib_out)
build_cmd = [
'bash', ndk_build, '-C', build_dir, jobs_arg(),
'APP_ABI=' + ' '.join(abis),
# Use the prebuilt platforms and toolchains.
'NDK_PLATFORMS_ROOT=' + platforms_root,
'NDK_TOOLCHAINS_ROOT=' + toolchains_root,
'GNUSTL_PREFIX=',
# Tell ndk-build where all of our makefiles are and where outputs
# should go. The defaults in ndk-build are only valid if we have a
# typical ndk-build layout with a jni/{Android,Application}.mk.
'NDK_PROJECT_PATH=null',
'NDK_TOOLCHAIN_VERSION=' + compiler,
'APP_BUILD_SCRIPT=' + os.path.join(build_dir, 'jni', 'Android.mk'),
'APP_STL=' + stl,
'NDK_APPLICATION_MK=' + os.path.join(build_dir, 'jni', 'Application.mk'),
'NDK_OUT=' + obj_out,
'NDK_LIBS_OUT=' + lib_out,
'THIRD_PARTY_PATH=',
# Put armeabi-v7a-hard in its own directory.
'_NDK_TESTING_ALL_=yes'
]
print('Building Vulkan validation layers for ABIs:' +
' {}'.format(', '.join(abis)))
subprocess.check_call(build_cmd)
print('Finished building Vulkan validation layers')
out_package = os.path.join(installdir, 'vulkan_validation_layers.zip')
os.chdir(lib_out)
build_cmd = [
'zip', '-9qr', out_package, "."
]
print('Packaging Vulkan validation layers')
subprocess.check_call(build_cmd)
print('Finished Packaging Vulkan validation layers')
if __name__ == '__main__':
main()
| 1.8125
| 2
|
getHammersleyNodes_171203.py
|
yasokada/pySpherepts_171126
| 0
|
12778933
|
<gh_stars>0
import numpy as np
import sys
'''
v0.1 Dec. 03, 2017
- add getHammersleyNodes()
- add vdcorput()
- add get_fliplr()
- add basexpflip()
- add Test_getHammersleyNodes()
- add round_zero_direction()
'''
# %GETHAMMERSLEYNODES Comutes a Hammersley set of nodes on the unit sphere,
# % which are low-discrepancy sequences of nodes.
# %
# % X = getHammersleyNodes(N) returns an N-by-3 matrix of Hammersley nodes
# % on the sphere, which form a low-discrepancy sequence for the sphere.
# % The columns of X corresponds to the (x,y,z) cordinates of the nodes.
# %
# % For more details on these node sets see
# % <NAME> and <NAME>. Equidistribution on the sphere. SIAM Journal on
# % Scientific Computing, 18(2):595-609.
# %
# % <NAME> and <NAME> and <NAME>, 1997, Journal of
# % Graphics Tools , vol. 2, no. 2, 1997, pp 9-24.
# %
# % Example:
# % x = getHammersleyNodes(2000);
# % plotSphNodes(x);
#
# % Author: <NAME>, 2014
#
#
# % This code uses vdcorput, which was created by <NAME>.
# ported by <NAME> (Dec. 3, 2017)
def getHammersleyNodes(nval):
ts = vdcorput(nval, base=2)
#
ts = 2 * ts - 1
# get odd values such as [1,3,5,...]
pos = np.arange(1, 2 * nval + 1, 2)
#
phi = 2 * np.pi * (pos / 2.0 / nval)
phi = np.transpose(phi)
res = []
for idx, elem in enumerate(ts):
if idx >= len(ts) - 1:
break
wrk = 1.0 - np.power(elem, 2)
x1 = np.sqrt(wrk) * np.cos(phi[idx])
x2 = np.sqrt(wrk) * np.sin(phi[idx])
res += [[*x1, *x2, *elem]]
return np.array(res)
def vdcorput(kval, base):
# % VDCORPUT Base-b Van der Corput sequence, elements 0,..,k
# % INPUTS : k - maximum sequence index, non-negative integer
# % b - sequence base, integer exceeding 1
# % OUTPUTS : s - (k+1)*1 array, with s(i) storing element (i+1)
# % of base-b Van der Corput sequence
# % AUTHOR : <NAME>
#
# ported by <NAME> (Dec. 3, 2017)
if kval != np.floor(kval) or kval < 0:
print("ERROR:vdcorput() invalid [kval]:", kval)
sys.exit()
if base != np.floor(base) or base < 2:
print("ERROR:vdcorput() invalid [base]:", base)
sys.exit()
ss = np.zeros((kval+1, 1))
for idx in range(kval):
awrk = basexpflip(idx+1, base)
ncol = len(awrk[0])
gs = base ** np.array(range(1, ncol+1))
ss[idx+1][0] = np.sum(awrk / gs)
return ss
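# Worked example of the base-2 Van der Corput sequence produced above:
#   vdcorput(3, 2) -> [[0.], [0.5], [0.25], [0.75]]
# i.e. 1 -> 0.1 (binary) = 0.5, 2 -> 0.01 = 0.25, 3 -> 0.11 = 0.75.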
def basexpflip(kval, base):
# reversed base-b expansion of positive integer k
wrk = np.log(kval) / np.log(base)
jval = round_zero_direction(wrk) + 1.0
jval = jval.astype(int)[0]
res = np.zeros((1, jval))
qval = base**(jval - 1)
for idx in range(jval):
res[0][idx] = np.floor(kval / qval)
kval = kval - qval * res[0][idx]
qval = qval / base
return get_fliplr(res)
def get_fliplr(xs):
xs = np.array(xs)
if xs.ndim == 1:
return np.flipud(xs)
return np.fliplr(xs)
def round_zero_direction(xs):
xs = np.array(xs)
# to avoid "TypeError: iteration over a 0-d array"
# for the array with 1 element
if xs.ndim == 0:
xs = [xs]
#
res = []
for elem in xs:
if elem >= 0.0:
res += [np.floor(elem)]
else:
res += [np.ceil(elem)]
return np.array(res)
def Test_getHammersleyNodes():
res = getHammersleyNodes(2025)
print(res)
if __name__ == '__main__':
Test_getHammersleyNodes()
| 2.734375
| 3
|
btgs/server.py
|
MineRobber9000/btgs
| 2
|
12778934
|
from socketserver import BaseRequestHandler
import pathlib
import os
import mimetypes
import urllib.parse as urlparse
urlparse.uses_netloc.append("gemini")
urlparse.uses_relative.append("gemini")
class GeminiRequest:
"""A Gemini request, with URL and access to the underlying socket."""
def __init__(self,sock,url,initial_buf=b''):
self._sock = sock
self._buffer = initial_buf
self.url = url
self.parsed = urlparse.urlparse(url)
def closest_power_of_two(self,n):
"""Returns the power of two that is closest to, while being greater than, n."""
retval = 2
while retval<n: retval*=2
return retval
def recv(self,bufsize,flags=0):
"""A proxy over self._sock.recv that handles the initial buffer as well as other buffer problems."""
# time to do some funky shit
# do we have bufsize in our buffer?
if bufsize<=len(self._buffer):
# return that much
retval, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return retval
# if not, then ask for a power of two that's more than what was asked for
temp = self._sock.recv(self.closest_power_of_two(bufsize),flags)
self._buffer += temp
# now do we have bufsize in our buffer?
if bufsize<=len(self._buffer):
# return that much
retval, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return retval
else: # if not, just return what we have and go for it
retval, self._buffer = self._buffer, b''
return retval
def send(self,*args,**kwargs):
"""Plain alias of self._sock.sendall."""
return self._sock.sendall(*args,**kwargs)
def __getattr__(self,k):
"""Attempt to alias unknown attributes to self.parsed."""
# try and get the attribute off the parsed URL object
return getattr(self.parsed,k)
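# Hypothetical usage sketch: a GeminiRequest can be built without a live
# socket just to exercise the URL parsing (recv/send must not be called then):
#   r = GeminiRequest(None, "gemini://example.com/docs/spec.gmi?lang=en")
#   r.hostname -> 'example.com'; r.path -> '/docs/spec.gmi'; r.query -> 'lang=en'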
class GeminiRequestHandler(BaseRequestHandler):
HOSTS = [] # hostnames we can serve
PORT = 1965 # port we serve
ROOT = "/var/gemini" # root directory from which we serve files
DEFAULT_DEFAULT_META = { # default for the DEFAULT_META var
40: "Resource temporarily unavailable",
41: "Server unavailable",
42: "Unexpected error in CGI program",
43: "Unexpected error during handling of proxy request",
44: 60,
50: "Permanent failure",
51: "Not found",
52: "It's gone, Jim",
53: "Proxy request refused",
59: "Bad request",
60: "Provide a client certificate to continue",
61: "Not authorized to access this content",
62: "Invalid certificate provided"
}
def setup(self):
"""Gets us ready to handle the request. Any implementation-specific things should be done in setup_overrideable."""
self.peer_cert = self.request.get_peer_certificate()
self.setup_overrideable()
def handle(self):
"""Handles request. Parses request line and delegates response handling."""
buffer = b''
while b'\n' not in buffer and (temp:=self.request.recv(512)): buffer+=temp
if buffer[buffer.index(b'\n')-1]!=13: # request line must end with \r\n
self.header(59) # bad request
return
request, buffer = buffer[:buffer.index(b'\n')-1], buffer[buffer.index(b'\n')+1:]
if len(request)>1024: # maximum URL length is 1024 bytes
self.header(59) # bad request
return
try:
request = self.massage_request_line(request.decode("utf-8"),buffer)
except:
self.header(59) # bad request
return
if not self.preflight(request):
return # preflight will return the appropriate status code
if hasattr(self,f"handle_{request.scheme}"): # if we have a handler for that status...
getattr(self,f"handle_{request.scheme}")(request) # ...use it
else: # if not...
self.header(53) # treat it as a proxy request and refuse it
def massage_request_line(self,request_line,buffer):
"""Massages the request line into a GeminiRequest object."""
return GeminiRequest(self.request,request_line,buffer) # set up GeminiRequest object
def header(self,response_code,meta=""):
"""Sends a response header down the line. Will default to the entry in self.DEFAULT_META if it exists and meta is not provided."""
if not meta: meta = self.DEFAULT_META.get(response_code,"")
self.request.sendall(f"{response_code!s} {meta}\r\n".encode("utf-8"))
def preflight(self,request):
"""Preflight checks. Is the request for a URL we can serve?"""
if request.hostname not in self.HOSTS:
self.header(53) # refuse proxy requests
return False
port = request.port or 1965 # default to the default port
if port != self.PORT:
self.header(53) # refuse proxy requests
return False
return True # otherwise we're good
def handle_gemini(self,request):
"""Basic static file server. Default for gemini URLs."""
        file = pathlib.Path(os.path.normpath(request.path.strip("/")))
if file.is_absolute() or str(file).startswith(".."):
self.header(59)
return
filesystem = pathlib.Path(self.ROOT)/request.hostname/file
try:
if not os.access(filesystem,os.R_OK):
self.header(51) # not found
return
except OSError: # some OS-related error, treat it like it doesn't exist
self.header(51)
return
if filesystem.is_dir():
if (tmp:=filesystem/pathlib.Path("index.gmi")).exists():
filesystem = tmp
else:
self.directory_list(request,filesystem)
return
if not filesystem.exists():
self.header(51) # not found
return
else: # it exists and it's a file
self.send_file(request,filesystem)
def directory_list(self,request,dir):
"""Directory listing. I haven't implemented it yet, so it just returns a 40 error."""
self.header(40,"Resource unavailable") # NYI
def send_file(self,request,file):
"""Send the file at pathlib.Path object file to the request at request."""
mimetype = self.guess_mimetype(file)
self.header(20,mimetype)
with file.open("rb") as f:
while (data:=f.read(2048)):
request.send(data)
    def guess_mimetype(self,path):
        """Use the self.mime mimetypes.MimeTypes instance to guess the MIME type. Defaults to application/octet-stream."""
        # Note: guess_type's second return value is a content *encoding*
        # (e.g. 'gzip'), not a charset, so it is not folded into the meta line.
        type, _encoding = self.mime.guess_type(path.name)
        return type or "application/octet-stream"
def setup_overrideable(self):
"""Setting up self.DEFAULT_META and self.mime. If your mixin requires special setup override this method and call super().setup_overrideable(self)."""
self.DEFAULT_META = {}
self.DEFAULT_META.update(self.DEFAULT_DEFAULT_META)
self.mime = mimetypes.MimeTypes()
self.mime.add_type("text/gemini",".gmi")
self.mime.add_type("text/gemini",".gemini")
| 3.15625
| 3
|
vel/launcher.py
|
cclauss/vel
| 0
|
12778935
|
<reponame>cclauss/vel<gh_stars>0
#!/usr/bin/env python
import argparse
import datetime as dtm
from vel.api import ModelConfig
from vel.util.random import set_seed
from vel.internals.parser import Parser
def main():
""" Paperboy entry point - parse the arguments and run a command """
parser = argparse.ArgumentParser(description='Paperboy deep learning launcher')
parser.add_argument('config', metavar='FILENAME', help='Configuration file for the run')
parser.add_argument('command', metavar='COMMAND', help='A command to run')
parser.add_argument('varargs', nargs='*', metavar='VARARGS', help='Extra options to the command')
parser.add_argument('-r', '--run_number', type=int, default=0, help="A run number")
parser.add_argument('-d', '--device', default='cuda', help="A device to run the model on")
parser.add_argument('-s', '--seed', type=int, default=None, help="Random seed for the project")
parser.add_argument(
'-p', '--param', type=str, metavar='NAME=VALUE', action='append', default=[],
help="Configuration parameters"
)
parser.add_argument('--reset', action='store_true', default=False, help="Overwrite existing model storage")
args = parser.parse_args()
model_config = ModelConfig.from_file(
args.config, args.run_number, reset=args.reset, device=args.device, seed=args.seed,
params={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)}
)
# Set seed already in the launcher
set_seed(model_config.seed)
model_config.banner(args.command)
model_config.run_command(args.command, args.varargs)
model_config.quit_banner()
if __name__ == '__main__':
main()
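# Hypothetical invocation (file name and parameter values are illustrative):
#   python launcher.py config.yaml train -r 1 -d cuda -p lr=0.001 -p epochs=10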
| 2.5
| 2
|
app/shop/migrations/0002_auto_20201010_1116.py
|
chriskmamo/greenvoice
| 0
|
12778936
|
<reponame>chriskmamo/greenvoice<filename>app/shop/migrations/0002_auto_20201010_1116.py<gh_stars>0
# Generated by Django 3.0.10 on 2020-10-10 11:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('taxonomies', '0001_initial'),
('users', '0001_initial'),
('shop', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='wishlistitem',
name='customer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='wishlist_item', to='users.Customer', verbose_name='customer'),
),
migrations.AddField(
model_name='wishlistitem',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='wishlist_item', to='shop.ProductManager', verbose_name='product manager'),
),
migrations.AddField(
model_name='productoption',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_option', to='shop.ProductManager', verbose_name='product and color'),
),
migrations.AddField(
model_name='productoption',
name='size',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Size', verbose_name='size'),
),
migrations.AddField(
model_name='productmanager',
name='color',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Color', verbose_name='color'),
),
migrations.AddField(
model_name='productmanager',
name='product',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_manager', to='shop.Product', verbose_name='product'),
),
migrations.AddField(
model_name='product',
name='brand',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='product', to='shop.Brand', verbose_name='brand'),
),
migrations.AddField(
model_name='product',
name='category',
field=models.ManyToManyField(blank=True, to='taxonomies.Category', verbose_name='category'),
),
migrations.AddField(
model_name='product',
name='target_group',
field=models.ManyToManyField(blank=True, to='taxonomies.TargetGroup', verbose_name='target group'),
),
migrations.AddField(
model_name='orderitem',
name='customer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Customer', verbose_name='customer'),
),
migrations.AddField(
model_name='orderitem',
name='order',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='shop.Order', verbose_name='order'),
),
migrations.AddField(
model_name='orderitem',
name='product_option',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.ProductOption', verbose_name='product'),
),
migrations.AddField(
model_name='order',
name='customer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customer', to='users.Customer', verbose_name='customer'),
),
migrations.AddField(
model_name='brandsettingssales',
name='brand_settings',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sale', to='shop.BrandSettings', verbose_name='brand settings'),
),
migrations.AddField(
model_name='brandsettings',
name='brand',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='settings', to='shop.Brand', verbose_name='brand'),
),
migrations.AddField(
model_name='brandbranding',
name='brand',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='branding', to='shop.Brand', verbose_name='brand'),
),
migrations.AddField(
model_name='basictaxzones',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Country', verbose_name='country'),
),
migrations.AddField(
model_name='basictaxzones',
name='tax',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tax_zones', to='shop.BasicTax', verbose_name='tax'),
),
migrations.AddField(
model_name='basicimprint',
name='company_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='taxonomies.CompanyType', verbose_name='company type'),
),
migrations.AddField(
model_name='basicimprint',
name='country',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='taxonomies.Country', verbose_name='country'),
),
migrations.AddField(
model_name='basicbanking',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Country', verbose_name='country'),
),
migrations.AddField(
model_name='basicbanking',
name='currency',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Currency', verbose_name='currency'),
),
migrations.AddField(
model_name='productstatus',
name='product',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='status', to='shop.Product', verbose_name='product'),
),
migrations.AddField(
model_name='productoptionstatus',
name='product_option',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='status', to='shop.ProductOption', verbose_name='product option'),
),
migrations.AddField(
model_name='productmanagerstatus',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='status', to='shop.ProductManager', verbose_name='product manager'),
),
migrations.AddField(
model_name='productimage',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='image', to='shop.ProductManager', verbose_name='product'),
),
migrations.AddField(
model_name='productbrandimage',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='brand_image', to='shop.ProductManager', verbose_name='product'),
),
migrations.AddField(
model_name='brandsettingsstatus',
name='brand_settings',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='shop.BrandSettings', verbose_name='brand settings'),
),
migrations.AddField(
model_name='brandimprint',
name='brand',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='imprint', to='shop.Brand', verbose_name='brand'),
),
]
| 1.515625
| 2
|
cloudrunner_server/db/versions/330568e8928c_added_phone_field_for_user.py
|
ttrifonov/cloudrunner-server
| 2
|
12778937
|
"""Added phone field for User
Revision ID: 330568e8928c
Revises: <PASSWORD>
Create Date: 2015-02-05 16:53:40.517660
"""
# revision identifiers, used by Alembic.
revision = '330568e8928c'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('phone', sa.String(length=100), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'phone')
### end Alembic commands ###
| 1.34375
| 1
|
cryptoquant/app/cta_strategy/strategies/macd_strategy.py
|
studyquant/StudyQuant
| 74
|
12778938
|
<filename>cryptoquant/app/cta_strategy/strategies/macd_strategy.py
from cryptoquant.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager,
)
import talib
from cryptoquant.trader.object import OrderData, Direction, Exchange, Interval, Offset, Status, Product, OptionType, \
OrderType, TradeData, ContractData
# from cryptoquant.app.cta_signal.macd import Macd
class MacdStrategy(CtaTemplate):
""""""
author = "Rudy"
    shortperiod = 12  # fast EMA period
    longperiod = 26  # slow EMA period
    smoothperiod = 9  # signal-line smoothing period
    fixed_size = 1
    # ATR parameters
    atr_window = 30
    # ATR multiplier for the trailing stop
    sl_multiplier = 2.1
    # Parameters
    parameters = ["shortperiod", "longperiod",
                  "smoothperiod", "atr_window",
                  "sl_multiplier"]
    # Variables
    variables = ["pos", "signal"]
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
        self.bg5 = BarGenerator(self.on_bar, 5, self.on_5min_bar)
        self.am5 = ArrayManager()
        self.bg15 = BarGenerator(self.on_bar, 60, self.on_15min_bar)
        self.am15 = ArrayManager()
def on_init(self):
"""
Callback when strategy is inited.
"""
        self.write_log("Strategy initialized")
self.load_bar(10)
def on_start(self):
"""
Callback when strategy is started.
"""
        self.write_log("Strategy started")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
        self.write_log("Strategy stopped")
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.bg5.update_tick(tick)
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
# self.bg5.update_bar(bar)
self.bg15.update_bar(bar)
def on_5min_bar(self, bar: BarData):
""""""
self.am5.update_bar(bar)
if not self.am5.inited:
return
# self.rsi_value = self.am5.rsi(self.rsi_window)
self.put_event()
    def macd(self, kline, SHORTPERIOD, LONGPERIOD, SMOOTHPERIOD):
        """
        Compute the MACD indicator from the close prices in kline.
        SHORTPERIOD = 12   # fast EMA period
        LONGPERIOD = 26    # slow EMA period
        SMOOTHPERIOD = 9   # signal-line smoothing period
        Returns the MACD line, the signal line and the histogram.
        """
        close = kline['close'].values  # talib expects a float64 ndarray
        # MACD line, signal line, histogram (MACD - signal)
        macd, signal, self.hist = talib.MACD(close, SHORTPERIOD, LONGPERIOD, SMOOTHPERIOD)
        return macd, signal, self.hist
    def signal(self):
        # self.hist is a numpy array from talib, so plain indexing is used
        # (ndarrays have no .iloc); zero-crossing from below -> long, from above -> short
        if self.hist[-2] < 0 and self.hist[-1] > 0:
            signal = 1
        elif self.hist[-2] > 0 and self.hist[-1] < 0:
            signal = -1
        else:
            signal = 0
        return signal
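    # Example of the crossover rule above (illustrative values): histogram
    # values (-0.4, 0.7) mean the MACD line just crossed above the signal
    # line -> 1 (go long); (0.3, -0.2) -> -1 (go short); no crossing -> 0.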
def handle_data(self,kline):
        # compute the MACD indicator
macd, signal, hist = self.macd(kline,12,26,9)
signal = self.signal()
current_price = kline['close'].iloc[-1]
high_price = kline['high'].iloc[-1]
low_price = kline['low'].iloc[-1]
        # compute the ATR
self.atr_value = self.am15.atr(self.atr_window)
        # ---------------- strategy logic ----------------
if self.pos == 0:
if signal == 1:
self.buy(current_price, 1)
if signal == -1:
self.short(current_price, 1)
elif self.pos > 0:
if signal == -1:
self.sell(current_price, 1)
self.short(current_price, 1)
elif self.pos < 0:
if signal == 1:
self.cover(current_price, 1)
self.buy(current_price, 1)
        # ---------------- position management logic ----------------
if self.pos == 0:
self.intra_trade_high = high_price
self.intra_trade_low = low_price
elif self.pos > 0:
self.intra_trade_high = max(self.intra_trade_high, high_price)
self.intra_trade_low = low_price
self.long_stop = self.intra_trade_high - self.atr_value * self.sl_multiplier
self.sell(self.long_stop, abs(self.pos), True)
# if signal == -1:
# # self.cover(current_price, 1)
# self.sell(current_price, 1)
elif self.pos < 0:
self.intra_trade_high = high_price
self.intra_trade_low = min(self.intra_trade_low, low_price)
self.short_stop = self.intra_trade_low + self.atr_value * self.sl_multiplier
self.cover(self.short_stop, abs(self.pos), True)
# if signal == 1:
# # self.cover(current_price, 1)
# self.buy(current_price, 1)
def on_15min_bar(self, bar: BarData):
"""
        MACD trading strategy, run on each aggregated bar
:param bar:
:return:
"""
self.cancel_all()
self.am15.update_bar(bar)
if not self.am15.inited:
return
kline = self.am15.get_dataframe()
self.handle_data(kline)
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
pass
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
| 2.203125
| 2
|
documentation/environment_canada/ec_adhesion.py
|
gauteh/OilLibrary
| 11
|
12778939
|
from ec_models import Adhesion
from ec_xl_parse import get_oil_properties_by_category
from ec_oil_props import get_oil_weathering
from ec_oil_misc import g_cm_2_to_kg_m_2
def get_oil_adhesions(oil_columns, field_indexes):
'''
Getting the adhesion is fairly straightforward. We simply get the
value in g/cm^2 and convert to kg/m^2.
Dimensional parameters are simply (weathering).
'''
weathering = get_oil_weathering(oil_columns, field_indexes)
adhesions = get_adhesions_by_weathering(oil_columns,
field_indexes,
weathering)
return adhesions
def get_adhesions_by_weathering(oil_columns, field_indexes, weathering):
adhesions = []
props = get_oil_properties_by_category(oil_columns, field_indexes,
'adhesion_g_cm2_ests_1996')
prop_names = props.keys()
for idx, vals in enumerate(zip(*props.values())):
adhesion_kwargs = build_adhesion_kwargs(prop_names, vals,
weathering[idx])
adhesions.append(adhesion_kwargs)
return [Adhesion(**a) for a in adhesions
if a['kg_m_2'] is not None]
def build_adhesion_kwargs(prop_names, values, weathering):
'''
Build adhesion properties dictionary suitable to be passed in as
keyword args.
- prop_names: The list of property names
- values: A list of Excel cell objects representing the properties.
- weathering: The fractional oil weathering amount.
'''
adhesion_kwargs = dict(zip(prop_names, [v[0].value for v in values]))
adhesion_kwargs['weathering'] = weathering
adhesion_kwargs['kg_m_2'] = g_cm_2_to_kg_m_2(adhesion_kwargs['adhesion'])
return adhesion_kwargs
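# Worked unit conversion behind g_cm_2_to_kg_m_2 (assuming it is a pure
# factor-of-10 scaling): 1 g/cm^2 = 0.001 kg / 0.0001 m^2 = 10 kg/m^2,
# so an adhesion of 0.25 g/cm^2 becomes 2.5 kg/m^2.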
| 2.5625
| 3
|
bubbleimg/imgdownload/sdss/test/test_sdssimgloader_init.py
|
aileisun/bubblepy
| 3
|
12778940
|
<filename>bubbleimg/imgdownload/sdss/test/test_sdssimgloader_init.py<gh_stars>1-10
# test_sdssimgloader_init.py
# ALS 2017/05/02
"""
to be used with pytest
test sets for sdssimgloader
test suite init
"""
import numpy as np
import astropy.table as at
import astropy.units as u
import shutil
import os
import pytest
from ..sdssimgloader import sdssimgLoader
from ....obsobj import obsObj
ra = 150.0547735
dec = 12.7073027
img_width = 20*u.arcsec
img_height = 20*u.arcsec
dir_obj = './testing/SDSSJ1000+1242/'
dir_parent1 = './testing/'
dir_parent2 = './testing2/'
@pytest.fixture(scope="module", autouse=True)
def setUp_tearDown():
""" rm ./testing/ and ./test2/ before and after test"""
# setup
if os.path.isdir(dir_parent1):
shutil.rmtree(dir_parent1)
if os.path.isdir(dir_parent2):
shutil.rmtree(dir_parent2)
yield
# tear down
if os.path.isdir(dir_parent1):
shutil.rmtree(dir_parent1)
if os.path.isdir(dir_parent2):
shutil.rmtree(dir_parent2)
@pytest.fixture
def L_radec():
""" returns a sdssimgLoader object initiated with the ra dec above"""
return sdssimgLoader(ra=ra , dec=dec, dir_obj=dir_obj, img_width=img_width, img_height=img_height)
def test_instantiate_SDSSimgLoader_radec(L_radec):
"""
test that sdssimgLoader can be instantiated with ra dec
"""
L = L_radec
assert isinstance(L, sdssimgLoader)
assert L.ra == ra
assert L.img_width == img_width
assert L.img_height == img_height
assert L.dir_obj == dir_obj
def test_instantiate_SDSSimgLoader_obsobj():
"""
test that sdssimgLoader can be instantiated with obsobj
"""
obj = obsObj(ra=ra, dec=dec, dir_parent=dir_parent2)
L = sdssimgLoader(obj=obj, img_width=img_width, img_height=img_height)
assert isinstance(L, sdssimgLoader)
assert L.ra == ra
assert L.ra == obj.ra
assert L.img_height == img_height
assert L.dir_obj == obj.dir_obj
assert L.ra == L.obj.ra
assert L.dec == L.obj.dec
def test_instantiate_SDSSimgLoader_error_radec_obsobj():
"""
test that an error being raised when both obsobj and ra/dec/dir_obj are fed to sdssimgLoader
"""
obj = obsObj(ra=ra, dec=dec, dir_parent=dir_parent2)
with pytest.raises(Exception):
L = sdssimgLoader(ra=ra , dec=dec, dir_obj=dir_obj, obj=obj, img_width=img_width, img_height=img_height)
def test_instantiate_SDSSimgLoader_error():
"""
test that an error being raised when none of obsobj or ra/dec/dir_obj are fed to sdssimgLoader
"""
with pytest.raises(TypeError):
L = sdssimgLoader(img_width=img_width, img_height=img_height)
def test_init_survey(L_radec):
L = L_radec
assert L.survey == 'sdss'
assert L.ra == ra
assert L.img_width == img_width
assert L.img_height == img_height
assert L.dir_obj == dir_obj
def test_add_obj_sdss(L_radec):
"""
test that the function _add_obj_sdss adds/updates L.obj property based on L.ra, dec, dir_obj, and also properly downloads xid.csv and photoobj.csv
"""
L = L_radec
del L_radec.obj
assert not hasattr(L, 'obj')
L.add_obj_sdss(update=False)
assert hasattr(L, 'obj')
assert L.obj.ra == L.ra
assert L.obj.dec == L.dec
assert L.obj.dir_obj == L.dir_obj
assert hasattr(L.obj, 'sdss')
assert hasattr(L.obj.sdss, 'xid')
assert os.path.isfile(L.dir_obj+'sdss_xid.csv')
assert os.path.isfile(L.dir_obj+'sdss_photoobj.csv')
xid = L.obj.sdss.xid
# check that L.obj can be updated to the right things
L.obj = 'testing'
L.add_obj_sdss(update=True)
assert L.obj.ra == L.ra
assert L.obj.dir_obj == L.dir_obj
assert L.obj.sdss.xid == xid
def test_instantiate_SDSSimgLoader_floatwidth():
"""
test img_width can be input with float (with units assumed to be pix)
"""
L = sdssimgLoader(ra=ra , dec=dec, dir_obj=dir_obj, img_width=64., img_height=64)
assert isinstance(L, sdssimgLoader)
assert L.ra == ra
assert L.img_width == 64*u.pix
assert L.img_height == 64*u.pix
assert L.dir_obj == dir_obj
def test_instantiate_SDSSimgLoader_pixwidth():
"""
    test img_width can be input as an astropy quantity in pix
"""
L = sdssimgLoader(ra=ra , dec=dec, dir_obj=dir_obj, img_width=64.*u.pix, img_height=64.*u.pix)
assert isinstance(L, sdssimgLoader)
assert L.ra == ra
assert L.img_width == 64*u.pix
assert L.img_height == 64*u.pix
assert L.dir_obj == dir_obj
def test_transform_img_widthheight_unit_to_pix(L_radec):
L = L_radec
sdsspixsize = 0.396*u.arcsec/u.pix
assert L.img_width_pix == np.floor((L.img_width/sdsspixsize).to(u.pix)).value
assert L.img_height_pix == np.floor((L.img_height/sdsspixsize).to(u.pix)).value
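# Worked example for the conversion above: with img_width = 20 arcsec and the
# SDSS pixel scale of 0.396 arcsec/pix, 20 / 0.396 = 50.505... pix, floored to
# 50 pix -- the value asserted in the next test.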
def test_SDSSimgLoader_get_img_width_pix(L_radec):
L = L_radec
print(L.img_width_pix)
assert type(L.img_width_pix) is int
assert (L.img_width_pix) == 50.
| 2.140625
| 2
|
algotrade-bot-main/bot.py
|
ChoiceCoin/DeFi
| 2
|
12778941
|
from dataclasses import dataclass
import time
from tinyman.v1.client import TinymanTestnetClient, TinymanMainnetClient
from utils import get_trades
from colorama import Fore
@dataclass
class Account:
"""
DataClass For Bot Account
"""
address: str
private_key: str
class Bot:
def __init__(self, account: Account, network: str, interval: int):
"""
Args:
- account: Account object containing address and private_key
- network: "testnet" or "mainnet".
- trade: contains trade info
        - interval: sleeping interval for bot in seconds
"""
self.account = account
self.network = network
self.interval = interval
        self.client = (TinymanMainnetClient(user_address=account.address)
                       if network == "mainnet"
                       else TinymanTestnetClient(user_address=account.address))
def run(self):
print(Fore.GREEN, "Bot Is Running ...")
if not self.client.is_opted_in():
print(Fore.GREEN, "Optin In Progress ...")
self._optin()
while True:
trades = get_trades(self.network, self.account.address)
if not trades:
print(Fore.RED, "No Trade To Execute")
break
for trade in trades:
self._execute(trade)
print(Fore.GREEN, f'Bot Sleeping For {self.interval} Seconds ...')
time.sleep(self.interval)
def _optin(self):
"""
Opts In TinyMan App into Acount
"""
transaction_group = self.client.prepare_app_optin_transactions()
self._submit_txn(transaction_group)
def _execute(self, trade):
"""
Executes A Trade.
Args:
- trade: An Instance of Trade class in mongo db
"""
t_asset1 = trade.asset1
t_asset2 = trade.asset2
t_asset_in = trade.asset_in
asset1 = self.client.fetch_asset(int(t_asset1.asset_id))
asset2 = self.client.fetch_asset(int(t_asset2.asset_id))
pool = self.client.fetch_pool(asset1, asset2)
if t_asset_in.asset_id != t_asset2.asset_id:
quote = pool.fetch_fixed_input_swap_quote(
asset1(trade.asset_in_amt*10**asset1.decimals), float(trade.slippage))
else:
quote = pool.fetch_fixed_input_swap_quote(
asset2(trade.asset_in_amt*10**asset2.decimals), float(trade.slippage))
amt_in = quote.amount_in_with_slippage
amt_out = quote.amount_out_with_slippage
amt_in = amt_in.amount/10**amt_in.asset.decimals
amt_out = amt_out.amount/10**amt_out.asset.decimals
price = amt_out/amt_in
if price >= float(trade.min_sell_price):
self._create_swap_txn(quote, pool)
if trade.do_redeem:
self._redeem(pool, t_asset_in, t_asset2, t_asset1)
trade.is_completed = True
trade.save()
else:
print(Fore.RED, f"Price Target Not Reached, Moving To The Next Trade...")
def _create_swap_txn(self, quote, pool):
transaction_group = pool.prepare_swap_transactions_from_quote(quote)
self._submit_txn(transaction_group)
def _redeem(self, pool, asset_in, asset2, asset1):
excess = pool.fetch_excess_amounts()
if asset_in.asset_id != asset2.asset_id:
if asset2 in excess:
self._submit_redeem(asset2, excess, pool)
else:
if asset1 in excess:
self._submit_redeem(asset1, excess, pool)
def _submit_redeem(self, asset, excess, pool):
amount = excess[asset]
transaction_group = pool.prepare_redeem_transactions(amount)
self._submit_txn(transaction_group)
def _submit_txn(self, txn):
txn.sign_with_private_key(
self.account.address, self.account.private_key)
self.client.submit(txn, wait=True)
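# Hypothetical usage sketch (the address and private key below are
# placeholders, not part of this module):
#   account = Account(address="YOUR_ALGORAND_ADDRESS", private_key="YOUR_PRIVATE_KEY")
#   Bot(account, network="testnet", interval=60).run()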
| 2.765625
| 3
|
app.py
|
FrankchingKang/BasketbakkTeamStatusTool
| 0
|
12778942
|
<reponame>FrankchingKang/BasketbakkTeamStatusTool
import constants
import os
import copy
def clear_screen():
os.system("cls" if os.name == "nt" else "clear")
def convert_height(Players):
for player in Players:
l_height = player['height'].split()
player['height'] = int(l_height[0])
return Players
def convert_experience(Players):
for player in Players:
if player['experience'] == "YES":
player['experience'] = True
else:
player['experience'] = False
return Players
def convert_guardians(Players):
for player in Players:
guardians = player['guardians'].split()
player['guardians'] = []
try:
guardians.remove("and")
# have "and"
player['guardians'].append(" ".join(guardians[:2]))
player['guardians'].append(" ".join(guardians[2:]))
except ValueError:
# no "and"
player['guardians'].append(" ".join(guardians))
return Players
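# Example of the parsing above: "Jane Doe and John Doe" (illustrative names)
# splits to ['Jane', 'Doe', 'and', 'John', 'Doe']; removing 'and' and
# rejoining the first two and remaining tokens yields ['Jane Doe', 'John Doe'],
# while a single guardian string simply becomes a one-element list.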
def divide_players(Players):
ExPlayers = []
NoExPlayers = []
for player in Players:
if player['experience'] == True:
ExPlayers.append(player)
else:
NoExPlayers.append(player)
return ExPlayers, NoExPlayers
def assign_players_to_three_teams(Players):
# divide experienced player and non experienced player
ExPlayers, NoExPlayers = divide_players(Players)
Sortedplayers = ExPlayers + NoExPlayers
    TeamOne, TeamTwo, TeamThree = [], [], []
while Sortedplayers != []:
if len(Sortedplayers) % 3 == 0:
TeamOne.append(Sortedplayers.pop())
elif len(Sortedplayers) % 3 == 1:
TeamTwo.append(Sortedplayers.pop())
else:
TeamThree.append(Sortedplayers.pop())
return TeamOne, TeamTwo, TeamThree
def total_experienced_players(Players):
count = 0
for player in Players:
if player['experience'] == True:
count += 1
return count
def total_inexperienced_players(Players):
count = 0
for player in Players:
if player['experience'] == False:
count += 1
return count
def average_height(Players):
TotalHeight = 0
for player in Players:
TotalHeight += player['height']
return TotalHeight / len(Players)
def show_players_info(Players):
print("Total players: {}".format(len(Players)), end = "; ")
print("Experience Players: {}".format(total_experienced_players(Players)), end = "; ")
print("Inexperience Players: {}".format(total_inexperienced_players(Players)), end = "; ")
print("Average height of the team: {}".format(average_height(Players)))
print("\nPlayers on Team:")
PlayerName = [player['name'] for player in Players]
print("{}".format(", ".join(PlayerName)))
print("\nGuardians of all Players on Team:")
GuardiansOfPlayers = [guardian for player in Players for guardian in player['guardians']]
print(", ".join(GuardiansOfPlayers))
if __name__ == "__main__":
# get date from constants.py
# if using "=" will automatically modify the original data
#Teams = [team for team in constants.TEAMS]
#Players = [player for player in constants.PLAYERS]
#if using deepcopy will not change the original data
Teams = copy.deepcopy(constants.TEAMS)
Players = copy.deepcopy(constants.PLAYERS)
    # convert height of Players to int
    Players = convert_height(Players)
    # convert experience to Boolean
    Players = convert_experience(Players)
    # convert guardians string to list and delete "and"
    Players = convert_guardians(Players)
# assign player to three teams
league = {team: player for team, player in zip(Teams, assign_players_to_three_teams(Players))}
while True:
clear_screen()
print("Here are your choices:")
print("1) Display Team Stats")
print("2) Quit")
try:
action = input("Enter an option > ")
except KeyboardInterrupt:
action = input("\ndo you really want to exit? Y/N >")
if action.upper() == "Y":
os._exit(0)
        if action.lower() == "quit" or action == "2":
break
elif action == "1":
while True:
clear_screen()
print("1) Panthers")
print("2) Bandits")
print("3) Warriors")
try:
action = input("Enter an option > ")
except KeyboardInterrupt:
action = input("\ndo you really want to exit? Y/N >")
if action.upper() == "Y":
os._exit(0)
if action == "1" or action == "2" or action == "3":
action = int(action)
clear_screen()
TeamSelected = Teams[action-1]
PlayersOfTeam = league[TeamSelected]
print("\nTeam: {} status:".format(TeamSelected))
print("-"*50)
show_players_info(PlayersOfTeam)
print("-"*50)
break
else:
print("Please enter 1 to 3 to select your action. Thanks")
try:
input("Press enter to continue > ")
except KeyboardInterrupt:
action = input("\ndo you really want to exit? Y/N >")
if action.upper() == "Y":
os._exit(0)
continue
else:
continue
else:
clear_screen()
print("Please enter 1 or 2 to select your action. Thanks")
try:
input("Press enter to continue > ")
except KeyboardInterrupt:
action = input("\ndo you really want to exit? Y/N >")
if action.upper() == "Y":
os._exit(0)
| 3.046875
| 3
|
events/matsucon2018/migrations/0003_signupextra_shirt_size.py
|
darkismus/kompassi
| 13
|
12778943
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-03 21:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('matsucon2018', '0002_auto_20180203_2326'),
]
operations = [
migrations.AddField(
model_name='signupextra',
name='shirt_size',
field=models.CharField(choices=[('NO_SHIRT', 'En halua paitaa'), ('S', 'S'), ('M', 'M'), ('L', 'L'), ('XL', 'XL'), ('OTHER', 'Muu koko (kerro Vapaa sana -kentässä)')], default='NO_SHIRT', max_length=8, verbose_name='Työvoiman T-paidan koko'),
),
]
| 1.898438
| 2
|
Packs/Imperva_WAF/Integrations/ImpervaWAF/ImpervaWAF.py
|
diCagri/content
| 799
|
12778944
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import traceback
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
INTEGRATION_CONTEXT_NAME = 'ImpervaWAF'
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
session_id = ''
def do_request(self, method, url_suffix, json_data=None):
if not self.session_id:
self.login()
res = self._http_request(method, f'SecureSphere/api/v1/{url_suffix}', json_data=json_data,
headers={'Cookie': self.session_id}, ok_codes=(200, 401, 406), resp_type='response')
if res.status_code == 401:
self.login()
res = self._http_request(method, f'SecureSphere/api/v1/{url_suffix}', json_data=json_data,
headers={'Cookie': self.session_id}, ok_codes=(200, 401, 406),
resp_type='response')
if res.text:
res = res.json()
else:
res = {}
extract_errors(res)
return res
def login(self):
res = self._http_request('POST', 'SecureSphere/api/v1/auth/session', auth=self._auth)
extract_errors(res)
self.session_id = res.get('session-id')
def get_ip_group_entities(self, group_name, table_name):
raw_res = self.do_request('GET', f'conf/ipGroups/{group_name}')
entries = []
for entry in raw_res.get('entries'):
entries.append({'Type': entry.get('type'),
'IpAddressFrom': entry.get('ipAddressFrom'),
'IpAddressTo': entry.get('ipAddressTo'),
'NetworkAddress': entry.get('networkAddress'),
'CidrMask': entry.get('cidrMask')})
human_readable = tableToMarkdown(table_name, entries, removeNull=True,
headers=['Type', 'IpAddressFrom', 'IpAddressTo', 'NetworkAddress', 'CidrMask'])
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)':
{'Name': group_name, 'Entries': entries}}
return human_readable, entry_context, raw_res
def get_custom_policy_outputs(self, policy_name, table_name):
raw_res = self.do_request('GET', f'conf/policies/security/webServiceCustomPolicies/{policy_name}')
policy = {'Name': policy_name,
'Enabled': raw_res.get('enabled'),
'OneAlertPerSession': raw_res.get('oneAlertPerSession'),
'DisplayResponsePage': raw_res.get('displayResponsePage'),
'Severity': raw_res.get('severity'),
'Action': raw_res.get('action'),
'FollowedAction': raw_res.get('followedAction'),
'ApplyTo': raw_res.get('applyTo'),
'MatchCriteria': raw_res.get('matchCriteria')}
hr_policy = policy.copy()
del hr_policy['MatchCriteria']
del hr_policy['ApplyTo']
human_readable = tableToMarkdown(table_name, hr_policy, removeNull=True)
if raw_res.get('applyTo'):
human_readable += '\n\n' + tableToMarkdown('Services to apply the policy to', raw_res.get('applyTo'),
removeNull=True)
for match in raw_res.get('matchCriteria', []):
tmp_match = match.copy()
operation = match['operation']
match_type = match['type']
# generate human readable for sourceIpAddresses type
if match_type == 'sourceIpAddresses':
if tmp_match.get('userDefined'):
for i, element in enumerate(tmp_match['userDefined']):
tmp_match['userDefined'][i] = {'IP Address': tmp_match['userDefined'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n Source IP addresses:',
tmp_match['userDefined'], removeNull=True)
if tmp_match.get('ipGroups'):
for i, element in enumerate(tmp_match['ipGroups']):
tmp_match['ipGroups'][i] = {'Group name': tmp_match['ipGroups'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n IP Groups:',
tmp_match['ipGroups'], removeNull=True)
# generate human readable for sourceGeolocation type
elif match_type == 'sourceGeolocation':
if tmp_match.get('values'):
for i, element in enumerate(tmp_match['values']):
tmp_match['values'][i] = {'Country name': tmp_match['values'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n Countries to match:',
tmp_match['values'], removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)': policy}
return human_readable, entry_context, raw_res
def extract_errors(res):
if not isinstance(res, list) and res.get('errors'):
error_message = ''
for err in res['errors']:
            error_message += f'error-code: {err.get("error-code")}, description: {err.get("description")}\n'
raise Exception(error_message)
def generate_policy_data_body(args):
severity = args.get('severity')
action = args.get('action')
followed_action = args.get('followed-action')
body = {}
if args.get('enabled'):
body['enabled'] = args['enabled'] == 'True'
if args.get('one-alert-per-session'):
body['oneAlertPerSession'] = args['one-alert-per-session'] == 'True'
if args.get('display-response-page'):
body['displayResponsePage'] = args['display-response-page'] == 'True'
if severity:
body['severity'] = severity
if action:
body['action'] = action
if followed_action:
body['followedAction'] = followed_action
return body
def generate_match_criteria(body, args):
geo_location_criteria_operation = args.get('geo-location-criteria-operation')
ip_addresses_criteria_operation = args.get('ip-addresses-criteria-operation')
ip_groups = args.get('ip-groups', '')
    ip_addresses = args.get('ip-addresses', '')
country_names = args.get('country-names', '')
match_criteria = []
if geo_location_criteria_operation:
if not country_names:
raise Exception('country-names argument is empty')
geo_location_match_item = {'type': 'sourceGeolocation',
'operation': geo_location_criteria_operation,
'values': country_names.split(',')}
match_criteria.append(geo_location_match_item)
if ip_addresses_criteria_operation:
        if not ip_groups and not ip_addresses:
raise Exception('ip-groups and ip-addresses arguments are empty, please fill at least one of them')
ip_addresses_match_item = {'type': 'sourceIpAddresses',
'operation': ip_addresses_criteria_operation}
if ip_groups:
ip_addresses_match_item['ipGroups'] = ip_groups.split(',')
        if ip_addresses:
            ip_addresses_match_item['userDefined'] = ip_addresses.split(',')
match_criteria.append(ip_addresses_match_item)
body['matchCriteria'] = match_criteria
return body
def generate_ip_groups_entries(args):
entry_type = args.get('entry-type')
ip_from = args.get('ip-address-from')
ip_to = args.get('ip-address-to')
network_address = args.get('network-address')
cidr_mask = args.get('cidr-mask')
operation = args.get('operation')
json_entries = args.get('json-entries')
if not json_entries:
entry = {}
if entry_type == 'single':
entry['ipAddressFrom'] = ip_from
elif entry_type == 'range':
entry['ipAddressFrom'] = ip_from
entry['ipAddressTo'] = ip_to
elif entry_type == 'network':
entry['networkAddress'] = network_address
entry['cidrMask'] = cidr_mask
else:
raise Exception('entry-type argument is invalid')
entry['type'] = entry_type
entry['operation'] = operation
body = {'entries': [entry]}
else:
try:
json_entries = json.loads(json_entries)
except Exception:
            raise Exception(f'Failed to parse json-entries as JSON data, received object:\n{json_entries}')
body = {'entries': json_entries}
return body
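# Example of the body built above for a single-IP entry (illustrative values):
#   generate_ip_groups_entries({'entry-type': 'single',
#                               'ip-address-from': '10.0.0.1',
#                               'operation': 'add'})
#   -> {'entries': [{'ipAddressFrom': '10.0.0.1', 'type': 'single',
#                    'operation': 'add'}]}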
@logger
def test_module(client, args):
raw_res = client.do_request('GET', 'conf/sites')
if raw_res.get('sites'):
demisto.results('ok')
@logger
def ip_group_list_command(client, args):
raw_res = client.do_request('GET', 'conf/ipGroups')
groups = []
if raw_res.get('names'):
groups = raw_res['names']
for i, element in enumerate(groups):
groups[i] = {'Name': groups[i]}
human_readable = tableToMarkdown('IP groups', groups, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)': groups}
return_outputs(human_readable, entry_context, raw_res)
@logger
def ip_group_list_entries_command(client, args):
group_name = args.get('ip-group-name')
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'IP group entries for {group_name}')
return_outputs(human_readable, entry_context, raw_res)
@logger
def ip_group_remove_entries_command(client, args):
group_name = args.get('ip-group-name')
raw_res = client.do_request('DELETE', f'conf/ipGroups/{group_name}/clear')
return_outputs(f'The IP group {group_name} is now empty', {}, raw_res)
@logger
def sites_list_command(client, args):
raw_res = client.do_request('GET', 'conf/sites')
sites = [{'Name': site} for site in raw_res.get('sites', [])]
human_readable = tableToMarkdown('All sites in the system', sites, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Site(val.Name===obj.Name)': sites}
return_outputs(human_readable, entry_context, raw_res)
@logger
def server_groups_list_command(client, args):
site = args.get('site-name')
raw_res = client.do_request('GET', f'conf/serverGroups/{site}')
server_groups = []
if raw_res.get('server-groups'):
server_groups = raw_res['server-groups']
for i, element in enumerate(server_groups):
server_groups[i] = {'Name': server_groups[i], 'SiteName': site}
human_readable = tableToMarkdown(f'Server groups in {site}', server_groups, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.ServerGroup(val.Name===obj.Name)': server_groups}
return_outputs(human_readable, entry_context, raw_res)
@logger
def server_group_policies_list_command(client, args):
site = args.get('site-name')
server_group = args.get('server-group-name')
raw_res = client.do_request('GET', f'conf/serverGroups/{site}/{server_group}/securityPolicies')
policies = []
for policy in raw_res:
policies.append({'System': policy.get('system'),
'PolicyName': policy.get('policy-name'),
'PolicyType': policy.get('policy-type'),
'ServerGroup': server_group,
'SiteName': site})
human_readable = tableToMarkdown(f'Policies for {server_group}', policies, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.SecurityPolicy(val.PolicyName===obj.PolicyName)': policies}
return_outputs(human_readable, entry_context, raw_res)
@logger
def custom_policy_list_command(client, args):
raw_res = client.do_request('GET', 'conf/policies/security/webServiceCustomPolicies')
policies = []
if raw_res.get('customWebPolicies'):
policies = raw_res['customWebPolicies']
for i, element in enumerate(policies):
policies[i] = {'Name': policies[i]}
human_readable = tableToMarkdown('Custom web policies', policies, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)': policies}
return_outputs(human_readable, entry_context, raw_res)
@logger
def get_custom_policy_command(client, args):
policy_name = args.get('policy-name')
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy data for {policy_name}')
return_outputs(human_readable, entry_context, raw_res)
@logger
def create_ip_group_command(client, args):
group_name = args.get('group-name')
body = generate_ip_groups_entries(args)
client.do_request('POST', f'conf/ipGroups/{group_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'Group {group_name} created successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def update_ip_group_command(client, args):
group_name = args.get('group-name')
body = generate_ip_groups_entries(args)
client.do_request('PUT', f'conf/ipGroups/{group_name}/data', json_data=body)
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'Group {group_name} updated successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def delete_ip_group_command(client, args):
group_name = args.get('group-name')
raw_res = client.do_request('DELETE', f'conf/ipGroups/{group_name}')
return_outputs(f'Group {group_name} deleted successfully', {}, raw_res)
@logger
def create_custom_policy_command(client, args):
policy_name = args.get('policy-name')
site = args.get('site-name-to-apply')
server_group = args.get('server-group-name-to-apply')
web_service = args.get('web-service-name-to-apply')
match_criteria_json = args.get('match-criteria-json')
body = generate_policy_data_body(args)
if match_criteria_json and not isinstance(match_criteria_json, dict):
try:
match_criteria_json = json.loads(match_criteria_json)
except Exception:
raise Exception(f'Failed to parse match-criteria-json as JSON data,'
f' received object:\n{match_criteria_json}')
body['matchCriteria'] = match_criteria_json
else:
body = generate_match_criteria(body, args)
body['applyTo'] = [{'siteName': site, 'serverGroupName': server_group, 'webServiceName': web_service}]
client.do_request('POST', f'conf/policies/security/webServiceCustomPolicies/{policy_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy {policy_name} created successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def update_custom_policy_command(client, args):
policy_name = args.get('policy-name')
site = args.get('site-name-to-apply')
server_group = args.get('server-group-name-to-apply', '')
web_service = args.get('web-service-name-to-apply', '')
apply_operation = args.get('apply-operation', '')
match_criteria_json = args.get('match-criteria-json')
body = generate_policy_data_body(args)
if match_criteria_json and not isinstance(match_criteria_json, dict):
try:
match_criteria_json = json.loads(match_criteria_json)
except Exception:
raise DemistoException(f'Failed to parse match-criteria-json as JSON data,'
f' received object:\n{match_criteria_json}')
body['matchCriteria'] = match_criteria_json
else:
body = generate_match_criteria(body, args)
if apply_operation:
body['applyTo'] = [{'operation': apply_operation, 'siteName': site, 'serverGroupName': server_group,
'webServiceName': web_service}]
client.do_request('PUT', f'conf/policies/security/webServiceCustomPolicies/{policy_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy {policy_name} updated successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def delete_custom_policy_command(client, args):
policy_name = args.get('policy-name')
raw_res = client.do_request('DELETE', f'conf/policies/security/webServiceCustomPolicies/{policy_name}')
return_outputs(f'Policy {policy_name} deleted successfully', {}, raw_res)
def main():
params = demisto.params()
# get the service API url
base_url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
credentials = params.get('credentials')
username = credentials['identifier'] if credentials else ''
password = credentials['password'] if credentials else ''
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy)
command = demisto.command()
args = demisto.args()
commands = {'test-module': test_module,
'imperva-waf-ip-group-list': ip_group_list_command,
'imperva-waf-ip-group-list-entries': ip_group_list_entries_command,
'imperva-waf-ip-group-remove-entries': ip_group_remove_entries_command,
'imperva-waf-sites-list': sites_list_command,
'imperva-waf-server-group-list': server_groups_list_command,
'imperva-waf-server-group-list-policies': server_group_policies_list_command,
'imperva-waf-web-service-custom-policy-list': custom_policy_list_command,
'imperva-waf-web-service-custom-policy-get': get_custom_policy_command,
'imperva-waf-ip-group-create': create_ip_group_command,
'imperva-waf-ip-group-update-entries': update_ip_group_command,
'imperva-waf-ip-group-delete': delete_ip_group_command,
'imperva-waf-web-service-custom-policy-create': create_custom_policy_command,
'imperva-waf-web-service-custom-policy-update': update_custom_policy_command,
'imperva-waf-web-service-custom-policy-delete': delete_custom_policy_command,
}
if command in commands:
commands[command](client, args)
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
# Log exceptions
except Exception as e:
return_error(f'Unexpected error: {str(e)}', error=traceback.format_exc())
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 2.0625
| 2
|
LeetCode/May Leetcoding Challenge/Ambiguous Coordinates.py
|
UtkarshPathrabe/Competitive-Coding
| 13
|
12778945
|
import itertools
from typing import List


class Solution:
def ambiguousCoordinates(self, s: str) -> List[str]:
        def make(frag):
            # Yield every number that can be formed from `frag`: the integer
            # part may not have a leading zero (unless it is exactly "0"), and
            # the fractional part may not end with a zero.
            N = len(frag)
            for d in range(1, N + 1):
                left, right = frag[:d], frag[d:]
                if (left == '0' or not left.startswith('0')) and not right.endswith('0'):
                    yield left + ('.' if d != N else '') + right
S = s[1: -1]
return ["({}, {})".format(*cand) for i in range(1, len(S)) for cand in itertools.product(make(S[:i]), make(S[i:]))]
| 3.3125
| 3
|
e2e/pages/security_page.py
|
svic/jenkins-configuration
| 0
|
12778946
|
<filename>e2e/pages/security_page.py
import os
import re
from . import JENKINS_HOST
from bok_choy.page_object import PageObject
class SecurityConfigurationPage(PageObject):
url = "http://{}:8080/configureSecurity".format(JENKINS_HOST)
def is_browser_on_page(self):
return "configure global security" in self.browser.title.lower()
def is_dsl_script_security_enabled(self):
enabled = self.q(css='[name="_.useScriptSecurity"]').attrs('checked')[0]
        return enabled == 'true'
def is_security_enabled(self):
enabled = self.q(css='[name="_.useSecurity"]').attrs('checked')[0]
        return enabled == 'true'
    def is_cli_remoting_enabled(self):
        # `attrs` returns a list of attribute values; take the first match so
        # the selector below interpolates a plain id string rather than a list.
        row_id = self.q(css='[name="jenkins-CLI"]').attrs('id')[0]
        cli_checkbox = self.q(css='[nameref="{}"] > td > [name="_.enabled"]'.format(row_id)).attrs('checked')
        return bool(cli_checkbox) and cli_checkbox[0] == 'true'
def is_gh_oauth_enabled(self):
"""
return true if the `GitHub Web URI` field is present, which will only
appear when GH OAuth is selected, rather than the GH OAuth radio button,
which has no unique CSS identifier
"""
return self.q(css='[name="_.githubWebUri"]').visible
def is_saml_enabled(self):
"""
return true if the `IdpMetadataConfiguration/checkXml` field is present,
which will only appear when SAML is selected, rather than the SAML radio button,
which has no unique CSS identifier
"""
css_query = '[checkurl="/descriptorByName/org.jenkinsci.plugins.saml.IdpMetadataConfiguration/checkXml"]'
return self.q(css=css_query).visible
def get_user_permissions(self, user):
"""
return a list of the permissions enabled for a particular user
"""
user_privileges = []
user_css = '[id="hudson-security-ProjectMatrixAuthorization"] > tbody > [name="[{}]"]'.format(user)
for p in self.q(css='{} > td > input'.format(user_css)).attrs('name'):
privilege_name = re.search(r'\[(?P<name>.*)\]', p).group('name')
privilege_state = self.q(
css='{} > td > [name="{}"]'.format(user_css, p)
).attrs('checked')[0]
if privilege_state == 'true':
user_privileges.append(privilege_name)
return user_privileges
def is_csrf_protection_enabled(self):
enabled = self.q(css='[name="_.csrf"]').attrs('checked')[0]
        return enabled == 'true'
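
# Minimal usage sketch (assumes a bok_choy-compatible `browser` instance, e.g.
# from a bok_choy WebAppTest; the `browser` name here is hypothetical):
#     page = SecurityConfigurationPage(browser)
#     page.visit()
#     assert page.is_security_enabled()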
| 2.625
| 3
|
example/trade/post_batch_create_order.py
|
bailzx5522/huobi_Python
| 611
|
12778947
|
<gh_stars>100-1000
import time
from huobi.client.trade import TradeClient
from huobi.constant import *
from huobi.utils import *
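# g_api_key, g_secret_key and g_account_id come from the wildcard import of
# huobi.constant above (placeholder test credentials defined by the SDK).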
trade_client = TradeClient(api_key=g_api_key, secret_key=g_secret_key)
client_order_id_header = str(int(time.time()))
symbol_eosusdt = "eosusdt"
client_order_id_eos_01 = client_order_id_header + symbol_eosusdt + "01"
client_order_id_eos_02 = client_order_id_header + symbol_eosusdt + "02"
client_order_id_eos_03 = client_order_id_header + symbol_eosusdt + "03"
buy_limit_eos_01 = {
    "account_id": g_account_id,
    "symbol": symbol_eosusdt,
    "order_type": OrderType.BUY_LIMIT,
    "source": OrderSource.API,
    "amount": 50,
    "price": 0.12,
    "client_order_id": client_order_id_eos_01
}
buy_limit_eos_02 = {
    "account_id": g_account_id,
    "symbol": symbol_eosusdt,
    "order_type": OrderType.BUY_LIMIT,
    "source": OrderSource.API,
    "amount": 7,
    "price": 0.80,
    "client_order_id": client_order_id_eos_02
}
buy_limit_eos_03 = {
    "account_id": g_account_id,
    "symbol": symbol_eosusdt,
    "order_type": OrderType.BUY_LIMIT,
    "source": OrderSource.API,
    "amount": 20,
    "price": 0.252,
    "client_order_id": client_order_id_eos_03
}
order_config_list = [
buy_limit_eos_01,
buy_limit_eos_02,
buy_limit_eos_03
]
create_result = trade_client.batch_create_order(order_config_list=order_config_list)
LogInfo.output_list(create_result)
order_id_list = []
if create_result:
    for item in create_result:
        order_id_list.append(item.order_id)

    # Clean up: cancel every order this example just created.
    result = trade_client.cancel_orders(symbol_eosusdt, order_id_list)
    result.print_object()
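
# Optional follow-up (sketch): fetch one of the orders back to confirm its
# final state; get_order is part of the same TradeClient API.
#     order = trade_client.get_order(order_id=order_id_list[0])
#     order.print_object()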
| 2
| 2
|
root/plugins/screenshot.py
|
sahaynitin4tellyfun/TG-RenameBot
| 1
|
12778948
|
<filename>root/plugins/screenshot.py
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import os
import shutil
import time
from root.config import Config
from pyrogram import Client, filters
import pyrogram
logging.getLogger("pyrogram").setLevel(logging.WARNING)
from root.plugins.ffmpeg import generate_screen_shots
from root.utils.utils import progress_for_pyrogram
@Client.on_message(filters.command(["generate_ss","screenshot"]))
async def generate_screen_shot(bot, update):
if update.reply_to_message is not None:
download_location = Config.DOWNLOAD_LOCATION + "/"
a = await bot.send_message(
chat_id=update.chat.id,
text="**✅ Okay... Generating ScreenShots...!**",
reply_to_message_id=update.message_id
)
c_time = time.time()
the_real_download_location = await bot.download_media(
message=update.reply_to_message,
file_name=download_location,
progress=progress_for_pyrogram,
progress_args=(
"**✅ Okay... Generating ScreenShots...!**",
a,
c_time
)
)
if the_real_download_location is not None:
await bot.edit_message_text(
text="**✅ Successfully Generated Screenshots... Now Uploading them 👇🏻**",
chat_id=update.chat.id,
message_id=a.message_id
)
tmp_directory_for_each_user = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id)
if not os.path.isdir(tmp_directory_for_each_user):
os.makedirs(tmp_directory_for_each_user)
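            # `generate_screen_shots` (root.plugins.ffmpeg) is expected to return
            # a list of image paths, or None on failure; the meaning of the
            # trailing arguments (watermark flag, sample duration, screenshot
            # count) is inferred from this call site, not a documented signature.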
images = await generate_screen_shots(
the_real_download_location,
tmp_directory_for_each_user,
False,
5,
9
)
logger.info(images)
await bot.edit_message_text(
text="**🥳 Uploading To Telegram...**",
chat_id=update.chat.id,
message_id=a.message_id
)
media_album_p = []
if images is not None:
i = 0
caption = "__**© Coded By <NAME>**__"
for image in images:
if os.path.exists(image):
if i == 0:
media_album_p.append(
pyrogram.types.InputMediaPhoto(
media=image,
caption=caption,
parse_mode="html"
)
)
else:
media_album_p.append(
pyrogram.types.InputMediaPhoto(
media=image
)
)
i = i + 1
await bot.send_media_group(
chat_id=update.chat.id,
disable_notification=True,
reply_to_message_id=a.message_id,
media=media_album_p
)
            try:
                shutil.rmtree(tmp_directory_for_each_user)
                os.remove(the_real_download_location)
            except Exception:
                # Best-effort cleanup; ignore paths that are already gone.
                pass
await bot.edit_message_text(
text="**😝 Successfully Uploaded ScreenShots... 📸\n✅ Thanks for Using Meh..!**",
chat_id=update.chat.id,
message_id=a.message_id,
disable_web_page_preview=True
)
else:
await bot.send_message(
chat_id=update.chat.id,
text="**😓 You Noobie, Reply to a Telegram Media to Generate ScreenShots...!**",
reply_to_message_id=update.message_id
)
| 2.328125
| 2
|
tool/staticAnalysis/remove_bitfield.py
|
SZU-SE/PERIOD
| 16
|
12778949
|
<reponame>SZU-SE/PERIOD
#!/usr/bin/python3
import os
import re
import shutil
import sys
# traverse root directory, and list directories as dirs and files as files
for root, dirs, files in os.walk(sys.argv[1]):
for file in files:
if file == "tags":
continue
name = os.path.join(root,file)
if os.path.islink(name):
continue
        print(name)
        with open(name) as f, open('/tmp/temp', 'w') as temp:
            instruct = False
            bcount = 0
            for line in f:
                if re.match(r'struct.*{', line):
                    instruct = True
                bcount += line.count('{')
                bcount -= line.count('}')
                if bcount == 0:
                    instruct = False
                if instruct:
                    # Strip bit-field widths, e.g. "int a : 3;" -> "int a;".
                    if re.search(r"^[^?]*:\ *[0-9]+;", line):
                        line = re.sub(r':\ *[0-9]+;', ';', line)
                    if re.search(r"^[^?]*:\ *[0-9]+,", line):
                        line = re.sub(r':\ *[0-9]+,', ',', line)
                temp.write(line)
        # Both files are closed (and flushed) here; shutil.move also works when
        # /tmp lives on a different filesystem, unlike os.rename.
        shutil.move('/tmp/temp', name)
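
# Example of the rewrite performed inside struct definitions (bit-field widths
# are stripped, everything else is copied through unchanged):
#
#   struct flags {                      struct flags {
#       unsigned int a : 1;    --->         unsigned int a;
#       unsigned int b : 3;    --->         unsigned int b;
#   };                                  };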
| 2.703125
| 3
|
agent/command/control/libknot/control.py
|
riszkymf/RESTKnot
| 1
|
12778950
|
<gh_stars>1-10
"""Libknot server control interface wrapper.
Example:
import json
from libknot.control import *
ctl = KnotCtl()
ctl.connect("/var/run/knot/knot.sock")
try:
ctl.send_block(cmd="conf-begin")
resp = ctl.receive_block()
ctl.send_block(cmd="conf-set", section="zone", item="domain", data="test")
resp = ctl.receive_block()
ctl.send_block(cmd="conf-commit")
resp = ctl.receive_block()
ctl.send_block(cmd="conf-read", section="zone", item="domain")
resp = ctl.receive_block()
print(json.dumps(resp, indent=4))
finally:
ctl.send(KnotCtlType.END)
ctl.close()
"""
from ctypes import cdll, c_void_p, c_int, c_char_p, c_uint, byref
from enum import IntEnum
from sys import platform
CTL_ALLOC = None
CTL_FREE = None
CTL_SET_TIMEOUT = None
CTL_CONNECT = None
CTL_CLOSE = None
CTL_SEND = None
CTL_RECEIVE = None
CTL_ERROR = None
def load_lib(path=None):
"""Loads the libknot library."""
if path is None:
path = "libknot.dylib" if platform == "darwin" else "libknot.so"
LIB = cdll.LoadLibrary(path)
global CTL_ALLOC
CTL_ALLOC = LIB.knot_ctl_alloc
CTL_ALLOC.restype = c_void_p
global CTL_FREE
CTL_FREE = LIB.knot_ctl_free
CTL_FREE.argtypes = [c_void_p]
global CTL_SET_TIMEOUT
CTL_SET_TIMEOUT = LIB.knot_ctl_set_timeout
CTL_SET_TIMEOUT.argtypes = [c_void_p, c_int]
global CTL_CONNECT
CTL_CONNECT = LIB.knot_ctl_connect
CTL_CONNECT.restype = c_int
CTL_CONNECT.argtypes = [c_void_p, c_char_p]
global CTL_CLOSE
CTL_CLOSE = LIB.knot_ctl_close
CTL_CLOSE.argtypes = [c_void_p]
global CTL_SEND
CTL_SEND = LIB.knot_ctl_send
CTL_SEND.restype = c_int
CTL_SEND.argtypes = [c_void_p, c_uint, c_void_p]
global CTL_RECEIVE
CTL_RECEIVE = LIB.knot_ctl_receive
CTL_RECEIVE.restype = c_int
CTL_RECEIVE.argtypes = [c_void_p, c_void_p, c_void_p]
global CTL_ERROR
CTL_ERROR = LIB.knot_strerror
CTL_ERROR.restype = c_char_p
CTL_ERROR.argtypes = [c_int]
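# To load a libknot build from a non-default location, call load_lib()
# explicitly before constructing the first KnotCtl instance, e.g.:
#     load_lib("/usr/local/lib/libknot.so")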
class KnotCtlError(Exception):
"""Libknot server control error."""
def __init__(self, message, data=None):
"""
@type message: str
@type data: KnotCtlData
"""
self.message = message
self.data = data
def __str__(self):
return "%s (data: %s)" % (self.message, self.data)
class KnotCtlType(IntEnum):
"""Libknot server control data unit types."""
END = 0
DATA = 1
EXTRA = 2
BLOCK = 3
class KnotCtlDataIdx(IntEnum):
"""Libknot server control data unit indices."""
COMMAND = 0
FLAGS = 1
ERROR = 2
SECTION = 3
ITEM = 4
ID = 5
ZONE = 6
OWNER = 7
TTL = 8
TYPE = 9
DATA = 10
FILTER = 11
class KnotCtlData(object):
"""Libknot server control data unit."""
DataArray = c_char_p * len(KnotCtlDataIdx)
def __init__(self):
self.data = self.DataArray()
    def __str__(self):
        string = str()
        for idx in KnotCtlDataIdx:
            if self.data[idx]:
                if string:
                    string += ", "
                # Go through __getitem__ so byte values are decoded for display.
                string += "%s = %s" % (idx.name, self[idx])
        return string
def __getitem__(self, index):
"""Data unit item getter.
@type index: KnotCtlDataIdx
@rtype: str
"""
value = self.data[index]
if not value:
value = str()
return value if isinstance(value, str) else value.decode()
def __setitem__(self, index, value):
"""Data unit item setter.
@type index: KnotCtlDataIdx
@type value: str
"""
self.data[index] = c_char_p(value.encode()) if value else c_char_p()
class KnotCtl(object):
"""Libknot server control interface."""
def __init__(self):
if not CTL_ALLOC:
load_lib()
self.obj = CTL_ALLOC()
def __del__(self):
CTL_FREE(self.obj)
def set_timeout(self, timeout):
"""Sets control socket operations timeout in seconds.
@type timeout: int
"""
CTL_SET_TIMEOUT(self.obj, timeout * 1000)
def connect(self, path):
"""Connect to a specified control UNIX socket.
@type path: str
"""
ret = CTL_CONNECT(self.obj, path.encode())
if ret != 0:
err = CTL_ERROR(ret)
raise KnotCtlError(err if isinstance(err, str) else err.decode())
def close(self):
"""Disconnects from the current control socket."""
CTL_CLOSE(self.obj)
def send(self, data_type, data=None):
"""Sends a data unit to the connected control socket.
@type data_type: KnotCtlType
@type data: KnotCtlData
"""
ret = CTL_SEND(self.obj, data_type,
data.data if data else c_char_p())
if ret != 0:
err = CTL_ERROR(ret)
raise KnotCtlError(err if isinstance(err, str) else err.decode())
def receive(self, data=None):
"""Receives a data unit from the connected control socket.
@type data: KnotCtlData
@rtype: KnotCtlType
"""
data_type = c_uint()
ret = CTL_RECEIVE(self.obj, byref(data_type),
data.data if data else c_char_p())
if ret != 0:
err = CTL_ERROR(ret)
raise KnotCtlError(err if isinstance(err, str) else err.decode())
return KnotCtlType(data_type.value)
def send_block(self, cmd, section=None, item=None, identifier=None, zone=None,
owner=None, ttl=None, rtype=None, data=None, flags=None,
filter=None):
"""Sends a control query block.
@type cmd: str
@type section: str
@type item: str
@type identifier: str
@type zone: str
@type owner: str
@type ttl: str
@type rtype: str
@type data: str
@type filter: str
"""
query = KnotCtlData()
query[KnotCtlDataIdx.COMMAND] = cmd
query[KnotCtlDataIdx.SECTION] = section
query[KnotCtlDataIdx.ITEM] = item
query[KnotCtlDataIdx.ID] = identifier
query[KnotCtlDataIdx.ZONE] = zone
query[KnotCtlDataIdx.OWNER] = owner
query[KnotCtlDataIdx.TTL] = ttl
query[KnotCtlDataIdx.TYPE] = rtype
query[KnotCtlDataIdx.DATA] = data
query[KnotCtlDataIdx.FLAGS] = flags
query[KnotCtlDataIdx.FILTER] = filter
self.send(KnotCtlType.DATA, query)
self.send(KnotCtlType.BLOCK)
def _receive_conf(self, out, reply):
section = reply[KnotCtlDataIdx.SECTION]
ident = reply[KnotCtlDataIdx.ID]
item = reply[KnotCtlDataIdx.ITEM]
data = reply[KnotCtlDataIdx.DATA]
# Add the section if not exists.
if section not in out:
out[section] = dict()
# Add the identifier if not exists.
if ident and ident not in out[section]:
out[section][ident] = dict()
# Return if no item/value.
if not item:
return
item_level = out[section][ident] if ident else out[section]
# Treat alone identifier item differently.
if item in ["id", "domain", "target"]:
if data not in out[section]:
out[section][data] = dict()
else:
if item not in item_level:
item_level[item] = list()
if data:
item_level[item].append(data)
def _receive_zone_status(self, out, reply):
zone = reply[KnotCtlDataIdx.ZONE]
rtype = reply[KnotCtlDataIdx.TYPE]
data = reply[KnotCtlDataIdx.DATA]
# Add the zone if not exists.
if zone not in out:
out[zone] = dict()
out[zone][rtype] = data
def _receive_zone(self, out, reply):
zone = reply[KnotCtlDataIdx.ZONE]
owner = reply[KnotCtlDataIdx.OWNER]
ttl = reply[KnotCtlDataIdx.TTL]
rtype = reply[KnotCtlDataIdx.TYPE]
data = reply[KnotCtlDataIdx.DATA]
# Add the zone if not exists.
if zone not in out:
out[zone] = dict()
if owner not in out[zone]:
out[zone][owner] = dict()
if rtype not in out[zone][owner]:
out[zone][owner][rtype] = dict()
# Add the key/value.
out[zone][owner][rtype]["ttl"] = ttl
if not "data" in out[zone][owner][rtype]:
out[zone][owner][rtype]["data"] = [data]
else:
out[zone][owner][rtype]["data"].append(data)
def _receive_stats(self, out, reply):
zone = reply[KnotCtlDataIdx.ZONE]
section = reply[KnotCtlDataIdx.SECTION]
item = reply[KnotCtlDataIdx.ITEM]
idx = reply[KnotCtlDataIdx.ID]
data = int(reply[KnotCtlDataIdx.DATA])
# Add the zone if not exists.
if zone:
if "zone" not in out:
out["zone"] = dict()
if zone not in out["zone"]:
out["zone"][zone] = dict()
section_level = out["zone"][zone] if zone else out
if section not in section_level:
section_level[section] = dict()
if idx:
if item not in section_level[section]:
section_level[section][item] = dict()
section_level[section][item][idx] = data
else:
section_level[section][item] = data
def receive_stats(self):
"""Receives statistics answer and returns it as a structured dictionary.
@rtype: dict
"""
out = dict()
err_reply = None
while True:
reply = KnotCtlData()
reply_type = self.receive(reply)
# Stop if not data type.
if reply_type not in [KnotCtlType.DATA, KnotCtlType.EXTRA]:
break
# Check for an error.
if reply[KnotCtlDataIdx.ERROR]:
err_reply = reply
continue
self._receive_stats(out, reply)
if err_reply:
raise KnotCtlError(err_reply[KnotCtlDataIdx.ERROR], err_reply)
return out
def receive_block(self):
"""Receives a control answer and returns it as a structured dictionary.
@rtype: dict
"""
out = dict()
err_reply = None
while True:
reply = KnotCtlData()
reply_type = self.receive(reply)
# Stop if not data type.
if reply_type not in [KnotCtlType.DATA, KnotCtlType.EXTRA]:
break
# Check for an error.
if reply[KnotCtlDataIdx.ERROR]:
err_reply = reply
continue
# Check for config data.
if reply[KnotCtlDataIdx.SECTION]:
self._receive_conf(out, reply)
# Check for zone data.
elif reply[KnotCtlDataIdx.ZONE]:
if reply[KnotCtlDataIdx.OWNER]:
self._receive_zone(out, reply)
else:
self._receive_zone_status(out, reply)
else:
continue
if err_reply:
raise KnotCtlError(err_reply[KnotCtlDataIdx.ERROR], err_reply)
return out
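
# Sketch of a statistics query using the helpers above (assumes knotd was built
# with the stats module enabled):
#     ctl = KnotCtl()
#     ctl.connect("/var/run/knot/knot.sock")
#     ctl.send_block(cmd="stats")
#     stats = ctl.receive_stats()
#     ctl.send(KnotCtlType.END)
#     ctl.close()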
| 2.15625
| 2
|