| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
models.py
|
unt-libraries/serial-set-inventory
| 0
|
12779851
|
<gh_stars>0
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
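# For reference, a module like this is typically regenerated from an existing database with
# Django's introspection command (run inside a configured project); the output path below is
# illustrative only:
#
#     python manage.py inspectdb > serial_set_inventory/models.py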
from __future__ import unicode_literals
from django.db import models
class Auth(models.Model):
username = models.CharField(primary_key=True, max_length=50)
password = models.CharField(max_length=32)
class_field = models.IntegerField(db_column='class') # Field renamed because it was a Python reserved word.
email = models.CharField(max_length=64)
instno = models.IntegerField()
class Meta:
managed = False
db_table = 'auth'
class AuthGroup(models.Model):
name = models.CharField(unique=True, max_length=80)
class Meta:
managed = False
db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
group = models.ForeignKey(AuthGroup)
permission = models.ForeignKey('AuthPermission')
class Meta:
managed = False
db_table = 'auth_group_permissions'
unique_together = (('group', 'permission'),)
class AuthPermission(models.Model):
name = models.CharField(max_length=255)
content_type = models.ForeignKey('DjangoContentType')
codename = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'auth_permission'
unique_together = (('content_type', 'codename'),)
class AuthUser(models.Model):
password = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.IntegerField()
username = models.CharField(unique=True, max_length=30)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.CharField(max_length=254)
is_staff = models.IntegerField()
is_active = models.IntegerField()
date_joined = models.DateTimeField()
class Meta:
managed = False
db_table = 'auth_user'
class AuthUserGroups(models.Model):
user = models.ForeignKey(AuthUser)
group = models.ForeignKey(AuthGroup)
class Meta:
managed = False
db_table = 'auth_user_groups'
unique_together = (('user', 'group'),)
class AuthUserUserPermissions(models.Model):
user = models.ForeignKey(AuthUser)
permission = models.ForeignKey(AuthPermission)
class Meta:
managed = False
db_table = 'auth_user_user_permissions'
unique_together = (('user', 'permission'),)
class DjangoAdminLog(models.Model):
action_time = models.DateTimeField()
object_id = models.TextField(blank=True, null=True)
object_repr = models.CharField(max_length=200)
action_flag = models.SmallIntegerField()
change_message = models.TextField()
content_type = models.ForeignKey('DjangoContentType', blank=True, null=True)
user = models.ForeignKey(AuthUser)
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_migrations'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_session'
class Institutions(models.Model):
instno = models.AutoField(primary_key=True)
institution = models.CharField(max_length=255)
library = models.CharField(max_length=255)
address1 = models.CharField(max_length=255)
address2 = models.CharField(max_length=255)
city = models.CharField(max_length=255)
st = models.CharField(max_length=255)
zip = models.CharField(max_length=255)
depno = models.CharField(max_length=255)
genphone = models.CharField(db_column='genPhone', max_length=255) # Field name made lowercase.
genemail = models.CharField(db_column='genEmail', max_length=255) # Field name made lowercase.
dateinventory = models.CharField(db_column='dateInventory', max_length=255) # Field name made lowercase.
class Meta:
managed = False
db_table = 'institutions'
class Inventory(models.Model):
newid = models.AutoField(primary_key=True)
instno = models.IntegerField()
servolno = models.CharField(max_length=255)
held = models.CharField(max_length=4)
deptl = models.CharField(max_length=4)
note = models.CharField(max_length=255)
class Meta:
managed = False
db_table = 'inventory'
class Servols(models.Model):
newid = models.IntegerField(primary_key=True)
ppno = models.CharField(max_length=255)
servolno = models.CharField(max_length=255)
orderno = models.CharField(max_length=255)
congsess = models.CharField(max_length=255)
title = models.TextField()
docrptnos = models.CharField(max_length=255)
annot = models.CharField(max_length=255)
notissued = models.CharField(max_length=255)
congress = models.CharField(max_length=255)
siteroot = models.CharField(db_column='siteRoot', max_length=255) # Field name made lowercase.
class Meta:
managed = False
db_table = 'servols'
class SsiCongress(models.Model):
congress_number = models.IntegerField()
begin_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
class Meta:
managed = False
db_table = 'ssi_congress'
class SsiDocumenttype(models.Model):
house = models.CharField(max_length=2)
document_type = models.CharField(max_length=100)
document_name = models.CharField(unique=True, max_length=106)
class Meta:
managed = False
db_table = 'ssi_documenttype'
class SsiInstitution(models.Model):
institution_name = models.CharField(max_length=255)
library_name = models.CharField(max_length=255)
address_2 = models.CharField(max_length=255)
city = models.CharField(max_length=255)
state = models.CharField(max_length=2)
zip_code = models.CharField(max_length=10)
latitude = models.DecimalField(max_digits=9, decimal_places=6)
longitude = models.DecimalField(max_digits=9, decimal_places=6)
depository_number = models.CharField(max_length=255)
phone_number = models.CharField(max_length=20)
email_address = models.CharField(max_length=254)
date_inventoried = models.DateField()
hidden = models.IntegerField()
address_1 = models.CharField(max_length=255)
class Meta:
managed = False
db_table = 'ssi_institution'
class SsiInventory(models.Model):
departmental_edition = models.IntegerField()
note = models.CharField(max_length=255)
institution = models.ForeignKey(SsiInstitution)
volume = models.ForeignKey('SsiVolume')
class Meta:
managed = False
db_table = 'ssi_inventory'
class SsiSession(models.Model):
session_number = models.CharField(max_length=2)
begin_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
congress = models.ForeignKey(SsiCongress)
class Meta:
managed = False
db_table = 'ssi_session'
class SsiVolume(models.Model):
serial_number = models.CharField(max_length=255)
title = models.CharField(max_length=255)
publication_numbers = models.CharField(max_length=255)
annotation = models.CharField(max_length=255)
not_issued = models.IntegerField()
document_type = models.ForeignKey(SsiDocumenttype)
session = models.ForeignKey(SsiSession)
class Meta:
managed = False
db_table = 'ssi_volume'
| 2.046875
| 2
|
VectorMessenger/MessengerCore/MessengerBase.py
|
VectorMessenger/VectorMessenger_Zero
| 0
|
12779852
|
import socket
class VMUDPBase:
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.settimeout(2.0)
| 2.25
| 2
|
inventory/locations/migrations/0002_auto_20170610_1325.py
|
cnobile2012/inventory
| 10
|
12779853
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-10 17:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('locations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='locationformat',
name='char_definition',
field=models.CharField(help_text="Determine the character position definition where alpha='\\a', numeric='\\d', punctuation='\\p', or any hard coded character. ex. \\a\\d\\d\\d could be B001 or \\a@\\d\\d could be D@99.", max_length=250, verbose_name='Format'),
),
]
| 1.882813
| 2
|
library/pimoroni_physical_feather_pins/nRF52840.py
|
dglaude/physical_feather_pins
| 2
|
12779854
|
<reponame>dglaude/physical_feather_pins<filename>library/pimoroni_physical_feather_pins/nRF52840.py
from . import pin_error
import microcontroller
def pin3():
return microcontroller.pin.P0_31
def pin5():
return microcontroller.pin.P0_04
def pin6():
return microcontroller.pin.P0_05
def pin7():
return microcontroller.pin.P0_30
def pin8():
return microcontroller.pin.P0_28
def pin9():
return microcontroller.pin.P0_02
def pin10():
return microcontroller.pin.P0_03
def pin11():
return microcontroller.pin.P0_14
def pin12():
return microcontroller.pin.P0_13
def pin13():
return microcontroller.pin.P0_15
def pin14():
return microcontroller.pin.P0_24
def pin15():
return microcontroller.pin.P0_25
def pin16():
return microcontroller.pin.P0_10
def pin17():
return microcontroller.pin.P0_12
def pin18():
return microcontroller.pin.P0_11
def pin19():
return microcontroller.pin.P1_08
def pin20():
return microcontroller.pin.P0_07
def pin21():
return microcontroller.pin.P0_26
def pin22():
return microcontroller.pin.P0_27
def pin23():
return microcontroller.pin.P0_06
def pin24():
return microcontroller.pin.P0_08
def pin25():
return microcontroller.pin.P1_09
def init(scope):
"""Pull the pin definitions into the main module namespace"""
for key in globals().keys():
if key.startswith('pin'):
scope[key] = globals()[key]
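
# A minimal usage sketch (hypothetical caller module; import path assumed from this package layout):
#
#     from pimoroni_physical_feather_pins import nRF52840
#     nRF52840.init(globals())
#     status_led = pin13()   # the pinN helpers now resolve as top-level names in the caller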
| 2.21875
| 2
|
src/visualizations/images.py
|
MatanDanos/AttnGAN-v1.1
| 2
|
12779855
|
# Image visualization helpers: save images to the figures directory.
import os
from skimage.io import imsave
def visualize_image(image, image_name):
"""Given an image, will save the image to the figures directory
Parameters:
image: a [N,M,3] tensor
filename (str): name of the image
"""
image_path = os.path.join("../figures", image_name + ".jpg")
print(image_path)
imsave(image_path, image)
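
# A minimal usage sketch (hypothetical data; assumes a ../figures directory exists relative to the working directory):
if __name__ == "__main__":
    import numpy as np
    dummy = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    visualize_image(dummy, "example")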
| 3.234375
| 3
|
point/make_npz_for_chainercv_upload.py
|
yuyu2172/coco-evaluation
| 1
|
12779856
|
import pickle
import numpy as np
with open('data/fake.pkl', 'rb') as f:
points, labels, scores, keys = pickle.load(f)
with open('data/fake_gt.pkl', 'rb') as f:
gt_points, gt_bboxes, gt_labels, gt_areas, gt_crowdeds = pickle.load(f)
gt_points_yx = []
gt_point_is_valids = []
for gt_point in gt_points:
gt_point_yx = []
gt_point_is_valid = []
for pnt in gt_point:
gt_point_yx.append(pnt[:, :2])
gt_point_is_valid.append(pnt[:, 2])
gt_points_yx.append(gt_point_yx)
gt_point_is_valids.append(gt_point_is_valid)
points_yx = []
for point in points:
point_yx = []
for pnt in point:
point_yx.append(pnt[:, :2])
points_yx.append(point_yx)
np.savez('eval_point_coco_dataset_2019_02_18.npz',
points=gt_points_yx,
is_valids=gt_point_is_valids,
bboxes=gt_bboxes,
labels=gt_labels,
areas=gt_areas,
crowdeds=gt_crowdeds)
np.savez('eval_point_coco_result_2019_02_18.npz',
points=points_yx,
scores=scores,
labels=labels,)
| 2.328125
| 2
|
live-coding/L03/line_intersection.py
|
patricklam/stqam-2017
| 30
|
12779857
|
<gh_stars>10-100
class LineSegment:
def __init__(self, x1, x2):
self.x1 = x1; self.x2 = x2;
# this code was found by <NAME> on stackoverflow:
# http://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other?rq=1
def intersect(a, b):
    return (a.x1 < b.x2) and (a.x2 > b.x1)
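
# A minimal usage sketch (not part of the original lecture file):
if __name__ == '__main__':
    a = LineSegment(0, 5)
    b = LineSegment(3, 8)
    print(intersect(a, b))                    # True: the segments overlap on (3, 5)
    print(intersect(a, LineSegment(6, 9)))    # False: the segments are disjoint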
| 2.8125
| 3
|
guillotina_client/tests/conftest.py
|
guillotinaweb/guillotina_client
| 0
|
12779858
|
# -*- coding: utf-8 -*-
pytest_plugins = [
'guillotina_client.tests.fixtures',
'guillotina.tests.fixtures',
]
| 0.992188
| 1
|
docs/examples/container/joyent/instantiate_driver.py
|
rgharris/libcloud
| 0
|
12779859
|
<gh_stars>0
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
cls = get_driver(Provider.JOYENT)
conn = cls(
host="us-east-1.docker.joyent.com",
port=2376,
key_file="key.pem",
cert_file="~/.sdc/docker/admin/ca.pem",
)
conn.list_images()
| 1.640625
| 2
|
modules/ravestate/testfixtures.py
|
Roboy/diact
| 28
|
12779860
|
import pytest
from reggol import strip_prefix
from testfixtures import LogCapture
from ravestate import *
DEFAULT_MODULE_NAME = 'module'
DEFAULT_PROPERTY_NAME = 'property'
DEFAULT_PROPERTY_ID = f"{DEFAULT_MODULE_NAME}:{DEFAULT_PROPERTY_NAME}"
DEFAULT_PROPERTY_VALUE = 'Kruder'
DEFAULT_PROPERTY_CHANGED = f"{DEFAULT_PROPERTY_ID}:changed"
NEW_PROPERTY_VALUE = 'Dorfmeister'
DEFAULT_PROPERTY = Property(name=DEFAULT_PROPERTY_NAME, default_value=DEFAULT_PROPERTY_VALUE)
DEFAULT_PROPERTY.set_parent_path(DEFAULT_MODULE_NAME)
SIGNAL_A = SignalRef(f"{DEFAULT_MODULE_NAME}:a")
SIGNAL_B = SignalRef(f"{DEFAULT_MODULE_NAME}:b")
SIGNAL_C = SignalRef(f"{DEFAULT_MODULE_NAME}:c")
SIGNAL_D = SignalRef(f"{DEFAULT_MODULE_NAME}:d")
@pytest.fixture
def state_fixture(mocker):
@state(write=(DEFAULT_PROPERTY,), read=(DEFAULT_PROPERTY,))
def state_mock_fn(ctx):
ctx[DEFAULT_PROPERTY] = "test"
state_mock_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_fn
@pytest.fixture
def state_signal_a_fixture(mocker):
@state(read=(DEFAULT_PROPERTY,), signal=SIGNAL_A)
def state_mock_a_fn(ctx):
pass
state_mock_a_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_a_fn
@pytest.fixture
def state_signal_b_fixture(mocker):
@state(signal=SIGNAL_B, cond=SIGNAL_A)
def state_mock_b_fn(ctx):
pass
state_mock_b_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_b_fn
@pytest.fixture
def state_signal_c_fixture(mocker):
@state(signal=SIGNAL_C, cond=SIGNAL_A)
def state_mock_c_fn(ctx):
pass
state_mock_c_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_c_fn
@pytest.fixture
def state_signal_d_fixture(mocker):
@state(signal=SIGNAL_D, cond=SIGNAL_B | SIGNAL_C)
def state_mock_c_fn(ctx):
pass
state_mock_c_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_c_fn
@pytest.fixture
def context_fixture(mocker):
return Context()
@pytest.fixture
def context_with_property_fixture(mocker, context_fixture) -> Context:
context_fixture.add_prop(prop=DEFAULT_PROPERTY)
mocker.patch.object(context_fixture, 'add_prop')
return context_fixture
@pytest.fixture
def context_with_property_and_state_fixture(mocker, context_with_property_fixture, state_fixture):
context_with_property_fixture.add_state(st=state_fixture)
mocker.patch.object(context_with_property_fixture, 'add_state')
return context_with_property_fixture
@pytest.fixture
def context_wrapper_fixture(context_with_property_fixture, state_fixture):
return ContextWrapper(ctx=context_with_property_fixture, state=state_fixture)
@pytest.fixture
def activation_fixture(state_fixture: State, context_with_property_and_state_fixture: Context):
return Activation(state_fixture, context_with_property_and_state_fixture)
@pytest.fixture
def activation_fixture_fallback(activation_fixture: Activation):
activation_fixture.state_to_activate.write_props = None
return activation_fixture
@pytest.fixture
def spike_fixture():
return Spike(sig=DEFAULT_PROPERTY_CHANGED)
@pytest.fixture
def triple_fixture(mocker):
token_mock = mocker.Mock()
token_mock.children = ()
from ravestate_nlp import Triple
return Triple(token_mock, token_mock, token_mock)
| 2.015625
| 2
|
py_local_maxima/cpu.py
|
benhollar/py-local-maxima
| 0
|
12779861
|
from scipy.ndimage.filters import maximum_filter as _max_filter
from scipy.ndimage.morphology import binary_erosion as _binary_erosion
from skimage.feature import peak_local_max
def detect_skimage(image, neighborhood, threshold=1e-12):
"""Detect peaks using a local maximum filter (via skimage)
Parameters
----------
image : numpy.ndarray (2D)
The imagery to find the local maxima of
neighborhood : numpy.ndarray (2D)
A boolean matrix specifying a scanning window for maxima detection.
        The neighborhood size is implicitly defined by the matrix dimensions.
threshold : float
The minimum acceptable value of a peak
Returns
-------
numpy.ndarray (2D)
A boolean matrix specifying maxima locations (True) and background
locations (False)
"""
return peak_local_max(image,
footprint=neighborhood,
threshold_abs=threshold,
indices=False)
def detect_maximum_filter(image, neighborhood, threshold=1e-12):
"""Detect peaks using a local maximum filter
Code courtesy https://stackoverflow.com/a/3689710 (adapted slightly).
Parameters
----------
image : numpy.ndarray (2D)
The imagery to find the local maxima of
neighborhood : numpy.ndarray (2D)
A boolean matrix specifying a scanning window for maxima detection.
        The neighborhood size is implicitly defined by the matrix dimensions.
threshold : float
The minimum acceptable value of a peak
Returns
-------
numpy.ndarray (2D)
A boolean matrix specifying maxima locations (True) and background
locations (False)
"""
# Apply the local maximum filter, then remove any background (below
# threshold) values from our result.
detected_peaks = _max_filter(image, footprint=neighborhood) == image
detected_peaks[image < threshold] = False
return detected_peaks
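
# A minimal usage sketch (hypothetical data, not part of the original module):
if __name__ == '__main__':
    import numpy as np
    image = np.zeros((100, 100))
    image[10, 10] = 1.0
    image[50, 75] = 2.0
    neighborhood = np.ones((3, 3), dtype=bool)   # 3x3 scanning window
    peaks = detect_maximum_filter(image, neighborhood)
    print(np.argwhere(peaks))   # expected peak locations: [[10 10] [50 75]]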
| 3.359375
| 3
|
eval_synthesis_quality.py
|
CJWBW/image2video-synthesis-using-cINNs
| 85
|
12779862
|
<gh_stars>10-100
import argparse, os, torch, random
from tqdm import tqdm
import lpips, numpy as np
from data.get_dataloder import get_eval_loader
from get_model import Model
from metrics.FVD.evaluate_FVD import compute_fvd
from metrics.FID.FID_Score import calculate_FID
from metrics.FID.inception import InceptionV3
from metrics.DTFVD import DTFVD_Score
from utils.auxiliaries import set_seed
parser = argparse.ArgumentParser()
parser.add_argument('-gpu', type=str, required=True, help="Define GPU on which to run")
parser.add_argument('-dataset', type=str)
parser.add_argument('-texture', type=str, required=False, help='Specify texture when using DTDB')
parser.add_argument('-ckpt_path', type=str, required=False, help="Specify path if outside of repo for chkpt")
parser.add_argument('-data_path', type=str, required=False, help="Path to dataset arranged as described in readme")
parser.add_argument('-seq_length', type=int, default=16)
parser.add_argument('-bs', type=int, default=6, help='Batchsize')
parser.add_argument('-FID', type=bool)
parser.add_argument('-FVD', type=bool)
parser.add_argument('-DTFVD', type=bool)
parser.add_argument('-LPIPS', type=bool)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
set_seed(249)
## Load model from config
path_ds = f'{args.dataset}/{args.texture}/' if args.dataset == 'DTDB' else f'{args.dataset}'
ckpt_path = f'./models/{path_ds}/stage2/' if not args.ckpt_path else args.ckpt_path
model = Model(ckpt_path, args.seq_length)
# set up dataloader
dataset = get_eval_loader(args.dataset, args.seq_length + 1, args.data_path, model.config)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=10, batch_size=args.bs, shuffle=False)
## Generate samples
seq_real, seq_fake = [], []
with torch.no_grad():
for batch_idx, file_dict in enumerate(tqdm(dataloader)):
seq = file_dict["seq"].type(torch.FloatTensor).cuda()
seq_gen = model(seq[:, 0])
if args.dataset == 'bair':
            ## Following https://arxiv.org/abs/1812.01717, the evaluation sequence length is 16 after
            ## concatenating the conditioning frames (in our case a single frame)
seq_gen = torch.cat((seq[:, :1], seq_gen[:, :-1]), dim=1)
seq_real.append(seq[:, :-1].detach().cpu())
elif args.dataset == 'iPER':
## For fair comparison with other methods which condition on multiple frames we concatenated only the last
## conditioning frame to the sequence and used all generated frames for computing FVD on iPER
seq_gen = torch.cat((seq[:, :1], seq_gen), dim=1)
seq_real.append(seq.detach().cpu())
else:
## On dynamic textures we evaluated FVD without concatenating GT frames to the generated one
seq_real.append(seq[:, :-1].detach().cpu())
seq_fake.append(seq_gen.detach().cpu())
seq2 = torch.cat(seq_real, 0)
seq1 = torch.cat(seq_fake, 0)
del model
torch.cuda.empty_cache()
assert seq2.shape == seq1.shape
if args.FID or args.LPIPS:
pd_imgs = seq1.reshape(-1, *seq1.shape[2:])
gt_imgs = seq2.reshape(-1, *seq2.shape[2:])
if args.FID:
print('Evaluate FID')
inception = InceptionV3()
batch_size = 50
FID, _ = calculate_FID(inception, pd_imgs, gt_imgs, batch_size, 2048)
del inception
torch.cuda.empty_cache()
print(f'FID score of {FID}')
if args.LPIPS:
print('Evaluate LPIPS')
LPIPS = 0
lpips_vgg = lpips.LPIPS(net='vgg').cuda()
with torch.no_grad():
for i in range(pd_imgs.size(0)//10):
pd_batch, gt_batch = pd_imgs[i*10:(i+1)*10], gt_imgs[i*10:(i+1)*10]
LPIPS += lpips_vgg(pd_batch.cuda(), gt_batch.cuda()).mean().cpu().item()
_ = lpips_vgg.cpu()
LPIPS /= pd_imgs.size(0)//10
del lpips_vgg
torch.cuda.empty_cache()
print(f'LPIPS score of {LPIPS}')
## Evaluate Dynamic Texture FVD score
if args.DTFVD:
print('Evaluate DTFVD')
batch_size = 40
if args.seq_length > 16:
I3D = DTFVD_Score.load_model(length=32).cuda()
DTFVD = DTFVD_Score.calculate_FVD32(I3D, seq1, seq2, batch_size, True)
else:
I3D = DTFVD_Score.load_model(length=16).cuda()
DTFVD = DTFVD_Score.calculate_FVD(I3D, seq1, seq2, batch_size, True)
del I3D
torch.cuda.empty_cache()
print(f'DTFVD score of {DTFVD}')
if args.FVD:
print('Evaluate FVD')
seq1 = seq1[:seq1.size(0) // 16 * 16].reshape(-1, 16, seq1.size(1), 3, seq1.size(-1), seq1.size(-1))
seq2 = seq2[:seq2.size(0) // 16 * 16].reshape(-1, 16, seq2.size(1), 3, seq2.size(-1), seq2.size(-1))
fvd = compute_fvd(seq1, seq2)
print(f'FVD score of {fvd}')
| 1.9375
| 2
|
uptick/markets.py
|
chainsquad/uptick
| 35
|
12779863
|
<gh_stars>10-100
import click
from click_datetime import Datetime
from datetime import datetime, timedelta
from bitshares.market import Market
from bitshares.amount import Amount
from bitshares.account import Account
from bitshares.price import Price, Order
from .decorators import onlineChain, unlockWallet, online, unlock
from .ui import print_tx, print_table, format_table
from .main import main, config
@main.command()
@click.pass_context
@onlineChain
@click.argument("market", nargs=1)
@click.option(
"--limit", type=int, help="Limit number of elements", default=10
) # fixme add start and stop time
@click.option(
"--start",
help="Start datetime '%Y-%m-%d %H:%M:%S'",
type=Datetime(format="%Y-%m-%d %H:%M:%S"),
)
@click.option(
"--stop",
type=Datetime(format="%Y-%m-%d %H:%M:%S"),
help="Stop datetime '%Y-%m-%d %H:%M:%S'",
default=datetime.utcnow(),
)
def trades(ctx, market, limit, start, stop):
""" List trades in a market
"""
market = Market(market, bitshares_instance=ctx.bitshares)
t = [["time", "quote", "base", "price"]]
for trade in market.trades(limit, start=start, stop=stop):
t.append(
[
str(trade["time"]),
str(trade["quote"]),
str(trade["base"]),
"{:f} {}/{}".format(
trade["price"],
trade["base"]["asset"]["symbol"],
trade["quote"]["asset"]["symbol"],
),
]
)
print_table(t)
@main.command()
@click.pass_context
@onlineChain
@click.argument("market", nargs=1)
def ticker(ctx, market):
""" Show ticker of a market
"""
market = Market(market, bitshares_instance=ctx.bitshares)
ticker = market.ticker()
t = [["key", "value"]]
for key in ticker:
t.append([key, str(ticker[key])])
print_table(t)
@main.command()
@click.pass_context
@onlineChain
@click.argument("orders", type=str, nargs=-1)
@click.option(
"--account",
default=config["default_account"],
type=str,
help="Account to use for this action",
)
@unlockWallet
def cancel(ctx, orders, account):
""" Cancel one or multiple orders
"""
print_tx(ctx.bitshares.cancel(orders, account=account))
@main.command()
@click.pass_context
@onlineChain
@click.argument("market", nargs=1)
def orderbook(ctx, market):
""" Show the orderbook of a particular market
"""
market = Market(market, bitshares_instance=ctx.bitshares)
orderbook = market.orderbook()
ta = {}
ta["bids"] = [["quote", "sum quote", "base", "sum base", "price"]]
cumsumquote = Amount(0, market["quote"])
cumsumbase = Amount(0, market["base"])
for order in orderbook["bids"]:
cumsumbase += order["base"]
cumsumquote += order["quote"]
ta["bids"].append(
[
str(order["quote"]),
str(cumsumquote),
str(order["base"]),
str(cumsumbase),
"{:f} {}/{}".format(
order["price"],
order["base"]["asset"]["symbol"],
order["quote"]["asset"]["symbol"],
),
]
)
ta["asks"] = [["price", "base", "sum base", "quote", "sum quote"]]
cumsumquote = Amount(0, market["quote"])
cumsumbase = Amount(0, market["base"])
for order in orderbook["asks"]:
cumsumbase += order["base"]
cumsumquote += order["quote"]
ta["asks"].append(
[
"{:f} {}/{}".format(
order["price"],
order["base"]["asset"]["symbol"],
order["quote"]["asset"]["symbol"],
),
str(order["base"]),
str(cumsumbase),
str(order["quote"]),
str(cumsumquote),
]
)
t = [["bids", "asks"]]
t.append([format_table(ta["bids"]), format_table(ta["asks"])])
print_table(t)
@main.command()
@click.pass_context
@onlineChain
@click.argument("buy_amount", type=float)
@click.argument("buy_asset", type=str)
@click.argument("price", type=float)
@click.argument("sell_asset", type=str)
@click.option("--order-expiration", default=None)
@click.option(
"--account",
default=config["default_account"],
type=str,
help="Account to use for this action",
)
@unlockWallet
def buy(ctx, buy_amount, buy_asset, price, sell_asset, order_expiration, account):
""" Buy a specific asset at a certain rate against a base asset
"""
amount = Amount(buy_amount, buy_asset)
price = Price(
price, base=sell_asset, quote=buy_asset, bitshares_instance=ctx.bitshares
)
print_tx(
price.market.buy(price, amount, account=account, expiration=order_expiration)
)
@main.command()
@click.pass_context
@onlineChain
@click.argument("sell_amount", type=float)
@click.argument("sell_asset", type=str)
@click.argument("price", type=float)
@click.argument("buy_asset", type=str)
@click.option("--order-expiration", default=None)
@click.option(
"--account",
default=config["default_account"],
help="Account to use for this action",
type=str,
)
@unlockWallet
def sell(ctx, sell_amount, sell_asset, price, buy_asset, order_expiration, account):
""" Sell a specific asset at a certain rate against a base asset
"""
amount = Amount(sell_amount, sell_asset)
price = Price(
price, quote=sell_asset, base=buy_asset, bitshares_instance=ctx.bitshares
)
print_tx(
price.market.sell(price, amount, account=account, expiration=order_expiration)
)
@main.command()
@click.pass_context
@onlineChain
@click.argument("account", type=str)
def openorders(ctx, account):
""" List open orders of an account
"""
account = Account(
account or config["default_account"], bitshares_instance=ctx.bitshares
)
t = [["Price", "Quote", "Base", "ID"]]
for o in account.openorders:
t.append(
[
"{:f} {}/{}".format(
o["price"],
o["base"]["asset"]["symbol"],
o["quote"]["asset"]["symbol"],
),
str(o["quote"]),
str(o["base"]),
o["id"],
]
)
print_table(t)
@main.command()
@click.option("--account", default=None)
@click.argument("market")
@click.pass_context
@online
@unlock
def cancelall(ctx, market, account):
""" Cancel all orders of an account in a market
"""
market = Market(market)
ctx.bitshares.bundle = True
market.cancel([x["id"] for x in market.accountopenorders(account)], account=account)
print_tx(ctx.bitshares.txbuffer.broadcast())
@main.command()
@click.option("--account", default=None)
@click.argument("market")
@click.argument("side", type=click.Choice(["buy", "sell"]))
@click.argument("min", type=float)
@click.argument("max", type=float)
@click.argument("num", type=float)
@click.argument("total", type=float)
@click.option("--order-expiration", default=None)
@click.pass_context
@online
@unlock
def spread(ctx, market, side, min, max, num, total, order_expiration, account):
""" Place multiple orders
\b
:param str market: Market pair quote:base (e.g. USD:BTS)
:param str side: ``buy`` or ``sell`` quote
:param float min: minimum price to place order at
:param float max: maximum price to place order at
:param int num: Number of orders to place
:param float total: Total amount of quote to use for all orders
:param int order_expiration: Number of seconds until the order expires from the books
"""
from tqdm import tqdm
from numpy import linspace
market = Market(market)
ctx.bitshares.bundle = True
if min < max:
space = linspace(min, max, num)
else:
space = linspace(max, min, num)
func = getattr(market, side)
for p in tqdm(space):
func(p, total / float(num), account=account, expiration=order_expiration)
print_tx(ctx.bitshares.txbuffer.broadcast())
@main.command()
@click.pass_context
@onlineChain
@click.argument("amount", type=float)
@click.argument("symbol", type=str)
@click.option("--ratio", default=None, help="Collateral Ratio", type=float)
@click.option(
"--account",
default=config["default_account"],
help="Account to use for this action",
type=str,
)
@unlockWallet
def borrow(ctx, amount, symbol, ratio, account):
""" Borrow a bitasset/market-pegged asset
"""
from bitshares.dex import Dex
dex = Dex(bitshares_instance=ctx.bitshares)
print_tx(
dex.borrow(Amount(amount, symbol), collateral_ratio=ratio, account=account)
)
@main.command()
@click.pass_context
@onlineChain
@click.argument("symbol", type=str)
@click.option("--ratio", default=2, help="Collateral Ratio", type=float)
@click.option(
"--account",
default=config["default_account"],
help="Account to use for this action",
type=str,
)
@unlockWallet
def updateratio(ctx, symbol, ratio, account):
""" Update the collateral ratio of a call positions
"""
from bitshares.dex import Dex
dex = Dex(bitshares_instance=ctx.bitshares)
print_tx(dex.adjust_collateral_ratio(symbol, ratio, account=account))
@main.command()
@click.pass_context
@onlineChain
@click.argument("symbol", type=str)
@click.argument("amount", type=float)
@click.option(
"--account",
default=config["default_account"],
type=str,
help="Account to use for this action",
)
@unlockWallet
def fundfeepool(ctx, symbol, amount, account):
""" Fund the fee pool of an asset
"""
print_tx(ctx.bitshares.fund_fee_pool(symbol, amount, account=account))
@main.command()
@click.pass_context
@onlineChain
@click.argument("collateral_amount", type=float)
@click.argument("collateral_symbol", type=str)
@click.argument("debt_amount", type=float)
@click.argument("debt_symbol", type=str)
@click.option(
"--account",
default=config["default_account"],
type=str,
help="Account to use for this action",
)
@unlockWallet
def bidcollateral(
ctx, collateral_symbol, collateral_amount, debt_symbol, debt_amount, account
):
""" Bid for collateral in the settlement fund
"""
print_tx(
ctx.bitshares.bid_collateral(
Amount(collateral_amount, collateral_symbol),
Amount(debt_amount, debt_symbol),
account=account,
)
)
@main.command()
@click.pass_context
@onlineChain
@click.argument("amount", type=float)
@click.argument("symbol", type=str)
@click.option(
"--account",
default=config["default_account"],
type=str,
help="Account to use for this action",
)
@unlockWallet
def settle(ctx, symbol, amount, account):
""" Fund the fee pool of an asset
"""
print_tx(ctx.bitshares.asset_settle(Amount(amount, symbol), account=account))
| 2.46875
| 2
|
python/merge_files.py
|
caleberi/LeetCode
| 1
|
12779864
|
<reponame>caleberi/LeetCode
# Objective
# Merge a set of sorted files of different lengths into a single sorted file.
# We need to find an optimal solution, where the resultant file is generated in minimum time.
# Approach
# Given a number of sorted files, there are many ways to merge them into a single sorted file.
# The merge can be performed pair-wise. Merging an m-record file with an n-record file requires roughly m+n record moves,
# so the optimal choice is to merge the two smallest files together at each step (greedy approach); see the heapq sketch at the end of this file.
from os import stat,path,getcwd
import os
from sys import getfilesystemencoding
from collections import deque
def resolve_os_encoding(path):
return os.fsdecode(path) if getfilesystemencoding() != "utf-8" else path
def get_file_info(filename):
file_path = path.join(getcwd(),filename)
file_stat = stat(file_path)
word_count = 0
file_size = file_stat.st_size
file = open(file_path)
for line in file.readlines():
word_count+=len(line.split())
file.close()
return (word_count,file_size,file_path)
def get_file_stats(file_list):
stats = []
for file in file_list:
stats.append(get_file_info(file))
return stats
def merge_files(file_list,dest_file):
file_stats = get_file_stats(file_list)
file_stats = sorted(file_stats,key=lambda s: s[1],reverse=True)
queue = deque(file_stats)
while len(queue) > 1:
largest = queue.popleft()
smallest = queue.pop()
with open(largest[2],"a") as fs:
small_file = open(smallest[2])
fs.write("\n")
fs.writelines(small_file.readlines())
small_file.close()
os.unlink(smallest[2])
queue.appendleft(get_file_info(largest[2]))
if __name__ == "__main__":
merge_files([
"merge_files/a.txt",
"merge_files/b.txt",
"merge_files/c.txt"]
,"merge_files/result.txt"
)
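
# A minimal sketch of the greedy strategy described in the header comment (always merge the two
# smallest files), expressed on file sizes only. This is an illustration, not the behaviour of
# merge_files() above, which appends the smallest remaining file onto the largest one.
def greedy_merge_cost(sizes):
    """Total record moves when the two smallest files are merged at each step."""
    import heapq
    heap = list(sizes)
    heapq.heapify(heap)
    moves = 0
    while len(heap) > 1:
        a = heapq.heappop(heap)
        b = heapq.heappop(heap)
        moves += a + b              # merging an a-record and a b-record file costs a + b moves
        heapq.heappush(heap, a + b)
    return moves
# e.g. greedy_merge_cost([5, 10, 20, 30]) == 115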
| 3.921875
| 4
|
test/automation/automate.py
|
agupta54/ulca
| 3
|
12779865
|
<filename>test/automation/automate.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 4 17:35:27 2021.
@author: dhiru579 @ Tarento
"""
import os
from argparse import ArgumentParser
import config
import driver_script
from core import core_all as core
from test import test_all as test
from dataset import dataset_all as dataset
from model import model_all as model
if __name__ == "__main__":
parser = ArgumentParser()
# flag-arguments
parser.add_argument("-u", "--update-schema", help="updating schema", action="store_true")
parser.add_argument("-c", "--chart", help="gets chart data", action="store_true")
parser.add_argument("-ta","--test-all", help="performs website testing", action="store_true")
parser.add_argument("-tp","--test-part", help="performs website testing", action="store_true")
parser.add_argument("-l", "--login", help="checking credentials", action="store_true")
parser.add_argument("-d", "--dataset", help="flag for dataset functions", action="store_true")
parser.add_argument("-m", "--model", help="flag for model functions", action="store_true")
parser.add_argument("--publish", help="publish the model", action="store_true")
parser.add_argument("--unpublish", help="unpublish the model", action="store_true")
parser.add_argument("--dont-skip", action="store_true", help="stops skipping status check",)
    # value-arguments
parser.add_argument("-n", "--name", help="Dataset/Model Name", type=str, default="")
parser.add_argument("-url", "--url", help="Dataset/Model URL", type=str, default="")
parser.add_argument("-t", "--type", help="Dataset/Model Type", type=str, default="")
parser.add_argument("-i", "--input", help="input csv/jsonfile,urls,seentence", type=str, default="",)
parser.add_argument("-g","--group",help="group chart by ['domain', 'collection', 'submitter']",type=str,default="",)
parser.add_argument("-src", "--source", help="source language", type=str, default="")
parser.add_argument("-tgt", "--target", help="target languages", type=str, default="")
parser.add_argument("--domain", help="domain for searched dataset", type=str, default="")
parser.add_argument("-b", "--benchmark", help="Benchamrk Name", type=str, default="")
parser.add_argument("--metric", help="Metric Name", type=str, default="")
parser.add_argument("--list", help="listing models,benchmark,metrics", type=str, default="")
args = parser.parse_args()
update_schema_flag = args.update_schema
test_a_flag = args.test_all
test_p_flag = args.test_part
chart_flag = args.chart
groupby = args.group
login_flag = args.login
dataset_flag = args.dataset
model_flag = args.model
publish_flag = args.publish
unpublish_flag = args.unpublish
dont_skip_flag = args.dont_skip
inp = args.input.strip()
name = args.name.strip()
url = args.url.strip()
typex = args.type.lower().replace(" ", "-").strip()
domain = args.domain.lower().replace(" ", "-").strip()
benchmark = args.benchmark.strip()
metric = args.metric.strip()
src = args.source.lower()
    tgt = args.target.lower().split(",") # splitting str into list
tgt = list(filter(None, tgt)) # filtering list
listby = args.list.lower()
# enabling-terminal-color-in-windows-only
if os.name.lower() == "nt":
os.system("color")
# updating schema
if update_schema_flag:
core.update_schema(config.ULCA_SCHEMAFILE_URL, config.SCHEMA_FILENAME)
core.exit_program()
# loading the driver
driver = driver_script.load_driver(config.AUTOMATION_BROWSER)
# getting chart data
if chart_flag:
if typex.find("-corpus") < 0:
typex += "-corpus"
driver = core.get_chart_data(typex, groupby, src, tgt, driver)
driver_script.close_driver(driver)
core.exit_program()
# running public-web-test script
if test_a_flag:
driver = test.perform_testing_all(core.perform_login,driver)
driver_script.close_driver(driver)
core.exit_program()
if test_p_flag:
driver = test.perform_testing_partly(core.perform_login,driver)
driver_script.close_driver(driver)
core.exit_program()
login_status = False
# if just login then quit
if login_flag:
login_status, driver = core.perform_login(driver)
driver_script.close_driver(driver)
core.exit_program()
# dataset related processes
elif dataset_flag:
login_status, driver = core.perform_login(driver)
# adding -corpus to the dataset-type
if typex.find("-corpus") < 0:
typex += "-corpus"
if name != "":
# submit-dataset
driver = dataset.perform_submit_get_status(name, url, driver)
elif inp != "":
# submit-dataset-using-csv
driver = dataset.perform_upload_with_csv(inp, driver, d_skip=dont_skip_flag)
else:
# search-and-download-dataset
driver = dataset.perform_search_and_download(typex, tgt, src, domain, driver)
# model related processes
elif model_flag:
if typex != "":
driver = model.perform_translate(name, typex, inp, driver)
else:
login_status, driver = core.perform_login(driver)
if inp != "":
status, driver = model.perform_model_submit(name, inp, driver)
elif listby != "":
driver = model.list_public_model_data(True,driver)
elif (unpublish_flag or publish_flag): #status and
driver = model.run_publish_task(name, publish_flag, unpublish_flag, driver)
else:
driver = model.run_benchmark(name, benchmark, metric, driver)
else:
core.print_no_flag_found()
driver_script.close_driver(driver)
| 2.125
| 2
|
model.py
|
RSMagneto/MSIT
| 1
|
12779866
|
<filename>model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from CT import CT
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, padding=0, dilation=1, isrelu=True):
super(BasicConv2d, self).__init__()
if kernel_size == 1:
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, bias=False)
else:
self.conv = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False)
)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.isrelu = isrelu
def forward(self, x):
x = self.conv(x)
if self.isrelu:
x = self.relu(x)
return x
class FoldAttention(nn.Module):
def __init__(self, indim=64, patch_size=8, image_size=64, num_heads=32,qkv_bias=False):
super(FoldAttention, self).__init__()
self.dim = patch_size ** 2 * indim
self.norm_q, self.norm_k_pan, self.norm_v_pan = nn.LayerNorm(self.dim*2), nn.LayerNorm(self.dim), nn.LayerNorm(self.dim)
self.norm_k_ms, self.norm_v_ms = nn.LayerNorm(self.dim), nn.LayerNorm(self.dim)
self.to_q = nn.Linear(self.dim*2, self.dim//4, bias=qkv_bias)
self.to_k_pan = nn.Linear(self.dim, self.dim//4, bias=qkv_bias)
self.to_v_pan = nn.Linear(self.dim, self.dim, bias=qkv_bias)
self.to_k_ms = nn.Linear(self.dim, self.dim//4, bias=qkv_bias)
self.to_v_ms = nn.Linear(self.dim, self.dim, bias=qkv_bias)
self.feat2patch = torch.nn.Unfold(kernel_size=patch_size, padding=0, stride=patch_size)
self.patch2feat = torch.nn.Fold(output_size=(image_size, image_size), kernel_size=patch_size, padding=0, stride=patch_size)
self.scale = (self.dim / num_heads) ** (-0.5)
self.image_size = image_size
self.heads = num_heads
self.proj1 = nn.Linear(self.dim, self.dim)
self.proj2 = nn.Linear(self.dim, self.dim)
def get_qkv(self, pan, ms):
q = torch.cat([pan,ms],1)
unfold_q = self.feat2patch(q)
unfold_q = rearrange(unfold_q, "b c n -> b n c")
unfold_q = self.to_q(self.norm_q(unfold_q))
q = rearrange(unfold_q, "b n (g c) -> b g n c", g=self.heads)
unfold_k_pan = self.feat2patch(pan)
unfold_k_pan = rearrange(unfold_k_pan, "b c n -> b n c")
unfold_k_pan = self.to_k_pan(self.norm_k_pan(unfold_k_pan))
k_pan = rearrange(unfold_k_pan, "b n (g c) -> b g n c", g=self.heads)
unfold_v_pan = self.feat2patch(pan)
unfold_v_pan = rearrange(unfold_v_pan, "b c n -> b n c")
unfold_v_pan = self.to_v_pan(self.norm_v_pan(unfold_v_pan))
v_pan = rearrange(unfold_v_pan, "b n (g c) -> b g n c", g=self.heads)
unfold_k_ms = self.feat2patch(ms)
unfold_k_ms = rearrange(unfold_k_ms, "b c n -> b n c")
unfold_k_ms = self.to_k_ms(self.norm_k_ms(unfold_k_ms))
k_ms = rearrange(unfold_k_ms, "b n (g c) -> b g n c", g=self.heads)
unfold_v_ms = self.feat2patch(ms)
unfold_v_ms = rearrange(unfold_v_ms, "b c n -> b n c")
unfold_v_ms = self.to_v_ms(self.norm_v_ms(unfold_v_ms))
v_ms = rearrange(unfold_v_ms, "b n (g c) -> b g n c", g=self.heads)
return q, k_pan, v_pan, k_ms, v_ms
def forward(self, pan, ms):
q, k_pan, v_pan, k_ms, v_ms = self.get_qkv(pan, ms)
attn_pan = (q @ k_pan.transpose(-2, -1)) * self.scale
attn_ms = (q @ k_ms.transpose(-2, -1)) * self.scale
attn_pan = F.softmax(attn_pan, dim=-1)
attn_ms = F.softmax(attn_ms, dim=-1)
out_pan = (attn_pan @ v_pan).transpose(1, 2)
out_ms = (attn_ms @ v_ms).transpose(1, 2)
out_pan = rearrange(out_pan, "b q g c -> b (g c) q")
out_ms = rearrange(out_ms, "b q g c -> b (g c) q")
out_pan = self.patch2feat(out_pan)
out_ms = self.patch2feat(out_ms)
out = torch.cat([out_pan+pan, out_ms+ms], 1)
return out
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class FusionBlock(torch.nn.Module):
def __init__(self, channels, patch_size, image_size):
super(FusionBlock, self).__init__()
self.fold_attn = FoldAttention(channels, patch_size, image_size)
self.conv = BasicConv2d(2 * channels, channels, 1, 1, isrelu=False)
def forward(self, x_pan, x_ms):
a_cat = self.fold_attn(x_pan, x_ms)
out = self.conv(a_cat)
return out
class Fusion_network(nn.Module):
def __init__(self, nC):
super(Fusion_network, self).__init__()
img_size = [64, 32, 16]
patch_size = [4, 4, 1]
self.fusion_block1 = FusionBlock(nC[0], patch_size[0], img_size[0])
self.fusion_block2 = FusionBlock(nC[1], patch_size[1], img_size[1])
self.fusion_block3 = FusionBlock(nC[2], patch_size[2], img_size[2])
def forward(self, en_ir, en_vi):
f1_0 = self.fusion_block1(en_ir[0], en_vi[0])
f2_0 = self.fusion_block2(en_ir[1], en_vi[1])
f3_0 = self.fusion_block3(en_ir[2], en_vi[2])
return [f1_0, f2_0, f3_0]
class tail(nn.Module):
def __init__(self, nb_filter, output_nc=4, deepsupervision=True):
super(tail, self).__init__()
self.deepsupervision = deepsupervision
self.nb_filter = nb_filter
block = DenseBlock_light
self.DB1_1 = block(nb_filter[0], 32)
self.DB2_1 = block(nb_filter[0] + nb_filter[1], nb_filter[0])
self.DB3_1 = block(nb_filter[1] + nb_filter[2], nb_filter[1])
self.DB2_2 = block(nb_filter[0] + nb_filter[1], nb_filter[0])
self.conv_out = BasicConv2d(32, output_nc, 1, isrelu=False)
self.up4_1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(self.nb_filter[2], self.nb_filter[2]*4, 3, 1, 0),
nn.PixelShuffle(2)
)
self.up3_1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(self.nb_filter[1], self.nb_filter[1]*4, 3, 1, 0),
nn.PixelShuffle(2)
)
self.up3_2 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(self.nb_filter[1], self.nb_filter[1]*4, 3, 1, 0),
nn.PixelShuffle(2)
)
self.up2_1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(self.nb_filter[0], self.nb_filter[0] * 4, 3, 1, 0),
nn.PixelShuffle(4)
)
self.up2_2 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(self.nb_filter[0], self.nb_filter[0] * 4, 3, 1, 0),
nn.PixelShuffle(4)
)
def forward(self, x4_t, x3_t, x2_t):
x2_1 = self.DB2_1(torch.cat([x2_t, self.up3_1(x3_t)], 1))
x3_1 = self.DB3_1(torch.cat([x3_t, self.up4_1(x4_t)], 1))
x2_2 = self.DB2_2(torch.cat([x2_1, self.up3_2(x3_1)], 1))
out = self.conv_out(torch.cat([self.up2_1(x2_1), self.up2_2(x2_2)], 1))
return out
class DenseBlock_light(torch.nn.Module):
def __init__(self, in_channels, out_channels):
super(DenseBlock_light, self).__init__()
out_channels_def = int(in_channels / 2)
denseblock = []
denseblock += [BasicConv2d(in_channels, out_channels_def),
BasicConv2d(out_channels_def, out_channels, 1)]
self.denseblock = nn.Sequential(*denseblock)
def forward(self, x):
out = self.denseblock(x)
return out
class model(nn.Module):
def __init__(self, nb_filter=[64, 128, 256, 512]):
super(model, self).__init__()
self.msbackbone = CT(in_chans=4)
self.panbackbone = CT(in_chans=1)
self.fusion_model = Fusion_network(nb_filter)
self.decoder = tail(nb_filter)
def forward(self, ms, pan):
ms_pvt = self.msbackbone(ms)
pan_pvt = self.panbackbone(pan)
f = self.fusion_model(ms_pvt, pan_pvt)
x4_t = f[2]
x3_t = f[1]
x2_t = f[0]
out = self.decoder(x4_t, x3_t, x2_t)
return out
if __name__ == '__main__':
model = model()
ms = torch.randn(1, 4, 256, 256)
pan = torch.randn(1, 1, 256, 256)
prediction = model(ms, pan)
print(prediction.size())
| 2.5625
| 3
|
tasks/snli/third_party/datasets/__init__.py
|
etri-edgeai/nn-comp-discblock
| 10
|
12779867
|
<filename>tasks/snli/third_party/datasets/__init__.py
from .snli import *
from .multinli import *
| 1.039063
| 1
|
source/campo/op_fieldagents/__init__.py
|
computationalgeography/campo
| 2
|
12779868
|
<reponame>computationalgeography/campo<gh_stars>1-10
from .operators import *
from .operations import *
| 1.0625
| 1
|
make_encode_sets.py
|
akiss77/rust-url
| 0
|
12779869
|
# Copyright 2013-2014 <NAME>.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# Run as: python make_encode_sets.py > src/encode_sets.rs
print('''\
// Copyright 2013-2014 <NAME>.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Generated by make_encode_sets.py
''')
for name, encoded in [
('SIMPLE', ''),
('QUERY', r''' "#<>`'''),
('DEFAULT', r''' "#<>`?'''),
('USERINFO', r''' "#<>`?@'''),
('PASSWORD', r''' "#<>`?@\/'''),
('USERNAME', r''' "#<>`?@\/:'''),
('FORM_URLENCODED', r''' !"#$%&\'()+,/:;<=>?@[\]^`{|}'''),
]:
print(
"pub static %s: [&'static str, ..256] = [\n%s\n];\n\n"
% (name, '\n'.join(
' ' + ' '.join(
'"%s%s",' % ("\\" if chr(b) in '\\"' else "", chr(b))
if 0x20 <= b <= 0x7E and chr(b) not in encoded
else '"%%%02X",' % b
for b in range(s, s + 8)
) for s in range(0, 256, 8))))
| 1.992188
| 2
|
src/test/latency/hosts/broker.py
|
acatalfano/distributed-systems-assignment-1
| 0
|
12779870
|
<reponame>acatalfano/distributed-systems-assignment-1<filename>src/test/latency/hosts/broker.py
from app import BrokerFactory
def __main__() -> None:
factory = BrokerFactory()
factory.build_broker()
if __name__ == '__main__':
__main__()
| 1.578125
| 2
|
binary_insertion_sort.py
|
Vassago55/normal_sort
| 0
|
12779871
|
# -*- coding: utf-8 -*-
import random
"""
折半插入排序 O(n) = n^2
"""
class BinaryInsertion(object):
def __init__(self, original_list):
self.original_list = original_list
def sort(self):
length = len(self.original_list)
for i in range(1, length):
self.binary(start=0, end=i-1, current=i)
def binary(self, start, end, current):
cursor = int((end + start) / 2) if end != start else end
if (end == start) or (cursor == 0) or (self.original_list[current] == self.original_list[cursor]):
if self.original_list[current] >= self.original_list[cursor]:
self.original_list.insert(cursor+1, self.original_list[current])
else:
self.original_list.insert(cursor, self.original_list[current])
del self.original_list[current+1]
elif self.original_list[current] > self.original_list[cursor]:
self.binary(cursor+1, end, current)
elif self.original_list[current] < self.original_list[cursor]:
self.binary(start, cursor-1, current)
if __name__ == '__main__':
my_list = [random.randint(0, 100) for _ in range(0, 10)]
print("before sort: {}".format(my_list))
BinaryInsertion(my_list).sort()
print("after sort: {}".format(my_list))
| 3.921875
| 4
|
statsmodels/datasets/sunspots/data.py
|
yarikoptic/statsmodels
| 34
|
12779872
|
<reponame>yarikoptic/statsmodels<filename>statsmodels/datasets/sunspots/data.py
"""Yearly sunspots data 1700-2008"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is public domain."""
TITLE = __doc__
SOURCE = """
http://www.ngdc.noaa.gov/stp/solar/solarda3.html
The original dataset contains monthly data on sunspot activity in the file
./src/sunspots_yearly.dat. There is also sunspots_monthly.dat.
"""
DESCRSHORT = """Yearly (1700-2008) data on sunspots from the National
Geophysical Data Center."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 309 (Annual 1700 - 2008)
Number of Variables - 1
Variable name definitions::
SUNACTIVITY - Number of sunspots for each year
The data file contains a 'YEAR' variable that is not returned by load.
"""
from numpy import recfromtxt, array
from pandas import Series, DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the yearly sunspot data and returns a data class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
This dataset only contains data for one variable, so the attributes
data, raw_data, and endog are all the same variable. There is no exog
attribute defined.
"""
data = _get_data()
endog_name = 'SUNACTIVITY'
endog = array(data[endog_name], dtype=float)
dataset = Dataset(data=data, names=[endog_name], endog=endog,
endog_name=endog_name)
return dataset
def load_pandas():
data = DataFrame(_get_data())
# TODO: time series
endog = Series(data['SUNACTIVITY'], index=data['YEAR'].astype(int))
dataset = Dataset(data=data, names=list(data.columns),
endog=endog, endog_name='volume')
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
with open(filepath + '/sunspots.csv', 'rb') as f:
data = recfromtxt(f, delimiter=",",
names=True, dtype=float)
return data
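
# A minimal usage sketch (runnable only where the packaged sunspots.csv sits next to this module):
if __name__ == '__main__':
    dataset = load_pandas()
    print(dataset.endog.head())   # yearly sunspot counts, indexed by year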
| 2.78125
| 3
|
loggerBot/common/utils.py
|
mcpiroman/jarchiwum-server
| 0
|
12779873
|
<reponame>mcpiroman/jarchiwum-server<filename>loggerBot/common/utils.py
import base64
import datetime
import re
import os


class IRCBadMessage(Exception):
    """Raised when an IRC message line cannot be parsed."""
def print_with_time(*args):
print('[' + datetime.datetime.now().strftime("%H:%M:%S") + ']', *args)
def str_2_base64(data, encoding="utf-8"):
return str(base64.b64encode(data.encode(encoding)), encoding)
def base64_2_str(data, encoding="utf-8"):
return str(base64.b64decode(data), encoding)
def get_file_paths_in_dir(dir):
return [p for p in [os.path.join(dir, p) for p in os.listdir(dir)] if os.path.isfile(p)]
def parse_irc_msg(s):
"""Breaks a message from an IRC server into its tags, prefix, command, and arguments.
"""
if not s:
raise IRCBadMessage("Empty IRC line.")
tags = {}
if s.startswith("@"):
s = s[1:]
tags_str, s = s.split(" ", 1)
tags = deconstruct_irc_tags(tags_str)
prefix = ''
trailing = []
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return tags, prefix, command, args
def deconstruct_irc_tags(tags_str):
return {key_value_pair.split("=",1)[0]: unescape_irc_tag_value(key_value_pair.split("=",1)[1]) for key_value_pair in tags_str.split(";")}
def construct_irc_tags(items):
s = ""
is_first_item = True
for key, value in items.items():
if not is_first_item:
s += ";"
s += key + "=" + escape_irc_tag_value(value)
is_first_item = False
return s
def get_irc_user_from_prefix(irc_prefix):
return irc_prefix.split("!", 1)[0]
IRC_TAG_VALUE_ESCAPE_TRANSLATION = {";": "\\:", " ": "\\s", "\r": "\\r", "\n": "\\n", "\\": "\\\\"}
def escape_irc_tag_value(tag_value):
mapping = IRC_TAG_VALUE_ESCAPE_TRANSLATION
return escape(tag_value, mapping)
def unescape_irc_tag_value(tag_value):
mapping = {value: key for key, value in IRC_TAG_VALUE_ESCAPE_TRANSLATION.items()}
return escape(tag_value, mapping)
def escape(s, mapping):
return re.sub('({})'.format('|'.join(map(re.escape, mapping.keys()))), lambda m: mapping[m.group()], s)
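
# A minimal usage sketch with a hypothetical tagged IRC line (not part of the original module):
if __name__ == "__main__":
    line = "@badge-info=;color=#FF0000 :nick!nick@host PRIVMSG #channel :hello world"
    tags, prefix, command, args = parse_irc_msg(line)
    print(tags)      # {'badge-info': '', 'color': '#FF0000'}
    print(prefix)    # nick!nick@host
    print(command)   # PRIVMSG
    print(args)      # ['#channel', 'hello world']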
| 2.5625
| 3
|
snippets/myfft_plot.py
|
rhishi/python-snippets
| 0
|
12779874
|
<reponame>rhishi/python-snippets
import myfft
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
N = 1024
nseries = range(0, 1024)
x = range(0, 1024)
X = myfft.fft(x)
absX = [ abs(v) for v in X ]
#plt.plot(nseries, x, nseries, absX)
#plt.savefig('myfftplot01.png', format='png')
x = [ 1 if i == 100 else 0 for i in range(0, 1024) ]
X = myfft.fft(x)
absX = [ abs(v) for v in X ]
plt.plot(nseries, x, nseries, absX)
plt.savefig('myfftplot02.png', format='png')
| 2.71875
| 3
|
ikdisplay/test/__init__.py
|
ralphm/ikdisplay
| 2
|
12779875
|
"""
Tests for L{ikdisplay}.
"""
| 0.933594
| 1
|
models.py
|
AbigailMathews/concentration
| 0
|
12779876
|
"""models.py - Contains class definitions for Datastore entities
used by the Concentration Game API. Definitions for User, Game, and
Score classes, with associated methods. Additionally, contains
definitions for Forms used in transmitting messages to users."""
### Imports
import random
import pickle
from datetime import date
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
### Import game logic
import game as gm
### User Related Classes and Methods
class User(ndb.Model):
"""User profile"""
name = ndb.StringProperty(required=True)
email = ndb.StringProperty()
total_games = ndb.IntegerProperty(default = 0)
total_score = ndb.IntegerProperty(default = 0)
avg_score = ndb.FloatProperty(default = 0)
def to_form(self):
"""Returns a UserForm representation of a User"""
form = UserForm()
form.name = self.name
form.urlsafe_key = self.key.urlsafe()
form.total_games = self.total_games
form.total_score = self.total_score
form.avg_score = round(self.avg_score)
return form
def calc_score(self):
"""Calculate the player's average score -- to be
called whenever a new game is won"""
avg_score = self.total_score / self.total_games
return avg_score
### Game Related Class and Methods
class Game(ndb.Model):
"""Game object"""
board = ndb.StringProperty(repeated=True)
boardState = ndb.StringProperty(repeated=True)
guesses = ndb.IntegerProperty(required=True, default=0)
cards = ndb.IntegerProperty(required=True, default=52)
status = ndb.StringProperty(required=True, default='In Progress')
user = ndb.KeyProperty(required=True, kind='User')
history = ndb.PickleProperty(repeated=True)
score = ndb.FloatProperty()
@classmethod
def new_game(self, user, cards=52):
"""Creates and returns a new game"""
if cards < 8 or cards > 52 or cards % 2 != 0:
raise ValueError('Cards dealt must be an even number between 8 and 52')
newGame = Game(board=gm.constructBoard(cards),
boardState=gm.initialBoardState(cards),
guesses=0,
cards=cards,
status='In Progress',
user=user)
newGame.put()
return newGame
def to_form(self, message):
"""Returns a GameForm representation of the Game"""
form = GameForm()
form.urlsafe_key = self.key.urlsafe()
form.user_name = self.user.get().name
form.guesses = self.guesses
form.cards = self.cards
form.status = self.status
form.message = message
form.boardState = self.boardState
return form
def to_mini_form(self):
"""Return a MiniGameForm representation of a Game"""
form = MiniGameForm()
form.urlsafe_key = self.key.urlsafe()
form.guesses = self.guesses
form.cards = self.cards
form.status = self.status
return form
def to_history_form(self):
"""Returns a game history form after a game has been won"""
form = HistoryForm()
form.urlsafe_key = self.key.urlsafe()
form.cards = self.cards
form.guesses = self.guesses
form.board = self.board
form.score = self.score
form.history = [h for h in self.history]
return form
def win_game(self):
"""Updates score and user information once game is won"""
# Add the game to the score 'board'
total_score = int(round((self.cards ** 4) / self.guesses))
self.score = total_score
self.put()
score = Score(user=self.user, date=date.today(), cards=self.cards,
guesses=self.guesses, score=total_score)
score.put()
user = self.user.get()
# Add the current score to the user's total score, but handle error
# if user's current score is 0
try:
user.total_score += total_score
except TypeError:
user.total_score = total_score
user.put()
user.avg_score = user.calc_score()
user.put()
### Score Class and Methods
class Score(ndb.Model):
"""Score object"""
user = ndb.KeyProperty(required=True, kind='User')
date = ndb.DateProperty(required=True)
cards = ndb.IntegerProperty(required=True)
guesses = ndb.IntegerProperty(required=True)
score = ndb.FloatProperty(required=True)
def to_form(self):
return ScoreForm(user_name=self.user.get().name,
cards=self.cards,
date=str(self.date),
guesses=self.guesses,
score=self.score)
### Game Forms -- Display
class GameForm(messages.Message):
"""GameForm for outbound game state information"""
urlsafe_key = messages.StringField(1)
guesses = messages.IntegerField(2)
status = messages.StringField(3)
message = messages.StringField(4)
boardState = messages.StringField(5, repeated=True)
user_name = messages.StringField(6)
cards = messages.IntegerField(7)
class MiniGameForm(messages.Message):
"""Abbreviated Game Form for reporting, rather than play purposes"""
urlsafe_key = messages.StringField(1)
guesses = messages.IntegerField(2)
cards = messages.IntegerField(3)
status = messages.StringField(4)
class HistoryForm(messages.Message):
"""Form to display a game history, as well as score information"""
urlsafe_key = messages.StringField(1)
cards = messages.IntegerField(2)
guesses = messages.IntegerField(3)
board = messages.StringField(4, repeated=True)
score = messages.FloatField(5)
history = messages.StringField(6, repeated=True)
class MiniGameForms(messages.Message):
"""Hold a list of abbreviated Game Forms"""
games = messages.MessageField(MiniGameForm, 1, repeated=True)
class NewGameForm(messages.Message):
"""Used to create a new game"""
user_name = messages.StringField(1, required=True)
cards = messages.IntegerField(2, default=52)
### Gameplay Forms
class FlipCardForm(messages.Message):
"""Form to allow players to guess a card by supplying its index"""
queryCard = messages.IntegerField(1, required=True)
class CardForm(messages.Message):
"""Form to respond to player guess by revealing a card value"""
cardValue = messages.StringField(1)
class MakeGuessForm(messages.Message):
"""Used to make a move in an existing game"""
card1 = messages.IntegerField(1, required=True)
card2 = messages.IntegerField(2, required=True)
class HintForm(messages.Message):
"""Send the index of a matching card (hint) back to a user"""
hint = messages.IntegerField(1, required=True)
### Score Forms
class ScoreForm(messages.Message):
"""ScoreForm for outbound Score information"""
user_name = messages.StringField(1, required=True)
date = messages.StringField(2, required=True)
cards = messages.IntegerField(3, required=True)
guesses = messages.IntegerField(4, required=True)
score = messages.FloatField(5, required=True)
class ScoreForms(messages.Message):
"""Return multiple ScoreForms"""
items = messages.MessageField(ScoreForm, 1, repeated=True)
## User and Rankings Message Classes
class UserForm(messages.Message):
"""User detail form"""
name = messages.StringField(1)
urlsafe_key = messages.StringField(2)
total_games = messages.IntegerField(3)
total_score = messages.IntegerField(4)
avg_score = messages.FloatField(5)
class UserForms(messages.Message):
"""Return information mulitiple users for ranking"""
users = messages.MessageField(UserForm, 1, repeated=True)
### Assorted Message Classes
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
message = messages.StringField(1, required=True)
| 3.171875
| 3
|
examples/apps/kinesis-analytics-process-kpl-record/aws_kinesis_agg/__init__.py
|
eugeniosu/serverless-application-model
| 326
|
12779877
|
#Kinesis Aggregation/Deaggregation Libraries for Python
#
#Copyright 2014, Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
#Licensed under the Amazon Software License (the "License").
#You may not use this file except in compliance with the License.
#A copy of the License is located at
#
# http://aws.amazon.com/asl/
#
#or in the "license" file accompanying this file. This file is distributed
#on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
#express or implied. See the License for the specific language governing
#permissions and limitations under the License.
import md5
#Message aggregation protocol-specific constants
#(https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md)
MAGIC = '\xf3\x89\x9a\xc2'
DIGEST_SIZE = md5.digest_size
#Kinesis Limits
#(https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html)
MAX_BYTES_PER_RECORD = 1024*1024 # 1 MB
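#A hypothetical helper (not part of the original module) showing how the constants
#above can be used to recognize an aggregated KPL record, which begins with the
#4-byte magic number and carries a trailing MD5 digest of its protobuf payload:
def _looks_like_aggregated_record(raw_record):
    return raw_record.startswith(MAGIC) and len(raw_record) > len(MAGIC) + DIGEST_SIZE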
| 1.359375
| 1
|
py_neo4j_migrations/main.py
|
matt-l-w/py_neo4j_migrations
| 0
|
12779878
|
<gh_stars>0
import typer
app = typer.Typer()
from . import database as db
@app.command()
def migrate():
def get_migrations(tx):
return tx.run("""
MATCH (m:MIGRATION) RETURN m ORDER BY m.created_at ASC;
""")
results = db.read(get_migrations)
typer.echo(f"{[result for result in results]}")
if __name__ == "__main__":
app()
| 2.390625
| 2
|
Scripts/codewars8.py
|
mateusvarelo/Codewars
| 0
|
12779879
|
def to_weird_case(string='This is a test'):
nova_string = [let.upper() if i % 2 == 0 else let.lower()
for i,let in enumerate(string)
]
print(nova_string)
return ''.join(nova_string)
print(to_weird_case())
| 3.890625
| 4
|
up.py
|
Chupachu/LocMaker
| 0
|
12779880
|
import os
import sys
import shutil
from io import BytesIO
from functools import wraps
from textwrap import dedent
from random import choice, shuffle
from collections import defaultdict
import urllib.request
required = ["locmaker.py","locmaker_README.txt","Run.cmd"]
for item in required:
print('Downloading '+item+'...')
url = 'https://raw.github.com/Chupachu/LocMaker/master/'+item
urllib.request.urlretrieve(url, item)
optionals = ["countries.txt","ideologies.txt","out.yml"]
for item in optionals:
if not os.path.isfile(item):
print('Downloading '+item+'...')
url = 'https://raw.github.com/Chupachu/LocMaker/master/'+item
urllib.request.urlretrieve(url, item)
| 2.703125
| 3
|
voxel/VoxelLogic.py
|
vincentlooi/alpha_zero
| 0
|
12779881
|
<gh_stars>0
import numpy as np
import random
class BoxListGenerator(object):
def __init__(self, min_x=1, max_x=1, min_y=1, max_y=1, min_z=1, max_z=1):
assert max_x >= min_x > 0 and max_y >= min_y > 0 and max_z >= min_z > 0
self.min_x = min_x
self.min_y = min_y
self.min_z = min_z
self.max_x = max_x
self.max_y = max_y
self.max_z = max_z
def generate(self, n, max_cells=None, sort=True):
boxes = []
acc_cells = 0
while len(boxes) < n:
if max_cells is not None and acc_cells >= max_cells:
break
w = random.randint(self.min_x, self.max_x)
h = random.randint(self.min_y, self.max_y)
d = random.randint(self.min_z, self.max_z) # depth (z)
acc_cells += w * h * d
boxes.append((w,h,d))
if sort:
boxes = self.sort_box_list(boxes)
return boxes
def sort_box_list(self, box_list):
str_boxes = sorted(["%d_%d_%d"%(i[0],i[1],i[2]) for i in box_list])
sorted_boxes = [np.array(b.split("_")).astype(np.int32) for b in str_boxes]
return sorted_boxes
class Board(object):
def __init__(self, x, y, z, n, box_list_gen=None):
"Set up initial board configuration."
self.x = x
self.y = y
self.z = z
self.n = n
n_cells = n * 3 # x,y,z for each
assert n > 0 and x % 3 == 0
y_rows = ((n_cells / x) + int(n_cells % x > 0))
self.cache_rows = y_rows / y + int(y_rows % y > 0)
self.len_x = x
self.len_y = y
self.len_z = z + self.cache_rows
self.total_cells = self.len_x * self.len_y * self.len_z
self.total_actions = self.x * self.y * self.z * self.n
self.box_list_generator = BoxListGenerator(min_x=1, max_x=int(self.len_x/2) + 1, min_y=1, max_y=int(self.y/2)+1, min_z=1, max_z=2)
if box_list_gen is not None:
assert type(box_list_gen) == BoxListGenerator
self.box_list_generator = box_list_gen
self.pieces = None
self.reset()
def reset(self):
self.pieces = np.zeros((self.len_z,self.len_y,self.len_x), dtype=np.int8)
box_list = self.generate_boxes()
self._fill_pieces_with_box_list(box_list)
self.box_list_area = self.calculate_box_list_area()
def setBoard(self, board_pieces):
self.pieces = board_pieces
self.box_list_area = self.calculate_box_list_area()
def _fill_pieces_with_box_list(self, box_list):
data_flat = np.zeros(self.cache_rows * self.y * self.x)
data_flat[:len(box_list) * 3] = np.array(box_list).flatten()
data = data_flat.reshape((self.cache_rows, self.y, self.x))
# print(data_flat.shape)
self.pieces[self.z:] = data
# print(data)
def calculate_box_list_area(self):
cache = self.pieces[self.z:,:].copy()
if self.x % 3 != 0:
cache = cache[:,:,:-(self.x % 3)]
cache = cache.flatten()
box_list_area = sum([cache[i] * cache[i+1] * cache[i+2] for i in xrange(0,len(cache),3)])
return int(box_list_area)
def generate_boxes(self):
sorted_boxes = self.box_list_generator.generate(n=self.n, max_cells=self.x * self.y * self.z, sort=True)
# print(sorted_boxes)
# print(len(sorted_boxes))
return sorted_boxes
def is_full(self):
return np.all(self.pieces[:self.z]==1)
def get_occupied_count(self):
return int(np.sum(self.pieces[:self.z])) # since occupied are 1, non-occ are 0
def get_score(self):
occ_cnt = self.get_occupied_count()
half_cnt = min(self.box_list_area, self.z * self.y * self.x) / 2.
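        # Normalize against half of the attainable fill volume: an empty board
        # scores -1, exactly half filled scores 0, and the score approaches +1
        # when the board (or the whole box list) has been packed.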
occ_score = (float(occ_cnt - half_cnt) / half_cnt)# ** 2
# occ_score = -occ_score if occ_cnt < half_cnt else occ_score
return occ_score
def is_valid_placement(self, square, box_size):
x,y,z = square
w,h,d = box_size
assert w!=0 and h!=0 and d!=0
assert x < self.x and y < self.y and z < self.z
if self.pieces[z,y,x]==0: # not occupied
if (x+w-1) < self.x and (y+h-1) < self.y and (z+d-1) < self.z:
if np.sum(self.pieces[z:z+d,y:y+h,x:x+w]) == 0: # none of the placement cells are occupied
if (z+d) < self.z: # if not on ground
# CHECK IF placement is on top of a sufficient number of occupied cells
is_stable = np.sum(self.pieces[z+d,y:y+h,x:x+w]) >= w*h
if not is_stable:
return False
if y > 0: # if not next to grid wall
is_at_wall = np.sum(self.pieces[z:z+d,y-1,x:x+w]) >= (w*d / 2 + 1) # at least bigger than half next to it
return is_at_wall
return True
return False
def get_legal_squares(self, box_size):
"""Returns all the legal moves for the box size
"""
# assert len(box_size) == 2 # box_size: w,h
moves = set() # stores the legal moves.
w,h,d = box_size
for z in xrange(self.z):
for y in xrange(self.y):
for x in xrange(self.x):
square = (x,y,z)
if self.is_valid_placement(square, box_size):
moves.add(square)
return list(moves)
def get_legal_moves(self, box_idx):
"""Returns all the legal moves for the box size
"""
# assert len(box_size) == 2 # box_size: w,h
box_size = self.get_box_size_from_idx(box_idx)
if box_size[0] == 0:
return []
legal_squares = self.get_legal_squares(box_size)
# print(sorted(legal_squares))
return [self.get_action_from_square_and_box_idx(sq, box_idx) for sq in legal_squares]
def get_legal_moves_all(self):
legal_moves = []
for box_idx in xrange(self.n):
legal_moves += self.get_legal_moves(box_idx)
return legal_moves
def has_legal_moves(self, box_size):
assert len(box_size) == 3 # box_size: w,h,d
legal_moves = self.get_legal_squares(box_size)
return len(legal_moves) != 0
def has_legal_moves_all(self):
for box_idx in xrange(self.n):
w,h,d = self.get_box_size_from_idx(box_idx)
if w > 0 and self.has_legal_moves((w,h,d)):
return True
return False
def get_box_size_from_idx(self, box_idx):
assert box_idx < self.n
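        # Box dimensions are stored as consecutive (w, h, d) triples flattened into
        # the cache rows stacked above the playing area (layers z >= self.z); the
        # arithmetic below maps box_idx to the cell holding the first component.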
x = box_idx * 3 % self.x
y = box_idx * 3 / self.x
z = y / self.y
y = y - z * self.y
box_cells = self.pieces[self.z + z, y, x:x+3]
w, h, d = box_cells
return (w,h,d)
def get_action_from_square_and_box_idx(self, square, box_idx):
x, y, z = square
return box_idx * self.x * self.y * self.z + z * self.x * self.y + y * self.x + x
def get_square_and_box_size_from_action(self, action):
box_idx = action / (self.x * self.y * self.z)
square_idx = action % (self.x * self.y * self.z)
w,h,d = self.get_box_size_from_idx(box_idx)
if w == 0:
return None, None, box_idx
x,y,z = self.boardIndexToSquare(square_idx)
return (x,y,z), (w,h,d), box_idx
def is_action_valid(self, action):
sq, box_size, box_idx = self.get_square_and_box_size_from_action(action)
if sq is None:
return False
return self.is_valid_placement(sq, box_size)
def boardIndexToSquare(self, idx):
z = idx / (self.x * self.y)
rem = idx % (self.x * self.y)
y = rem / self.x
x = rem % self.x
return x,y,z
def move_box(self, square, box_size):
x,y,z = square
assert x < self.x and y < self.y and z < self.z
w,h,d = box_size
self.pieces[z:z+d,y:y+h,x:x+w] = 1
def execute_move(self, action):
sq, box_size, box_idx = self.get_square_and_box_size_from_action(action)
if sq is None:
return
self.move_box(sq, box_size)
# remove box idx
cache_flat = self.pieces[self.z:].flatten()
cache_flat = np.delete(cache_flat, [box_idx*3, box_idx*3+1,box_idx*3+2])
cache_flat = np.hstack((cache_flat,[0,0,0]))
self.pieces[self.z:] = np.reshape(cache_flat, (len(cache_flat) / (self.y * self.x), self.y, self.x))
if __name__ == '__main__':
# from VoxelRender import BoardRenderer
x = 6
y = 7
z = 3
n = 10
b = Board(x,y,z,n)
random_box_idx = n / 3
valid_actions = sorted(b.get_legal_moves(random_box_idx))
cnt = 0
for action in valid_actions:
sq, box_sz, box_idx = b.get_square_and_box_size_from_action(action)
print(sq, box_sz)
cnt += 1
# if cnt >= 5:
# break
random_action = random.choice(valid_actions)
print("Picking action:", random_action)
# execute action
b.execute_move(random_action)
print("Score %.3f"%(b.get_score()))
| 3.140625
| 3
|
RcTorch/old_backprop_code.py
|
blindedjoy/RcTorch
| 3
|
12779882
|
else:
#+++++++++++++++++++++++++++++++ backprop +++++++++++++++++++++++++++++++
trainable_parameters = []
for p in self.parameters():
if p.requires_grad:
trainable_parameters.append(p)
for i, p in enumerate(trainable_parameters):
print(f'Trainable parameter {i} {p.name} {p.data.shape}')
running_loss = 0
train_losses = []
if not optimizer:
optimizer = optim.Adam(self.parameters(), lr=learning_rate)
min_loss = float("Inf")
epochs_not_improved = False
bias_buffer = torch.ones((y.shape[0],1),**self.dev)
if self.epochs == None:
endless = True
else:
endless = False
self.freeze_weights()
assert self.LinOut.weight.requires_grad
states = self.state.clone()
states = states.to(self.device)
######. TRAIN STATES ################### SEPARATELY FROM THE OTHER FORWARD PASS
#X_detached = self.X.detach()
#X_detached.requires_grad_(True)
#self.dh_dx = torch.zeros(0,**self.dev)
X_detached = self.unscaled_X.clone().detach().requires_grad_(True)
if scale_x:
X_detached = (X_detached- self._input_means) / self._input_stds
if out_weights:
self.LinOut.weight.data = out_weights["weight"]
self.LinOut.bias.data = out_weights["bias"]
#self.LinOut.weight.requires_grad_(False)
#self.LinOut.bias.requires_grad_(False)
state_list = []
if self.feedback:
assert False, "not implimented"
"""
for t in range(0, X_detached.shape[0]):
input_t = X_detached[t, :].T
state_t, output_t = self.train_state(t, X = input_t,
state = states[t,:],
y = None, output = True)
state_list.append(state_t)
with no_grad():
dht_dt = dfx(X_detached, state_t)
dht_dy = dfx(state_t, output_t)
if t > 2:
dht_dh = dfx(state_list[t-1], state_t)
dyt_dx = dfx(X_detached, output_t) #<-- calculating the derivative at each timestep.
if t == 0:
self.dh_dhs = []
self.dh_dts = []
self.dh_dys = []
self.dy_dxs = []
self.outputs = []
self.outputs = []
self.dh_dt = dht_dt
#self.dy_dh =
#elif t > 1:
self.dh_dt = torch.cat((self.dh_dt, dht_dt))
#self.dy_dh = torch.cat((self.dh_dx, dht_dx))
self.dh_dys.append(dht_dy)
self.dh_dts.append(dht_dt)
self.outputs.append(output_t)
self.dy_dxs.append(dyt_dx)
self.dh_dhs.append(dyt_dx)
states = cat([states, state_t.view(-1, self.n_nodes)], axis = 0)
#####################################################
self.dy_dx = dfx(X_detached, torch.vstack(self.outputs)) #one time derivative doesn't work very well.
#####################################################################
del states
self.hidden_transitions = torch.hstack(self.dh_dhs).cpu().T
self.dy_dx_matrix = torch.hstack(self.dy_dxs)
#del self.dy_dxs
#proper scaling:
self.dy_dx = self.dy_dx_matrix.sum(axis = 0 )# / self._input_stds)*self._output_stds
#undo the output layer:
self.dh_dx = self.dy_dx / (self.LinOut.weight.view(-1,self.n_outputs) * (self.n_nodes + 1))
"""
#self.dh_dx_for_backwards_pass = self.dh_dx.mean(axis = 0).view(-1,1).clone().detach()
#assert False
#extended_states = hstack((X_detached, states[1:]))
self.loss_history, reassign = [], False
base_derivative_calculated = False
# The recurrence class will do the forward pass for backprop. We will give it the derivative after doing another forward pass
#order: ctx, input, states, LinIn, LinOut, LinRes, bias_, n_nodes, activation_function, leaking_rate, noise, feedback, y, tensor_args, LinFeedback = None
final_grad = None
if 1 == 0:
pass
else:
for e in range(self.epochs):
self.states = self.state.clone()
self.states = self.states.to(self.device).view(1,-1)
optimizer.zero_grad()
"""
for t in range(0, X_detached.shape[0]):
input_t = self.X[t, :].T
train_states_class = Recurrence()
state_t = train_states_class.apply(input_t,
self.states[t,:],
self.LinIn,
self.LinOut,
self.LinRes,
self.bias,
self.n_nodes,
self.activation_function,
self.leaking_rate,
self.noise,
self.feedback,
y,
self.tensor_args,
self.dh_dts[t].detach(),
self.hidden_transitions[t,:].detach(),
None)
"""
for t in range(0, self.X.shape[0]):
input_t = self.X[t, :].T
state_t, output_t = self.train_state(t, X = input_t,
state = self.states[t,:],
y = None, output = True, retain_grad = True)
state_t.retain_grad()
if full_grads:
dyt_dx = dfx(self.X, output_t)
if not t:
self.dy_dxs = [dyt_dx]
else:
self.dy_dxs.append(dyt_dx)
self.states= cat([self.states, state_t.view(-1, self.n_nodes)], axis = 0)
if not t:
outputs = output_t
else:
outputs = torch.cat((outputs, output_t), axis = 0)
if full_grads:
self.dy_dx_matrix = torch.hstack(self.dy_dxs)
self.dy_dx = self.dy_dx_matrix.sum(axis = 0 )
extended_states = hstack((self.X.detach(), self.states[1:]))
self.yfit = self.forward(extended_states)
#self.dy_dh = dfx(states, self.yfit)
#with torch.no_grad():
# if self.track_in_grad or ODE:
# self.dy_dx_orig = dfx(self.X, self.yfit)
# self.dh_dx = self.dy_dx_orig / (self.LinOut.weight * self.LinOut.weight.shape[1])
if ODE or self.track_in_grad:
#assert self.yfit.shape == self.dh_dx.shape, f'{self.yfit.shape} != {self.dh_dx.shape}'
if scale_x:
yfit = self.yfit / self._input_stds
with torch.no_grad():
if not full_grads:
self.dy_dx = dfx(self.X, self.yfit)
else:
self.dy_dx = self.dy_dx / self._input_stds
if ODE:
loss = self.criterion(self.X, self.yfit, self.dy_dx)
else:
#self.yfit = self.yfit * self._output_stds + self._output_means
loss = self.criterion(self.yfit, y)
assert loss.requires_grad
#assert False, loss
loss.backward(retain_graph = True)
assert type(self.X.grad != None)
#save best weights
if e > save_after_n_epochs:
if float(loss) < min(self.loss_history):
best_bias, best_weight = self.LinOut.bias.clone(), self.LinOut.weight.clone()
self.LinOut.bias.data, self.LinOut.weight.data = best_bias, best_weight.view(*self.LinOut.weight.shape)
self.final_grad = self.dy_dx.clone()
self.loss_history.append(float(loss))
optimizer.step()
if e % 100 == 0:
print("Epoch: {}/{}.. ".format(e+1, self.epochs),
"Training Loss: {:.3f}.. ".format(torch.log(loss)))
#early stopping
if self.patience:
if e > 10:
if loss < min_loss:
epochs_not_improved = 0
min_loss = loss
else:
epochs_not_improved += 1
if e > 10 and epochs_not_improved >= self.patience:
print('Early stopping at epoch' , e, 'loss', loss)
early_stop = True
break
else:
continue
e=e+1
#to avoid unstable solutions consider an additional convergence parameter
#early stopping code from the following article:
#https://www.kaggle.com/akhileshrai/tutorial-early-stopping-vanilla-rnn-pytorch
#for the final state we want to save that in self.state
self.out_weights = self.LinOut.weight
self.out_weights._name_ = "out_weights"
#extended_states = hstack((self.X, self.state))
| 2.109375
| 2
|
tests/test_run.py
|
SOFIE-project/Provisioning-and-Discovery
| 1
|
12779883
|
<reponame>SOFIE-project/Provisioning-and-Discovery
"""
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
from sys import argv
import os
import subprocess
import time
import sys
import json
import requests
sys.path.append("./src")
from sofie_pd_component.gatt_application import BLE
from sofie_pd_component.eddystone_url import startUrlAdvertise, stopUrlAdvertise
from sofie_pd_component.eddystone_uuid import startUuidAdvertise, stopUuidAdvertise
from sofie_pd_component.dns import run
if sys.version_info > (3, 0):
DEVNULL = subprocess.DEVNULL
else:
DEVNULL = open(os.devnull, "wb")
URL = "//google.com"
NAME = "TEST"
UUID = "00000000000000000000000000000000"
def restart_bluetooth():
"""This method is used to reset Bluetooth interface for clearing any previous settings. """
print("Restarting Bluetooth")
subprocess.call("sudo hciconfig -a hci0 down", shell=True, stdout=DEVNULL)
time.sleep(2)
subprocess.call("sudo hciconfig -a hci0 up", shell=True, stdout=DEVNULL)
def start_uuid_advertise():
print("Test - Start UUID Advertising")
restart_bluetooth()
try:
if len(UUID) == 32:
NAMESPACE = UUID[:10]
INSTANCEID = UUID[10:]
subprocess.call(["sudo", "-v"])
startUuidAdvertise('hci0', bytes.fromhex('E7'), bytes.fromhex(NAMESPACE), bytes.fromhex(INSTANCEID))
except:
print("Test Failed")
def stop_uuid_advertise():
print("Test - Stop UUID Advertising")
try:
stopUuidAdvertise()
except:
print("Test Failed")
def start_url_advertise():
print("Test - Start URL Advertising")
restart_bluetooth()
try:
startUrlAdvertise("http:" + URL)
except:
print("Test Failed")
def stop_url_advertise():
print("Test - Stop URL Advertising")
try:
stopUrlAdvertise()
except:
print("Test Failed")
def server_startup():
print("Test - Starting DNS-SD")
try:
run()
except:
print("Test Failed")
if __name__ == '__main__':
print("##### Starting Tests ######")
start_uuid_advertise()
stop_uuid_advertise()
start_url_advertise()
stop_url_advertise()
print("##### Ending Tests #####")
| 1.984375
| 2
|
Crawler/src/parser/test_parser.py
|
cam626/RPEye-Link-Analysis
| 2
|
12779884
|
'''
This contains tests for the parse and get_bad_paths methods.
'''
import unittest
import parser
class TestParser(unittest.TestCase):
'''
This contains tests for the parse and get_bad_paths methods.
'''
def test_empty(self):
''' Parse an empty string.'''
self.assertEqual(parser.parse(''), [])
def test_one_disallow(self):
''' Parse a string with one disallow.'''
self.assertEqual(parser.parse("Disallow: /stuff/"), ['/stuff/'])
def test_two_disallows(self):
''' Parse a string with two disallows.'''
self.assertEqual(parser.parse("Disallow: /stuff/\nDisallow: /home/"), ['/stuff/', '/home/'])
def test_allow(self):
        ''' Parse a string with an allow statement.'''
self.assertEqual(parser.parse("Allow: /stuff/"), [])
def test_applicable_useragent(self):
''' Parse a string with a user-agent and a relevant disallow.'''
self.assertEqual(parser.parse("User-agent: * \nDisallow: /stuff/"), ['/stuff/'])
def test_not_applicable_useragent(self):
''' Parse a string with an unknown user-agent and a disallow that is ignored.'''
self.assertEqual(parser.parse("User-agent: someone else \nDisallow: /stuff/"), [])
def test_basic_page(self):
''' Test a simple robots.txt page. '''
expected_result = ['/cgi-bin/', '/rcs/', '/~sibel/poetry/poems/', '/~sibel/poetry/books/', '/~musser/dagproc']
self.assertEqual(parser.get_bad_paths("http://cs.rpi.edu/robots.txt"), expected_result)
def test_nonexistent_page(self):
''' Test a page that doesn't exist.'''
self.assertEqual(parser.get_bad_paths("http://rpi.edu/robots.taxt"), [])
def test_targeted_disallows(self):
''' Test a page that has targeted disallows.'''
expected_result = ['/feed/', '/c/accounts/', '/c/crontab/', '/c/graphics/', '/c/locale/', '/c.new/', '/c.bak/', '/c_hacks', '/c/pinc/', '/c/setup/', '/c/stats/', '/c/tools/', '/c/users/', '/down/', '/dpmail/', '/d', '/out', '/jpgraph/', '/jpgraph-1.14', '/archive', '/projects', '/mailman/', '/noncvs', '/phpbb2', '/phpbb3', '/phpbb-3.2.0', '/phpmyadmin', '/sawiki', '/squirrels', '/stats/', '/tools', '/w', '/wikiheiro']
self.assertEqual(parser.get_bad_paths("https://www.pgdp.net/robots.txt"), expected_result)
def test_allows(self):
''' Test a page that has allows.'''
self.assertEqual(parser.get_bad_paths("https://www.choiceofgames.com/robots.txt"), [])
if __name__ == "__main__":
unittest.main()
| 3.4375
| 3
|
project/timenow.py
|
tarabuk1n/Debian-Miniconda-Installation
| 1
|
12779885
|
from datetime import datetime
time_now = datetime.now()
print(time_now.strftime('%B/%d/%Y:%H/%M'))
| 3.328125
| 3
|
PyCTBN/PyCTBN/utility/json_importer.py
|
pietroepis/PyCTBN
| 1
|
12779886
|
<reponame>pietroepis/PyCTBN
# License: MIT License
import json
import typing
import pandas as pd
from .abstract_importer import AbstractImporter
class JsonImporter(AbstractImporter):
"""Implements the abstracts methods of AbstractImporter and adds all the necessary methods to process and prepare
the data in json extension.
:param file_path: the path of the file that contains tha data to be imported
:type file_path: string
:param samples_label: the reference key for the samples in the trajectories
:type samples_label: string
:param structure_label: the reference key for the structure of the network data
:type structure_label: string
    :param variables_label: the reference key for the cardinalities of the nodes data
:type variables_label: string
:param time_key: the key used to identify the timestamps in each trajectory
:type time_key: string
:param variables_key: the key used to identify the names of the variables in the net
:type variables_key: string
:_array_indx: the index of the outer JsonArray to extract the data from
:type _array_indx: int
:_df_samples_list: a Dataframe list in which every dataframe contains a trajectory
:_raw_data: The raw contents of the json file to import
:type _raw_data: List
"""
def __init__(self, file_path: str, samples_label: str, structure_label: str, variables_label: str, time_key: str,
variables_key: str, cims_label: str = None):
"""Constructor method
.. note::
This constructor calls also the method ``read_json_file()``, so after the construction of the object
the class member ``_raw_data`` will contain the raw imported json data.
"""
self._samples_label = samples_label
self._structure_label = structure_label
self._variables_label = variables_label
self._cims_label = cims_label
self._time_key = time_key
self._variables_key = variables_key
self._df_samples_list = None
self._array_indx = None
super(JsonImporter, self).__init__(file_path)
self._raw_data = self.read_json_file()
def import_data(self, indx: int = 0) -> None:
"""Implements the abstract method of :class:`AbstractImporter`.
:param indx: the index of the outer JsonArray to extract the data from, default to 0
:type indx: int
"""
self._array_indx = indx
self._df_samples_list = self.import_trajectories(self._raw_data)
self._sorter = self.build_sorter(self._df_samples_list[0])
self.compute_row_delta_in_all_samples_frames(self._df_samples_list)
self.clear_data_frame_list()
self._df_structure = self.import_structure(self._raw_data)
self._df_variables = self.import_variables(self._raw_data)
        if self._cims_label is not None:
self._cims = self._raw_data[indx][self._cims_label]
def import_trajectories(self, raw_data: typing.List) -> typing.List:
"""Imports the trajectories from the list of dicts ``raw_data``.
:param raw_data: List of Dicts
:type raw_data: List
:return: List of dataframes containing all the trajectories
:rtype: List
"""
return self.normalize_trajectories(raw_data, self._array_indx, self._samples_label)
def import_structure(self, raw_data: typing.List) -> pd.DataFrame:
"""Imports in a dataframe the data in the list raw_data at the key ``_structure_label``
:param raw_data: List of Dicts
:type raw_data: List
        :return: Dataframe containing the starting node and ending node of every arc of the network
        :rtype: pandas.DataFrame
"""
return self.one_level_normalizing(raw_data, self._array_indx, self._structure_label)
def import_variables(self, raw_data: typing.List) -> pd.DataFrame:
"""Imports the data in ``raw_data`` at the key ``_variables_label``.
:param raw_data: List of Dicts
:type raw_data: List
        :return: Dataframe containing the variables' symbolic labels and their cardinalities
        :rtype: pandas.DataFrame
"""
return self.one_level_normalizing(raw_data, self._array_indx, self._variables_label)
def read_json_file(self) -> typing.List:
"""Reads the JSON file in the path self.filePath.
:return: The contents of the json file
:rtype: List
"""
with open(self._file_path) as f:
data = json.load(f)
if (isinstance(data,list)):
return data
else:
return [data]
def one_level_normalizing(self, raw_data: typing.List, indx: int, key: str) -> pd.DataFrame:
"""Extracts the one-level nested data in the list ``raw_data`` at the index ``indx`` at the key ``key``.
:param raw_data: List of Dicts
:type raw_data: List
:param indx: The index of the array from which the data have to be extracted
:type indx: int
        :param key: the key for the Dicts from which to extract data
:type key: string
:return: A normalized dataframe
        :rtype: pandas.DataFrame
"""
return pd.DataFrame(raw_data[indx][key])
def normalize_trajectories(self, raw_data: typing.List, indx: int, trajectories_key: str) -> typing.List:
"""
        Extracts the trajectories in ``raw_data`` at the index ``indx`` at the key ``trajectories_key``.
:param raw_data: List of Dicts
:type raw_data: List
:param indx: The index of the array from which the data have to be extracted
:type indx: int
:param trajectories_key: the key of the trajectories objects
:type trajectories_key: string
        :return: A list of dataframes containing the trajectories
:rtype: List
"""
dataframe = pd.DataFrame
smps = raw_data[indx][trajectories_key]
df_samples_list = [dataframe(sample) for sample in smps]
return df_samples_list
def build_sorter(self, sample_frame: pd.DataFrame) -> typing.List:
"""Implements the abstract method build_sorter of the :class:`AbstractImporter` for this dataset.
"""
columns_header = list(sample_frame.columns.values)
columns_header.remove(self._time_key)
return columns_header
def clear_data_frame_list(self) -> None:
"""Removes all values present in the dataframes in the list ``_df_samples_list``.
"""
for indx in range(len(self._df_samples_list)):
self._df_samples_list[indx] = self._df_samples_list[indx].iloc[0:0]
def dataset_id(self) -> object:
return self._array_indx
def import_sampled_cims(self, raw_data: typing.List, indx: int, cims_key: str) -> typing.Dict:
"""Imports the synthetic CIMS in the dataset in a dictionary, using variables labels
as keys for the set of CIMS of a particular node.
:param raw_data: List of Dicts
:type raw_data: List
:param indx: The index of the array from which the data have to be extracted
:type indx: int
:param cims_key: the key where the json object cims are placed
:type cims_key: string
:return: a dictionary containing the sampled CIMS for all the variables in the net
:rtype: Dictionary
"""
cims_for_all_vars = {}
for var in raw_data[indx][cims_key]:
sampled_cims_list = []
cims_for_all_vars[var] = sampled_cims_list
for p_comb in raw_data[indx][cims_key][var]:
cims_for_all_vars[var].append(pd.DataFrame(raw_data[indx][cims_key][var][p_comb]).to_numpy())
return cims_for_all_vars
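# A minimal usage sketch (the file name and JSON keys below are assumptions, not part
# of this module):
#   importer = JsonImporter(file_path='networks.json', samples_label='samples',
#                           structure_label='dyn.str', variables_label='variables',
#                           time_key='Time', variables_key='Name')
#   importer.import_data(indx=0)
# after which the trajectories, structure and variables dataframes are available
# through the AbstractImporter members populated above.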
| 2.921875
| 3
|
tests/test_tools.py
|
gothill/python-fedex
| 100
|
12779887
|
"""
Test module for the Fedex Tools.
"""
import unittest
import logging
import sys
sys.path.insert(0, '..')
import fedex.config
import fedex.services.ship_service as service # Any request object will do.
import fedex.tools.conversion
logging.getLogger('suds').setLevel(logging.ERROR)
logging.getLogger('fedex').setLevel(logging.INFO)
class FedexToolsTests(unittest.TestCase):
"""
These tests verify that the fedex tools are working properly.
"""
def test_conversion_tools(self):
# Empty config, since we are not actually sending anything
config = fedex.config.FedexConfig(key='', password='',
account_number='', meter_number='',
use_test_server=True)
# We need a mock suds object, a request object or sub-object will do.
waybill_request = service.FedexProcessShipmentRequest(config)
obj = waybill_request.create_wsdl_object_of_type('ProcessShipmentRequest')
# Test basic sobject to dict.
dict_obj = fedex.tools.conversion.basic_sobject_to_dict(obj)
assert type(dict_obj) == dict
# Test with serialization and case conversion.
dict_obj = fedex.tools.conversion.sobject_to_dict(obj, key_to_lower=True, json_serialize=True)
assert type(dict_obj) == dict
# JSON string object test
dict_obj = fedex.tools.conversion.sobject_to_json(obj)
assert dict_obj, "Expecting a JSON string object."
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
unittest.main()
| 2.828125
| 3
|
binner/algo_single.py
|
Zbooni/binner
| 29
|
12779888
|
from .algo import Algo
from .algo_code import AlgoCode
from .entity_slot import Slot
from .entity_space import Space
from . import log, show_adding_box_log
from .exception import DistributionException
import time
class AlgoSingle(Algo):
"""
pack items into a single
bin
for single bin packing we merely
need to operate on one bin. Don't
accept input bins larger than size one
@param item_collection: set of items
@returns one bin packed with items
"""
def run(self):
log.debug("Entering algorithm SINGLE")
bin_collection = self.bins
        item_collection = self.items
if len(bin_collection.items) == 0 or len(bin_collection.items) > 1:
            raise DistributionException("Single accepts exactly one bin")
bin = bin_collection.next()
"""
        checks whether the bin can continue within the space
        for the single algo
"""
def continue_fn(bin, space, item):
if bin.occupied_space(space, item):
return AlgoCode.NO_SPACE
m_y = bin.get_min_y_pos(space.y)
            if space.x + item.w > bin.w:
""" try z now """
space.z += item.d
space.x = 0
else:
space.x += 1
""" if space.z fails and so does space.x """
""" go up in height make sure y """
""" is at the proper juxtaposition """
if space.z + item.d > bin.d:
space.y += m_y.max_y
space.x = m_y.min_x
space.z = m_y.min_z
if int(space.y + item.h) > bin.h:
return AlgoCode.LAST_ITEM
return AlgoCode.FOUND_SPACE
while bin:
log.info("Trying to allocate items for bin: {0}".format(bin.id))
item_collection.reset()
bin.start_time = time.time()
item = item_collection.next()
while item:
item = item_collection.current()
if not bin.can_fit( item ) :
item_collection.next()
continue
space = Space(x=0, y=0, z=0)
""" if item.w > bin.w: """
""" self.binner.add_lost(item) """
can_continue = continue_fn(bin, space, item)
while can_continue == AlgoCode.NO_SPACE:
""" if were at the top of the box """
""" we cannot allocate any more space so we can move on """
space.compute_next_sequence()
can_continue = continue_fn(bin, space, item)
if can_continue == AlgoCode.LAST_ITEM:
continue
show_adding_box_log(space, item)
slot = Slot.from_space_and_item(space, item)
bin.append(slot)
item = item_collection.next()
bin.end_time = time.time()
bin = bin_collection.next()
return self.binner
| 2.8125
| 3
|
demos.py
|
tacticsiege/Banana-DQN
| 0
|
12779889
|
import numpy as np
import time
from unityagents import UnityEnvironment
from agent_utils import env_initialize, env_reset, state_reward_done_unpack
from dqn_agent import DQN_Agent
from agent_utils import load_dqn
from agent_utils import load_params, load_weights
def demo_agent(env, agent, n_episodes, epsilon=0.05, seed=0, train_mode=False):
print(f'\r\nRunning demo of \'{agent.name}\' with epsilon={epsilon}')
scores = []
for i in range(1, n_episodes+1):
score = 0
state = env_reset(env, agent.brain_name, train_mode=train_mode)
while True:
action = int(agent.act(state, epsilon))
env_info = env.step(action)[agent.brain_name]
next_state, reward, done = state_reward_done_unpack(env_info)
score += reward
state = next_state
if done:
break
scores.append(score)
print(f'Episode {i}\tScore: {score:.2f}')
print('\r\nDemo complete! Scores:\tMin:{:.2f}\tMax:{:.2f}\tAvg:{:.3f}'.format(
np.min(scores), np.max(scores), np.mean(scores)))
return scores
def demo_saved_agent(env, agent_name, n_episodes=3, epsilon=0.05, seed=0,
train_mode=False, verbose=False):
# initialize environment and scenario info
brain, brain_name, state, action_size, state_size = env_initialize(env, train_mode=train_mode)
# load the agent params and create the agent
params, local_weights, target_weights = load_dqn(agent_name, verbose=verbose)
agent = DQN_Agent(state_size, action_size, brain_name, seed, params=params)
print(agent.display_params())
# set trained agent weights
agent.qnetwork_local.load_state_dict(local_weights)
agent.qnetwork_target.load_state_dict(target_weights)
# run demo
return demo_agent(env, agent,
n_episodes=n_episodes, epsilon=epsilon,
seed=seed, train_mode=train_mode)
def demo_random_agent_discrete(env, n_episodes=3, train_mode=False, verbose=False):
""" Runs the environment using a uniform random action selection policy. """
# setup the environment and get initial info
brain, brain_name, state, action_size, state_size = env_initialize(env, train_mode=train_mode, verbose=verbose)
start_time = time.time()
for n_episode in range(1, n_episodes+1):
# reset the environment for the new episode
state = env_reset(env, brain_name, train_mode=train_mode)
# track scores and the number of steps in an episode
score = 0
steps = 0
while True:
# choose a random action
action = np.random.randint(action_size)
# send action to environment and get updated info
env_info = env.step(action)[brain_name]
next_state, reward, done = state_reward_done_unpack(env_info)
score += reward
steps += 1
# set the state for next iteration
state = next_state
if done:
break # end episode if we get the done signal
print (f'Episode {n_episode} score: {score} in {steps} steps.')
end_time = time.time()
avg_episode_time = (end_time - start_time) / n_episodes
print (f'Random agent demo complete, avg episode duration: {avg_episode_time:.3f}s.')
| 2.21875
| 2
|
BlenderMalt/MaltPipeline.py
|
BlenderAddonsArchive/Malt
| 0
|
12779890
|
# Copyright (c) 2020 BlenderNPR and contributors. MIT license.
import os, time
import bpy
from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures
__BRIDGE = None
__PIPELINE_PARAMETERS = None
__INITIALIZED = False
TIMESTAMP = time.time()
def get_bridge(world=None):
global __BRIDGE
bridge = __BRIDGE
if bridge is None or bridge.lost_connection:
__BRIDGE = None
try:
if world is None:
bpy.context.scene.world.malt.update_pipeline(bpy.context)
else:
world.malt.update_pipeline(bpy.context)
except:
pass
return __BRIDGE
def set_bridge(bridge):
global __BRIDGE
__BRIDGE = bridge
def set_pipeline_parameters(parameters):
global __PIPELINE_PARAMETERS
__PIPELINE_PARAMETERS = parameters
def set_initialized(initialized):
global __INITIALIZED
__INITIALIZED = initialized
class MaltPipeline(bpy.types.PropertyGroup):
def update_pipeline(self, context):
global TIMESTAMP
TIMESTAMP = time.time()
#TODO: Sync all scenes. Only one active pipeline per Blender instance is supported atm.
pipeline = self.pipeline
if pipeline == '':
current_dir = os.path.dirname(os.path.abspath(__file__))
default_pipeline = os.path.join(current_dir,'.MaltPath','Malt','Pipelines','NPR_Pipeline','NPR_Pipeline.py')
pipeline = default_pipeline
debug_mode = bool(bpy.context.preferences.addons['BlenderMalt'].preferences.debug_mode)
path = bpy.path.abspath(pipeline, library=self.id_data.library)
import Bridge
bridge = Bridge.Client_API.Bridge(path, debug_mode)
import logging as log
log.info('Blender {} {} {}'.format(bpy.app.version_string, bpy.app.build_branch, bpy.app.build_hash))
params = bridge.get_parameters()
set_bridge(bridge)
set_pipeline_parameters(params)
MaltMaterial.reset_materials()
MaltMeshes.reset_meshes()
MaltTextures.reset_textures()
setup_all_ids()
set_initialized(True)
pipeline : bpy.props.StringProperty(name="Malt Pipeline", subtype='FILE_PATH', update=update_pipeline)
# There's no StringVectorProperty ?!?!?
overrides : bpy.props.StringProperty(name='Pipeline Overrides', default='Preview,Final Render')
def draw_ui(self, layout):
layout.prop(self, 'pipeline')
class MALT_PT_Pipeline(bpy.types.Panel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "world"
bl_label = "Pipeline Settings"
COMPAT_ENGINES = {'MALT'}
@classmethod
def poll(cls, context):
return context.scene.render.engine == 'MALT' and context.world is not None
def draw(self, context):
context.scene.world.malt.draw_ui(self.layout)
classes = (
MaltPipeline,
MALT_PT_Pipeline,
)
def setup_all_ids():
setup_parameters(bpy.data.scenes)
setup_parameters(bpy.data.worlds)
setup_parameters(bpy.data.cameras)
setup_parameters(bpy.data.objects)
setup_parameters(bpy.data.materials)
setup_parameters(bpy.data.meshes)
setup_parameters(bpy.data.curves)
setup_parameters(bpy.data.lights)
MaltMaterial.track_shader_changes(force_update=True)
def setup_parameters(ids):
global __PIPELINE_PARAMETERS
pipeline_parameters = __PIPELINE_PARAMETERS
class_parameters_map = {
bpy.types.Scene : pipeline_parameters.scene,
bpy.types.World : pipeline_parameters.world,
bpy.types.Camera : pipeline_parameters.camera,
bpy.types.Object : pipeline_parameters.object,
bpy.types.Material : pipeline_parameters.material,
bpy.types.Mesh : pipeline_parameters.mesh,
bpy.types.Curve : pipeline_parameters.mesh,
bpy.types.Light : pipeline_parameters.light,
}
for bid in ids:
for cls, parameters in class_parameters_map.items():
if isinstance(bid, cls):
bid.malt_parameters.setup(parameters)
@bpy.app.handlers.persistent
def depsgraph_update(scene, depsgraph):
global __INITIALIZED
if scene.render.engine != 'MALT':
# Don't do anything if Malt is not the active renderer,
# but make sure we setup all IDs the next time Malt is enabled
__INITIALIZED = False
return
if __INITIALIZED == False:
scene.world.malt.update_pipeline(bpy.context)
return
ids = []
class_data_map = {
bpy.types.Scene : bpy.data.scenes,
bpy.types.World : bpy.data.worlds,
bpy.types.Camera : bpy.data.cameras,
bpy.types.Object : bpy.data.objects,
bpy.types.Material : bpy.data.materials,
bpy.types.Mesh : bpy.data.meshes,
bpy.types.Curve : bpy.data.curves,
bpy.types.Light : bpy.data.lights,
}
for update in depsgraph.updates:
        # Try to avoid as many re-setups as possible.
# Ideally we would do it only on ID creation.
if update.is_updated_geometry == True or update.is_updated_transform == False:
for cls, data in class_data_map.items():
if isinstance(update.id, cls):
ids.append(data[update.id.name])
setup_parameters(ids)
redraw = False
for update in depsgraph.updates:
if update.is_updated_geometry:
if 'Object' in str(update.id.__class__):
MaltMeshes.unload_mesh(update.id)
if update.id.__class__ == bpy.types.Image:
MaltTextures.unload_texture(update.id)
redraw = True
elif update.id.__class__ == bpy.types.Material:
MaltTextures.unload_gradients(update.id)
redraw = True
if redraw:
for screen in bpy.data.screens:
for area in screen.areas:
area.tag_redraw()
@bpy.app.handlers.persistent
def load_scene(dummy1=None,dummy2=None):
global __INITIALIZED
__INITIALIZED = False
def track_pipeline_changes():
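    # Registered as a persistent bpy.app timer: when the pipeline source file on disk
    # is newer than TIMESTAMP (the time of the last pipeline load), reload the pipeline.
    # Returning 1 asks Blender to call this function again in one second.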
if bpy.context.scene.render.engine != 'MALT':
return 1
try:
scene = bpy.context.scene
malt = scene.world.malt
path = bpy.path.abspath(malt.pipeline, library=malt.id_data.library)
if os.path.exists(path):
stats = os.stat(path)
if stats.st_mtime > TIMESTAMP:
malt.update_pipeline(bpy.context)
except:
import traceback
print(traceback.format_exc())
return 1
def register():
for _class in classes: bpy.utils.register_class(_class)
bpy.types.World.malt = bpy.props.PointerProperty(type=MaltPipeline)
bpy.app.handlers.depsgraph_update_post.append(depsgraph_update)
bpy.app.handlers.load_post.append(load_scene)
bpy.app.timers.register(track_pipeline_changes, persistent=True)
def unregister():
for _class in classes: bpy.utils.unregister_class(_class)
del bpy.types.World.malt
bpy.app.handlers.depsgraph_update_post.remove(depsgraph_update)
bpy.app.handlers.load_post.remove(load_scene)
bpy.app.timers.unregister(track_pipeline_changes)
| 2.140625
| 2
|
portal/migrations/0007_project_access_personnel.py
|
eugenechia95/Project-Document-Submission-Portal
| 1
|
12779891
|
<reponame>eugenechia95/Project-Document-Submission-Portal<filename>portal/migrations/0007_project_access_personnel.py
# Generated by Django 2.0.7 on 2018-08-08 03:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
('portal', '0006_remove_project_personnel'),
]
operations = [
migrations.AddField(
model_name='project',
name='access_personnel',
field=models.ManyToManyField(blank=True, null=True, to='auth.Group'),
),
]
| 1.609375
| 2
|
loggen.py
|
rockb1017/log-generator
| 2
|
12779892
|
import os
from time import sleep
from datetime import datetime
MESSAGE_COUNT = int(os.getenv("MESSAGE_COUNT", 10000))
SIZE = int(os.getenv("SIZE", 128))
FREQ = float(os.getenv("FREQ", "1"))
MESSAGE_COUNT = max(MESSAGE_COUNT, 5)
MY_HOST = os.getenv("MY_HOST", os.uname()[1])
def print_beginning():
print("---begin---")
def print_ending():
later = datetime.now()
print("generated %d messages in %d seconds" % (MESSAGE_COUNT, int((later - now).total_seconds())))
print("EPS: %d" % (MESSAGE_COUNT / (later - now).total_seconds()))
print("---end---")
def print_log(i):
log_meta = " ".join(["num:", str(i), "|", MY_HOST, "|", datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f"), "|"])
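    # Each line looks like "num: 1 | <host> | 2021-01-01T00:00:00.000000 |" padded
    # with repeated 'r' characters so its length is roughly SIZE bytes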
print(log_meta, "r"*(max(1, SIZE-len(log_meta))))
sleep(FREQ)
now = datetime.now()
i = 1
if MESSAGE_COUNT <= 0:
print_beginning()
while True:
print_log(i)
i += 1
else:
print_beginning()
while i <= MESSAGE_COUNT - 4:
print_log(i)
i += 1
print_ending()
while True:
sleep(60)
| 2.921875
| 3
|
tests/test_discretize.py
|
Arzik1987/wittgenstein
| 64
|
12779893
|
<filename>tests/test_discretize.py
import pandas as pd
import pytest
from wittgenstein.discretize import BinTransformer
def test_bin_ranges_are_flush():
df = pd.read_csv("credit.csv")
bin_transformer_ = BinTransformer()
bin_transformer_.fit(df)
for feat, bins in bin_transformer_.bins_.items():
prev_ceil = None
for bin in bin_transformer_._strs_to_intervals(bins):
floor, ceil = bin.left, bin.right
assert prev_ceil is None or floor == prev_ceil
prev_ceil = ceil
def test_each_bin_in_order():
df = pd.read_csv("credit.csv")
bin_transformer_ = BinTransformer()
bin_transformer_.fit(df)
for feat, bins in bin_transformer_.bins_.items():
bins = bin_transformer_._strs_to_intervals(bins)
assert bins == sorted(bins)
def test_boundless_min_max_bins():
df = pd.read_csv("credit.csv")
bin_transformer_ = BinTransformer()
bin_transformer_.fit(df)
for feat, bins in bin_transformer_.bins_.items():
prev_ceil = None
bins = bin_transformer_._strs_to_intervals(bins)
assert bins[0].left == float('-inf')
assert bins[-1].right == float('inf')
def test_fewer_bins_than_n_discretize_bins():
df = pd.read_csv("credit.csv")
for n in range(2, 20, 5):
bin_transformer_ = BinTransformer(n_discretize_bins=n)
bin_transformer_.fit(df)
for feat, bin_ranges in bin_transformer_.bins_.items():
assert len(bin_ranges) <= n
def test_no_bins():
old_df = pd.read_csv("credit.csv")
df = old_df.copy()
bin_transformer_ = BinTransformer(n_discretize_bins=0)
bin_transformer_.fit(df)
bin_transformer_.transform(df)
assert df.equals(old_df)
| 2.65625
| 3
|
geocoder/geolytica.py
|
termim/geocoder
| 1,506
|
12779894
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import logging
from geocoder.base import OneResult, MultipleResultsQuery
class GeolyticaResult(OneResult):
def __init__(self, json_content):
# create safe shortcuts
self._standard = json_content.get('standard', {})
# proceed with super.__init__
super(GeolyticaResult, self).__init__(json_content)
@property
def lat(self):
lat = self.raw.get('latt', '').strip()
if lat:
return float(lat)
@property
def lng(self):
lng = self.raw.get('longt', '').strip()
if lng:
return float(lng)
@property
def postal(self):
return self.raw.get('postal', '').strip()
@property
def housenumber(self):
return self._standard.get('stnumber', '').strip()
@property
def street(self):
return self._standard.get('staddress', '').strip()
@property
def city(self):
return self._standard.get('city', '').strip()
@property
def state(self):
return self._standard.get('prov', '').strip()
@property
def address(self):
if self.street_number:
return u'{0} {1}, {2}'.format(self.street_number, self.route, self.locality)
elif self.route and self.route != 'un-known':
return u'{0}, {1}'.format(self.route, self.locality)
else:
return self.locality
class GeolyticaQuery(MultipleResultsQuery):
"""
Geocoder.ca
===========
A Canadian and US location geocoder.
API Reference
-------------
http://geocoder.ca/?api=1
"""
provider = 'geolytica'
method = 'geocode'
_URL = 'http://geocoder.ca'
_RESULT_CLASS = GeolyticaResult
_KEY_MANDATORY = False
def _build_params(self, location, provider_key, **kwargs):
params = {
'json': 1,
'locate': location,
'geoit': 'xml'
}
if 'strictmode' in kwargs:
params.update({'strictmode': kwargs.pop('strictmode')})
if 'strict' in kwargs:
params.update({'strict': kwargs.pop('strict')})
if 'auth' in kwargs:
params.update({'auth': kwargs.pop('auth')})
return params
def _adapt_results(self, json_response):
return [json_response]
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
g = GeolyticaQuery('1552 Payette dr., Ottawa')
g.debug()
| 2.40625
| 2
|
src/main/python/test_support/testconfig.py
|
shuangshuangwang/SMV
| 0
|
12779895
|
<gh_stars>0
#
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pyspark import SparkContext
from pyspark.sql import HiveContext
from smv.smvapp import SmvApp
class TestConfig(object):
smvApp = None
@classmethod
def setSmvApp(cls, app):
"""Set the canonical SmvApp
Spark context and sqlContext will be retrieved from this SmvApp.
This SmvApp will also be restored as the singleton after tests are
run.
"""
cls.smvApp = app
cls.sqlc = app.sqlContext
cls.sc = app.sc
@classmethod
def originalSmvApp(cls):
return cls.smvApp
# shared SparkContext
@classmethod
def sparkContext(cls):
if not hasattr(cls, 'sc'):
cls.sc = SparkContext(appName="SMV Python Tests")
return cls.sc
# shared HiveContext
@classmethod
def sqlContext(cls):
if not hasattr(cls, 'sqlc'):
cls.sqlc = HiveContext(cls.sparkContext())
return cls.sqlc
# smv args specified via command line
@classmethod
def smv_args(cls):
if not hasattr(cls, '_smv_args'):
cls.parse_args()
return cls._smv_args
# test names specified via command line
@classmethod
def test_names(cls):
if not hasattr(cls, '_test_names'):
cls.parse_args()
return cls._test_names
    # Parse argv to split up the smv args and the test names
@classmethod
def parse_args(cls):
args = sys.argv[1:]
test_names = []
smv_args = []
while(len(args) > 0):
next_arg = args.pop(0)
if(next_arg == "-t"):
test_names.append( args.pop(0) )
else:
smv_args.append(next_arg)
cls._test_names = test_names
cls._smv_args = smv_args
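    # Example (hypothetical invocation): with
    #   sys.argv == ['run_tests.py', '-t', 'testSmvFoo', '--smv-app-dir', '.']
    # parse_args() leaves _test_names == ['testSmvFoo'] and
    # _smv_args == ['--smv-app-dir', '.'].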
| 1.96875
| 2
|
pydgn/data/util.py
|
terragord7/PyDGN
| 1
|
12779896
|
<reponame>terragord7/PyDGN<gh_stars>1-10
import inspect
import os
import os.path as osp
import warnings
from typing import Optional, Callable
import torch
from torch_geometric.transforms import Compose
from pydgn.data.dataset import DatasetInterface
from pydgn.experiment.util import s2c
def get_or_create_dir(path: str) -> str:
r"""
    Creates directories associated with the specified path if they are missing, and it returns the path string.
Args:
path (str): the path
Returns:
the same path as the given argument
"""
if not os.path.exists(path):
os.makedirs(path)
return path
def check_argument(cls: object, arg_name: str) -> bool:
r"""
Checks whether ``arg_name`` is in the signature of a method or class.
Args:
cls (object): the class to inspect
arg_name (str): the name to look for
Returns:
``True`` if the name was found, ``False`` otherwise
"""
sign = inspect.signature(cls)
return arg_name in sign.parameters.keys()
def filter_adj(edge_index: torch.Tensor, edge_attr: torch.Tensor, mask: torch.Tensor) -> (torch.Tensor,
Optional[torch.Tensor]):
r"""
Adapted from https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/utils/dropout.html.
Does the same thing but with a different signature
Args:
edge_index (torch.Tensor): the usual PyG matrix of edge indices
edge_attr (torch.Tensor): the usual PyG matrix of edge attributes
mask (torch.Tensor): boolean tensor with edges to filter
Returns:
a tuple (filtered edge index, filtered edge attr or ``None`` if ``edge_attr`` is ``None``)
"""
row, col = edge_index
filtered_edge_index = row[mask], col[mask]
return filtered_edge_index, None if edge_attr is None else edge_attr[mask]
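# A minimal usage sketch (hypothetical tensors, not part of the library): given
#   edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]]) and
#   mask = torch.tensor([True, False, True]),
# filter_adj(edge_index, None, mask) returns ((tensor([0, 2]), tensor([1, 0])), None).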
def preprocess_data(options: dict):
r"""
One of the main functions of the PyDGN library. Used to create the dataset and its associated files that ensure
the correct functioning of the data loading steps.
Args:
options (dict): a dictionary of dataset/splitter arguments as defined in the data configuration file used.
"""
data_info = options.pop("dataset")
if "class_name" not in data_info:
raise ValueError("You must specify 'class_name' in your dataset.")
dataset_class = s2c(data_info.pop("class_name"))
dataset_args = data_info.pop("args")
data_root = data_info.pop("root")
################################
# more experimental stuff here
dataset_kwargs = data_info.pop('other_args', {})
pre_transforms = None
pre_transforms_opt = data_info.pop("pre_transform", None)
if pre_transforms_opt is not None:
pre_transforms = []
for pre_transform in pre_transforms_opt:
pre_transform_class = s2c(pre_transform["class_name"])
args = pre_transform.pop("args", {})
pre_transforms.append(pre_transform_class(**args))
dataset_kwargs.update(pre_transform=Compose(pre_transforms))
pre_filters = None
pre_filters_opt = data_info.pop("pre_filter", None)
if pre_filters_opt is not None and check_argument(dataset_class, "pre_filter"):
pre_filters = []
for pre_filter in pre_filters_opt:
pre_filter_class = s2c(pre_filter["class_name"])
args = pre_filter.pop("args", {})
pre_filters.append(pre_filter_class(**args))
dataset_kwargs.update(pre_filter=Compose(pre_filters))
transforms_opt = data_info.pop("transform", None)
if transforms_opt is not None:
transforms = []
for transform in transforms_opt:
transform_class = s2c(transform["class_name"])
args = transform.pop("args", {})
transforms.append(transform_class(**args))
dataset_kwargs.update(transform=Compose(transforms))
dataset_args.update(dataset_kwargs)
################################
dataset = dataset_class(**dataset_args)
assert hasattr(dataset, 'name'), "Dataset instance should have a name attribute!"
# Store dataset additional arguments in a separate file
kwargs_folder = osp.join(data_root, dataset.name, 'processed')
kwargs_path = osp.join(kwargs_folder, 'dataset_kwargs.pt')
get_or_create_dir(kwargs_folder)
torch.save(dataset_args, kwargs_path)
# Process data splits
splits_info = options.pop("splitter")
splits_root = splits_info.pop("root")
if "class_name" not in splits_info:
raise ValueError("You must specify 'class_name' in your splitter.")
splitter_class = s2c(splits_info.pop("class_name"))
splitter_args = splits_info.pop("args")
splitter = splitter_class(**splitter_args)
splits_dir = get_or_create_dir(osp.join(splits_root, dataset.name))
splits_path = osp.join(splits_dir,
f"{dataset.name}_outer{splitter.n_outer_folds}_inner{splitter.n_inner_folds}.splits")
if not os.path.exists(splits_path):
has_targets, targets = splitter.get_graph_targets(dataset)
# The splitter is in charge of eventual stratifications
splitter.split(dataset, targets=targets if has_targets else None)
splitter.save(splits_path)
else:
print("Data splits are already present, I will not overwrite them.")
def load_dataset(data_root: str, dataset_name: str, dataset_class: Callable[..., DatasetInterface]) -> DatasetInterface:
r"""
Loads the dataset using the ``dataset_kwargs.pt`` file created when parsing the data config file.
Args:
data_root (str): path of the folder that contains the dataset folder
dataset_name (str): name of the dataset (same as the name of the dataset folder that has been already created)
dataset_class (Callable[..., :class:`~pydgn.data.dataset.DatasetInterface`]): the class of the dataset to instantiate with the parameters stored in the ``dataset_kwargs.pt`` file.
Returns:
a :class:`~pydgn.data.dataset.DatasetInterface` object
"""
# Load arguments
kwargs_path = osp.join(data_root, dataset_name, 'processed', 'dataset_kwargs.pt')
dataset_args = torch.load(kwargs_path)
# Overwrite original data_root field, which may have changed
dataset_args['root'] = data_root
with warnings.catch_warnings():
# suppress PyG warnings
warnings.simplefilter("ignore")
dataset = dataset_class(**dataset_args)
return dataset
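# A minimal usage sketch (hypothetical paths, config file and dataset class, not part
# of the original module): preprocess_data() is typically driven by a parsed data
# config, after which load_dataset() rebuilds the same dataset from the stored kwargs.
#
#   options = yaml.safe_load(open('DATA_CONFIGS/config_MyDataset.yml'))
#   preprocess_data(options)
#   dataset = load_dataset('DATA/', 'MyDataset', MyDatasetClass)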
| 2.515625
| 3
|
scripts/create_model_structure.py
|
christophstach/htw-icw1-implementation
| 0
|
12779897
|
<reponame>christophstach/htw-icw1-implementation
from discrimnator import Discriminator
from generator import Generator
latent_dimension = 128
image_size = 64
image_channels = 3
generator = Generator(1, image_channels, latent_dimension)
discriminator = Discriminator(1, image_channels)
print(generator)
print(discriminator)
| 2.15625
| 2
|
src/QTableWidget/tablewidget.py
|
Subdue0/pyqt5-demo
| 0
|
12779898
|
<filename>src/QTableWidget/tablewidget.py
import sys
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import *
from FormPageBar.form_page_bar import FormPageBar
from FormPageBar.ui_form_page_bar import Ui_FormPageBar
class TableWidget(QTableWidget):
def __init__(self, parent=None):
super(TableWidget, self).__init__(parent)
self.createContextMenu()
# ContextMenuPolicy must be set to Qt.CustomContextMenu, otherwise the customContextMenuRequested signal cannot be used
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showContextMenu)
# Set the selection behavior so that selection works on whole rows
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.history_record = {'add': [], 'del': []}
# Create the context menu
def createContextMenu(self):
self.contextMenu = QMenu(self)
# The first argument can also be an icon: addAction(const QIcon &icon, const QString &text)
self.add = self.contextMenu.addAction('Add')
self.delete = self.contextMenu.addAction('Delete')
self.undo = self.contextMenu.addAction('Undo')
self.redo = self.contextMenu.addAction('Redo')
self.refresh = self.contextMenu.addAction('Refresh')
self.add.triggered.connect(self.addRow)
self.delete.triggered.connect(self.delRow)
self.refresh.triggered.connect(self.updateRowHeader)
self.redo.triggered.connect(self.clearForm)
self.undo.triggered.connect(self.undoDelOpt)
# Before the menu is shown, initialize it and move it to the position of the mouse click
def showContextMenu(self):
# Check the history: disable undo when there is no record, enable it otherwise
opt_times = len(self.history_record['del'])
if opt_times:
self.undo.setEnabled(True)
else:
self.undo.setEnabled(False)
if len(self.selectedRanges()):
self.add.setEnabled(True)
self.delete.setEnabled(True)
else:
self.add.setEnabled(False)
self.delete.setEnabled(False)
self.contextMenu.move(QtGui.QCursor.pos())
self.contextMenu.show()
def recordData(self, opt, data=None):
if opt == 'add':
print('add')
elif opt == 'del':
print('test')
else:
print('tablewidget.TableWidget.recordData(self, opt): the opt argument must be "add" or "del"')
sys.exit(-1)
def recordLineData(self, row_num, selected_row_opt_data):
row_num = str(row_num)
sum_col = self.getRowColumm()[1]
# Store the row data
single_row_opt_data = []
for j in range(sum_col):
item_data = self.item(int(row_num), j).text()
single_row_opt_data.append(item_data)
selected_row_opt_data[row_num] = single_row_opt_data
# Undo a delete operation, i.e. an insertion. Unlike deletion, rows must be re-inserted from the smallest row number to the largest, otherwise handling gets very hard because the indices affect each other.
def undoDelOpt(self):
recent_opt_data = self.history_record['del'].pop()
del_row_num = list(recent_opt_data.keys())
for i in range(len(del_row_num)):
min_row_num = int(min(del_row_num))
# Insert the smallest row
self.insertRow(min_row_num)
# Set the item for the row header
header_item = QTableWidgetItem()
self.setVerticalHeaderItem(min_row_num, header_item)
# Restore the cell data of the smallest row
for col_num in range(len(recent_opt_data[str(min_row_num)])):
each_item_data = recent_opt_data[str(min_row_num)][col_num]
# Create the item for the smallest row number and set its text
item = QTableWidgetItem()
item.setText(each_item_data)
self.setItem(min_row_num, col_num, item)
# Remove the smallest row number from the list
del_row_num.remove(str(min_row_num))
# Every undo operation affects the layout of the whole table, so refresh it
self.updateRowHeader()
self.updateItemColorAlignment()
# Delete the selected rows. They must be removed from the largest row number to the smallest, otherwise handling gets very messy because the indices affect each other.
def delRow(self):
# Store the selected row numbers
del_row_num = []
# Store the row numbers and the data of the selected rows
selected_row_opt_data = {}
# clicks is the number of non-contiguous selections made with Ctrl+click
clicks = len(self.selectedRanges())
for i in range(clicks):
top = self.selectedRanges()[i].topRow()
bottom = self.selectedRanges()[i].bottomRow()
# Record the data of a single row that is about to be deleted
if top == bottom:
del_row_num.append(top)
else:
# cycles is the total number of rows selected contiguously with Shift or by dragging the mouse
cycles = bottom - top + 1
# Record the data of a multi-row selection that is about to be deleted
for j in range(cycles):
del_row_num.append(top+j)
selected_row_opt_data = {}
for i in range(len(del_row_num)):
max_row_num = max(del_row_num)
self.recordLineData(max_row_num, selected_row_opt_data)
self.removeRow(max_row_num)
del_row_num.remove(max_row_num)
self.history_record['del'].append(selected_row_opt_data)
# Every delete operation affects the layout of the whole table, so refresh it
self.updateRowHeader()
self.updateItemColorAlignment()
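# Illustration of the ordering above (hypothetical row numbers, not part of the
# original widget): if rows 2 and 5 are selected, removing row 2 first would shift
# row 5 up to index 4, so removeRow() is always applied to the current maximum:
#   removeRow(5); removeRow(2)   # both indices stay valid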
# Add a new row after the current rows
def addRow(self):
sum_row = self.getRowColumm()[0]
sum_col = self.getRowColumm()[1]
self.insertRow(sum_row)
# Set the item for the row header
header_item = QTableWidgetItem()
self.setVerticalHeaderItem(sum_row, header_item)
# Create empty items for the cells of the new row
for col_num in range(sum_col):
# Create the item for the newly inserted row
item = QTableWidgetItem()
self.setItem(sum_row, col_num, item)
# Every add operation affects the layout of the whole table, so refresh it
self.updateRowHeader()
self.updateItemColorAlignment()
self.editItem(self.item(sum_row, 0))
# After each add, the previous selection state must be cleared, otherwise the next item cannot be edited
self.setCurrentItem(self.item(sum_row, 0), QtCore.QItemSelectionModel.Clear)
FormPageBar.setPageNum(2, 5)
print(FormPageBar.getPageNum())
self.form_page_num.setText('[1/4] page')
# self.pageBlockDisplay(5)
def getPageTotal(self):
sum_row = self.getRowColumm()[0]
form_page_total = int(sum_row/10) + 1
return form_page_total
def pageBlockDisplay(self, form_cur_page_num):
# Before each page change, the previous edit state must be closed, otherwise the selection gets messed up
self.closePersistentEditor(self.currentItem())
sum_row = self.getRowColumm()[0]
form_page_total = int(sum_row/10) + 1
# Hide all rows
for i in range(sum_row):
self.setRowHidden(i, True)
# Show only the rows of the current page
start = 10*form_cur_page_num - 10
# On the last page, end is simply the total number of rows
if form_cur_page_num == form_page_total:
end = sum_row
else:
end = 10*form_cur_page_num
for i in range(start, end):
self.setRowHidden(i, False)
# Update the row header display
def updateRowHeader(self):
sum_row = self.getRowColumm()[0]
for each_row in range(sum_row):
self.verticalHeaderItem(each_row).setTextAlignment(QtCore.Qt.AlignCenter)
self.verticalHeaderItem(each_row).setText('%s' %(each_row + 1))
# Clear the table text
def clearForm(self):
row_sum, col_sum = self.getRowColumm()
for i in range(row_sum):
for j in range(col_sum):
item = self.item(i, j)
item.setText('')
# Get the number of rows and columns
def getRowColumm(self):
return self.rowCount(), self.columnCount()
# Update the background color and text alignment of all cells
def updateItemColorAlignment(self):
row_sum, col_sum = self.getRowColumm()
for i in range(row_sum):
if not i%2:
for j in range(col_sum):
item = self.item(i, j)
item.setBackground(QtGui.QColor(250, 250, 250))
item.setTextAlignment(QtCore.Qt.AlignCenter)
else:
for j in range(col_sum):
item = self.item(i, j)
item.setBackground(QtGui.QColor(255, 255, 255))
item.setTextAlignment(QtCore.Qt.AlignCenter)
# Set the background color and text alignment of all cells; must be called after the rows and columns have been set up
def setItemColorAlignment(self):
row_sum = self.getRowColumm()[0]
col_sum = self.getRowColumm()[1]
for i in range(row_sum):
if not i%2:
for j in range(col_sum):
item = QTableWidgetItem()
item.setBackground(QtGui.QColor(250, 250, 250))
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.setItem(i, j, item)
else:
for j in range(col_sum):
item = QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.setItem(i, j, item)
| 2.109375
| 2
|
streamlit_app.py
|
CMU-IDS-2022/assignment-2-the-entertainers
| 0
|
12779899
|
<filename>streamlit_app.py
# important libraries
import streamlit as st
import pandas as pd
import altair as alt
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import matplotlib
# Use the full page instead of a narrow central column
st.set_page_config(layout="wide")
# title of application
st.title("Recognizing Patterns in Songs")
# add caching so we load the data only once
@st.cache
def load_data():
# Load song dataset
csv_file = "features_30_sec.csv"
return pd.read_csv(csv_file)
@st.cache
def playAudio(genre, song):
# open and play the audio file
audio_file = open('genres_original/' + genre + '/' + song, 'rb')
audio_bytes = audio_file.read()
return audio_bytes
# load dataset
df = load_data()
# introduction and instructions on application
st.info(
"This application allows users to explore the question, “How can we categorize different songs based on their audio features? What patterns emerge?” Using the visualization, the user will be able to discover relationships and patterns among a song’s audio features, and how that is correlated to the song’s genre. ")
# default features for scatterplot
# multiselect to change features on X and Y axis for scatterplot (first choice is X and second choice is Y)
features = st.multiselect('Choose X and Y features to see how they correlate:',
("harmony_mean", "rms_mean", "chroma_stft_mean", "rolloff_mean", "tempo"),
default=["tempo", "chroma_stft_mean"])
if len(features) < 2:
feature_X = "tempo"
feature_Y = "chroma_stft_mean"
else:
feature_X = str(features[0])
feature_Y = str(features[1])
# dividing the application into columns
col1, col2 = st.columns((2, 1))
# brush (click and drag) for selecting specific values on chart
brush = alt.selection_interval(encodings=["x"])
# selection to allow highlight of genre when click on legend
selection = alt.selection_multi(fields=['label'], bind='legend')
# scatterplot showing the correlation between two features for all genres
scatterplot = alt.Chart(df).mark_circle(size=100).encode(
alt.X(feature_X, scale=alt.Scale(zero=False)),
alt.Y(feature_Y, scale=alt.Scale(zero=False)),
alt.Color('label:N', legend=alt.Legend(title="Genres")),
opacity=alt.condition(selection, alt.value(1), alt.value(0))
, tooltip=['filename', 'label', 'tempo', 'chroma_stft_mean']
).properties(
width=650, height=350
).transform_filter(
brush
).interactive().add_selection(
selection
)
features_pie = df.label.unique() #list(set(feature_X + feature_Y))
nBlues = df[df.label == 'blues'].shape[0]
nClassical = df[df.label == 'classical'].shape[0]
nCountry = df[df.label == 'country'].shape[0]
nDisco = df[df.label == 'disco'].shape[0]
nHiphop = df[df.label == 'hiphop'].shape[0]
nJazz = df[df.label == 'jazz'].shape[0]
nMetal = df[df.label == 'metal'].shape[0]
nPop = df[df.label == 'pop'].shape[0]
nReggae = df[df.label == 'reggae'].shape[0]
nRock = df[df.label == 'rock'].shape[0]
value_list = [nBlues, nClassical,nCountry,nDisco,nHiphop,nJazz,nMetal,nPop,nReggae,nRock]
# facet charts that show grid of scatterplots for each genre
facet = alt.Chart(df).mark_point().encode(
alt.X('tempo:Q'),
alt.Y('chroma_stft_mean:Q'),
alt.Color('label:N', legend=alt.Legend(title="Genres")), facet=alt.Facet('label:N', columns=5),
tooltip=['filename', 'label', 'chroma_stft_mean', 'tempo']
).properties(
width=110,
height=110
).transform_filter(brush).interactive()
# set overview initially to the scatterplot visualization
overview = scatterplot
# creating strip plot visualizations
# strip plot visualization for chroma_stft_mean
chart = alt.Chart(df).mark_tick().encode(
x='chroma_stft_mean', color=alt.condition(brush, "label", alt.value("white"), legend=None),
tooltip=['filename', 'label', 'chroma_stft_mean']
).properties(
width=650, height=45
).add_selection(brush)
# strip plot visualization for rms_mean
chart2 = alt.Chart(df).mark_tick().encode(
x='rms_mean', color=alt.condition(brush, "label", alt.value("white")),
tooltip=['filename', 'label', 'rms_mean'],
).properties(
width=650, height=45
).add_selection(brush)
# strip plot visualization for rolloff_mean
chart3 = alt.Chart(df).mark_tick().encode(
x='rolloff_mean', color=alt.condition(brush, "label", alt.value("white")),
tooltip=['filename', 'label', 'rolloff_mean']
).properties(
width=650, height=45
).add_selection(brush)
# strip plot visualization for harmony_mean
chart4 = alt.Chart(df).mark_tick().encode(
x='harmony_mean', color=alt.condition(brush, "label", alt.value("white")),
tooltip=['filename', 'label', 'harmony_mean']
).properties(
width=650, height=45
).add_selection(brush)
# strip plot visualization for tempo
chart5 = alt.Chart(df).mark_tick().encode(
x='tempo', color=alt.condition(brush, "label", alt.value("white")),
tooltip=['filename', 'label', 'tempo']
).properties(
width=650, height=45
).add_selection(brush)
# elements that go into column 1 (scatterplot and strip plots)
with col1:
st.subheader("Analyze the correlation among genres: ")
# checkbox that changes the scatterplot to a grid of scatterplots for each genre
if st.checkbox("Click for Genre Breakdown"):
overview = facet
# create the scatterplot and strip plot visualizations in a vertical orientation
st.write(overview & chart & chart2 & chart3 & chart4 & chart5)
# elements that go into column 2 (song selection, audio file)
with col2:
st.subheader("Explore the songs: ")
# get list of genres from dataset
genres = df.label.unique()
genre_selection = st.selectbox("Pick a Genre", genres, 0)
# get all the songs in the selected genre
playlist = df[df['label'] == genre_selection].filename
song_selection = st.selectbox("Pick a Song", playlist, 0)
song = playAudio(genre_selection, song_selection)
st.audio(song, format='audio/wav')
# expander - when clicked, shows the written descriptions of the features
with st.expander("See description of features", expanded=True):
st.write("chroma_stft: represents information about the classification of pitch and signal structure, units = intensity")
st.write("rms: a metering tool that measures the average loudness of an audio track within a window of roughly 300 milliseconds, units=intensity")
st.write(
"rolloff: denotes the approximate low bass and high treble limits in a frequency response curve, with all frequencies between being those a speaker will play accurately")
st.write(
"harmony: the process by which the composition of individual sounds, or superpositions of sounds, is analysed by hearing.")
st.write("tempo: how fast or slow a piece of music is performed (units = beats per minute (BPM)")
with st.expander("Genre breakdown pie chart"):
st.subheader("Proportion of genres in the dataset:")
st.subheader("#")
st.subheader("#")
# create pie chart
fff, middlex = plt.subplots(nrows=1, ncols=1, figsize = (2,2))
colors = ('b', 'darkorange','r', 'c', 'g', 'gold', 'm', 'hotpink', 'sienna', 'silver')
middlex.pie(value_list, labels=features_pie, colors = colors, textprops={'fontsize': 6})
st.pyplot(fff)
st.markdown("---")
st.markdown("#")
# dividing the application into columns
col3, col4 = st.columns((2, 1))
#intercorrelation heatmap
with col3:
if True: #st.button('Intercorrelation Heatmap'):
with col4:
st.subheader("Intercorrelation heatmap:")
sorted_unique_features = sorted({'chroma_stft_mean', 'rms_mean', 'tempo', 'rolloff_mean', 'harmony_mean'})
selected_features = st.multiselect('Features to be included in heatmap:', sorted_unique_features, sorted_unique_features)
# Sidebar - Position selection
unique_genres = sorted(df.label.unique())
selected_genres = st.multiselect('Genres to be included in the correlation calculation:', unique_genres, unique_genres)
# Filtering data
df_selected = df[df.label.isin(selected_genres)]
df_selected = df_selected[selected_features]
# Heatmap
corr = df_selected.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask, k=1)] = True
if st.checkbox("Scale from minimum value ro maximum value"):
vmin = corr.min().min()
vmax = corr.max().max()
else:
vmin = int(-1)
vmax = int(1)
with col3:
with sns.axes_style("white"):
f, (leftAx, rightAx ) = plt.subplots(nrows = 1, ncols=2)
ax = sns.heatmap(corr, mask = mask, vmax= vmax, vmin = vmin, square=True, cmap = "rocket_r", ax = leftAx, cbar_kws={"shrink": 0.5}, annot=True,annot_kws = {"size": 8}, fmt = '.2f' )
rightAx.axis('off')
ax.set_xticklabels(ax.get_xmajorticklabels(), fontsize=8)
ax.set_yticklabels(ax.get_ymajorticklabels(), fontsize=8)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=8)
st.pyplot(f)
st.set_option('deprecation.showPyplotGlobalUse', False)
# expander - when clicked, show the raw dataset
with st.expander("See the raw dataset"):
st.write(df)
st.markdown(
"This project was created by <NAME> and <NAME> for the [Interactive Data Science](https://dig.cmu.edu/ids2022) course at [Carnegie Mellon University](https://www.cmu.edu).")
| 4.03125
| 4
|
instructors/course-2015/errors_and_introspection/examples/exception0.py
|
mgadagin/PythonClass
| 46
|
12779900
|
<filename>instructors/course-2015/errors_and_introspection/examples/exception0.py
"""
Course: Exceptions
Topic: View an exception
Summary: What happens if there is an exception? What do we see?
Takeaways: raising an exception -> happens automatically if not handled.
"""
if __name__ == '__main__':
""" Catch an IOerror, which includes any time the file doesn't exist.
"""
# This file doesn't exist, oops!
filename = "does_not_exist.txt"
try:
f = open(filename, 'r')
f.read()
except IOError as e:
print("Error!", e) # we will look at this next example, ignore it for now
# Demonstrate that the application continues
print("...")
print("Continue on with the application...")
| 4.09375
| 4
|
libs/qncloud.py
|
sh-Joshua-python/swiper
| 0
|
12779901
|
from urllib.parse import urljoin
from qiniu import Auth,put_file
from swiper import config
def qn_upload(filename,filepath):
'''Upload a file to Qiniu cloud storage.'''
# Build the authentication object
qn = Auth(config.QN_ACCESS_KEY,config.QN_SECRET_KEY)
# Generate an upload token, valid for 1 hour
token = qn.upload_token(config.QN_BUCKET,filename,3600)
# Upload the file
ret,info = put_file(token,filename,filepath)
if info.ok():
url = urljoin(config.QN_BASEURL,filename)
return True,url
else:
return False,''
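# A minimal usage sketch (hypothetical file names, not part of the original module):
#
#   ok, url = qn_upload('avatar/123.png', '/tmp/123.png')
#   if ok:
#       print('uploaded to', url)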
| 2.328125
| 2
|
src/ks33requests/schemas/s3_sub.py
|
tanbro/ks33requests
| 1
|
12779902
|
#!/usr/bin/env python
#
# Generated Mon Jun 10 11:49:52 2019 by generateDS.py version 2.32.0.
# Python 3.6.7 (default, Oct 22 2018, 11:32:17) [GCC 8.2.0]
#
# Command line options:
# ('-f', '')
# ('-o', 's3_api.py')
# ('-s', 's3_sub.py')
# ('--super', 's3_api')
#
# Command line arguments:
# schemas/AmazonS3.xsd
#
# Command line:
# generateDS.py -f -o "s3_api.py" -s "s3_sub.py" --super="s3_api" schemas/AmazonS3.xsd
#
# Current working directory (os.getcwd()):
# ks33requests
#
import os
import sys
from lxml import etree as etree_
from . import s3_api as supermod
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
parser = etree_.ETCompatXMLParser()
try:
if isinstance(infile, os.PathLike):
infile = os.path.join(infile)
except AttributeError:
pass
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
#
# Globals
#
ExternalEncoding = ''
#
# Data representation classes
#
class CreateBucketSub(supermod.CreateBucket):
def __init__(self, Bucket=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
**kwargs_):
super(CreateBucketSub, self).__init__(Bucket, AccessControlList, AWSAccessKeyId, Timestamp, Signature,
**kwargs_)
supermod.CreateBucket.subclass = CreateBucketSub
# end class CreateBucketSub
class MetadataEntrySub(supermod.MetadataEntry):
def __init__(self, Name=None, Value=None, **kwargs_):
super(MetadataEntrySub, self).__init__(Name, Value, **kwargs_)
supermod.MetadataEntry.subclass = MetadataEntrySub
# end class MetadataEntrySub
class CreateBucketResponseSub(supermod.CreateBucketResponse):
def __init__(self, CreateBucketReturn=None, **kwargs_):
super(CreateBucketResponseSub, self).__init__(CreateBucketReturn, **kwargs_)
supermod.CreateBucketResponse.subclass = CreateBucketResponseSub
# end class CreateBucketResponseSub
class StatusSub(supermod.Status):
def __init__(self, Code=None, Description=None, **kwargs_):
super(StatusSub, self).__init__(Code, Description, **kwargs_)
supermod.Status.subclass = StatusSub
# end class StatusSub
class ResultSub(supermod.Result):
def __init__(self, Status=None, extensiontype_=None, **kwargs_):
super(ResultSub, self).__init__(Status, extensiontype_, **kwargs_)
supermod.Result.subclass = ResultSub
# end class ResultSub
class CreateBucketResultSub(supermod.CreateBucketResult):
def __init__(self, BucketName=None, **kwargs_):
super(CreateBucketResultSub, self).__init__(BucketName, **kwargs_)
supermod.CreateBucketResult.subclass = CreateBucketResultSub
# end class CreateBucketResultSub
class DeleteBucketSub(supermod.DeleteBucket):
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(DeleteBucketSub, self).__init__(Bucket, AWSAccessKeyId, Timestamp, Signature, Credential, **kwargs_)
supermod.DeleteBucket.subclass = DeleteBucketSub
# end class DeleteBucketSub
class DeleteBucketResponseSub(supermod.DeleteBucketResponse):
def __init__(self, DeleteBucketResponse_member=None, **kwargs_):
super(DeleteBucketResponseSub, self).__init__(DeleteBucketResponse_member, **kwargs_)
supermod.DeleteBucketResponse.subclass = DeleteBucketResponseSub
# end class DeleteBucketResponseSub
class BucketLoggingStatusSub(supermod.BucketLoggingStatus):
def __init__(self, LoggingEnabled=None, **kwargs_):
super(BucketLoggingStatusSub, self).__init__(LoggingEnabled, **kwargs_)
supermod.BucketLoggingStatus.subclass = BucketLoggingStatusSub
# end class BucketLoggingStatusSub
class LoggingSettingsSub(supermod.LoggingSettings):
def __init__(self, TargetBucket=None, TargetPrefix=None, TargetGrants=None, **kwargs_):
super(LoggingSettingsSub, self).__init__(TargetBucket, TargetPrefix, TargetGrants, **kwargs_)
supermod.LoggingSettings.subclass = LoggingSettingsSub
# end class LoggingSettingsSub
class GetBucketLoggingStatusSub(supermod.GetBucketLoggingStatus):
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(GetBucketLoggingStatusSub, self).__init__(Bucket, AWSAccessKeyId, Timestamp, Signature, Credential,
**kwargs_)
supermod.GetBucketLoggingStatus.subclass = GetBucketLoggingStatusSub
# end class GetBucketLoggingStatusSub
class GetBucketLoggingStatusResponseSub(supermod.GetBucketLoggingStatusResponse):
def __init__(self, GetBucketLoggingStatusResponse_member=None, **kwargs_):
super(GetBucketLoggingStatusResponseSub, self).__init__(GetBucketLoggingStatusResponse_member, **kwargs_)
supermod.GetBucketLoggingStatusResponse.subclass = GetBucketLoggingStatusResponseSub
# end class GetBucketLoggingStatusResponseSub
class SetBucketLoggingStatusSub(supermod.SetBucketLoggingStatus):
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
BucketLoggingStatus=None, **kwargs_):
super(SetBucketLoggingStatusSub, self).__init__(Bucket, AWSAccessKeyId, Timestamp, Signature, Credential,
BucketLoggingStatus, **kwargs_)
supermod.SetBucketLoggingStatus.subclass = SetBucketLoggingStatusSub
# end class SetBucketLoggingStatusSub
class SetBucketLoggingStatusResponseSub(supermod.SetBucketLoggingStatusResponse):
def __init__(self, **kwargs_):
super(SetBucketLoggingStatusResponseSub, self).__init__(**kwargs_)
supermod.SetBucketLoggingStatusResponse.subclass = SetBucketLoggingStatusResponseSub
# end class SetBucketLoggingStatusResponseSub
class GetObjectAccessControlPolicySub(supermod.GetObjectAccessControlPolicy):
def __init__(self, Bucket=None, Key=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
**kwargs_):
super(GetObjectAccessControlPolicySub, self).__init__(Bucket, Key, AWSAccessKeyId, Timestamp, Signature,
Credential, **kwargs_)
supermod.GetObjectAccessControlPolicy.subclass = GetObjectAccessControlPolicySub
# end class GetObjectAccessControlPolicySub
class GetObjectAccessControlPolicyResponseSub(supermod.GetObjectAccessControlPolicyResponse):
def __init__(self, GetObjectAccessControlPolicyResponse_member=None, **kwargs_):
super(GetObjectAccessControlPolicyResponseSub, self).__init__(GetObjectAccessControlPolicyResponse_member,
**kwargs_)
supermod.GetObjectAccessControlPolicyResponse.subclass = GetObjectAccessControlPolicyResponseSub
# end class GetObjectAccessControlPolicyResponseSub
class GetBucketAccessControlPolicySub(supermod.GetBucketAccessControlPolicy):
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(GetBucketAccessControlPolicySub, self).__init__(Bucket, AWSAccessKeyId, Timestamp, Signature, Credential,
**kwargs_)
supermod.GetBucketAccessControlPolicy.subclass = GetBucketAccessControlPolicySub
# end class GetBucketAccessControlPolicySub
class GetBucketAccessControlPolicyResponseSub(supermod.GetBucketAccessControlPolicyResponse):
def __init__(self, GetBucketAccessControlPolicyResponse_member=None, **kwargs_):
super(GetBucketAccessControlPolicyResponseSub, self).__init__(GetBucketAccessControlPolicyResponse_member,
**kwargs_)
supermod.GetBucketAccessControlPolicyResponse.subclass = GetBucketAccessControlPolicyResponseSub
# end class GetBucketAccessControlPolicyResponseSub
class GranteeSub(supermod.Grantee):
def __init__(self, extensiontype_=None, **kwargs_):
super(GranteeSub, self).__init__(extensiontype_, **kwargs_)
supermod.Grantee.subclass = GranteeSub
# end class GranteeSub
class UserSub(supermod.User):
def __init__(self, extensiontype_=None, **kwargs_):
super(UserSub, self).__init__(extensiontype_, **kwargs_)
supermod.User.subclass = UserSub
# end class UserSub
class AmazonCustomerByEmailSub(supermod.AmazonCustomerByEmail):
def __init__(self, EmailAddress=None, **kwargs_):
super(AmazonCustomerByEmailSub, self).__init__(EmailAddress, **kwargs_)
supermod.AmazonCustomerByEmail.subclass = AmazonCustomerByEmailSub
# end class AmazonCustomerByEmailSub
class CanonicalUserSub(supermod.CanonicalUser):
def __init__(self, ID=None, DisplayName=None, **kwargs_):
super(CanonicalUserSub, self).__init__(ID, DisplayName, **kwargs_)
supermod.CanonicalUser.subclass = CanonicalUserSub
# end class CanonicalUserSub
class GroupSub(supermod.Group):
def __init__(self, URI=None, **kwargs_):
super(GroupSub, self).__init__(URI, **kwargs_)
supermod.Group.subclass = GroupSub
# end class GroupSub
class GrantSub(supermod.Grant):
def __init__(self, Grantee=None, Permission=None, **kwargs_):
super(GrantSub, self).__init__(Grantee, Permission, **kwargs_)
supermod.Grant.subclass = GrantSub
# end class GrantSub
class AccessControlListSub(supermod.AccessControlList):
def __init__(self, Grant=None, **kwargs_):
super(AccessControlListSub, self).__init__(Grant, **kwargs_)
supermod.AccessControlList.subclass = AccessControlListSub
# end class AccessControlListSub
class CreateBucketConfigurationSub(supermod.CreateBucketConfiguration):
def __init__(self, LocationConstraint=None, **kwargs_):
super(CreateBucketConfigurationSub, self).__init__(LocationConstraint, **kwargs_)
supermod.CreateBucketConfiguration.subclass = CreateBucketConfigurationSub
# end class CreateBucketConfigurationSub
class LocationConstraintSub(supermod.LocationConstraint):
def __init__(self, valueOf_=None, **kwargs_):
super(LocationConstraintSub, self).__init__(valueOf_, **kwargs_)
supermod.LocationConstraint.subclass = LocationConstraintSub
# end class LocationConstraintSub
class AccessControlPolicySub(supermod.AccessControlPolicy):
def __init__(self, Owner=None, AccessControlList=None, **kwargs_):
super(AccessControlPolicySub, self).__init__(Owner, AccessControlList, **kwargs_)
supermod.AccessControlPolicy.subclass = AccessControlPolicySub
# end class AccessControlPolicySub
class SetObjectAccessControlPolicySub(supermod.SetObjectAccessControlPolicy):
def __init__(self, Bucket=None, Key=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None,
Signature=None, Credential=None, **kwargs_):
super(SetObjectAccessControlPolicySub, self).__init__(Bucket, Key, AccessControlList, AWSAccessKeyId, Timestamp,
Signature, Credential, **kwargs_)
supermod.SetObjectAccessControlPolicy.subclass = SetObjectAccessControlPolicySub
# end class SetObjectAccessControlPolicySub
class SetObjectAccessControlPolicyResponseSub(supermod.SetObjectAccessControlPolicyResponse):
def __init__(self, **kwargs_):
super(SetObjectAccessControlPolicyResponseSub, self).__init__(**kwargs_)
supermod.SetObjectAccessControlPolicyResponse.subclass = SetObjectAccessControlPolicyResponseSub
# end class SetObjectAccessControlPolicyResponseSub
class SetBucketAccessControlPolicySub(supermod.SetBucketAccessControlPolicy):
def __init__(self, Bucket=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
Credential=None, **kwargs_):
super(SetBucketAccessControlPolicySub, self).__init__(Bucket, AccessControlList, AWSAccessKeyId, Timestamp,
Signature, Credential, **kwargs_)
supermod.SetBucketAccessControlPolicy.subclass = SetBucketAccessControlPolicySub
# end class SetBucketAccessControlPolicySub
class SetBucketAccessControlPolicyResponseSub(supermod.SetBucketAccessControlPolicyResponse):
def __init__(self, **kwargs_):
super(SetBucketAccessControlPolicyResponseSub, self).__init__(**kwargs_)
supermod.SetBucketAccessControlPolicyResponse.subclass = SetBucketAccessControlPolicyResponseSub
# end class SetBucketAccessControlPolicyResponseSub
class GetObjectSub(supermod.GetObject):
def __init__(self, Bucket=None, Key=None, GetMetadata=None, GetData=None, InlineData=None, AWSAccessKeyId=None,
Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(GetObjectSub, self).__init__(Bucket, Key, GetMetadata, GetData, InlineData, AWSAccessKeyId, Timestamp,
Signature, Credential, **kwargs_)
supermod.GetObject.subclass = GetObjectSub
# end class GetObjectSub
class GetObjectResponseSub(supermod.GetObjectResponse):
def __init__(self, GetObjectResponse_member=None, **kwargs_):
super(GetObjectResponseSub, self).__init__(GetObjectResponse_member, **kwargs_)
supermod.GetObjectResponse.subclass = GetObjectResponseSub
# end class GetObjectResponseSub
class GetObjectResultSub(supermod.GetObjectResult):
def __init__(self, Status=None, Metadata=None, Data=None, LastModified=None, ETag=None, **kwargs_):
super(GetObjectResultSub, self).__init__(Status, Metadata, Data, LastModified, ETag, **kwargs_)
supermod.GetObjectResult.subclass = GetObjectResultSub
# end class GetObjectResultSub
class GetObjectExtendedSub(supermod.GetObjectExtended):
def __init__(self, Bucket=None, Key=None, GetMetadata=None, GetData=None, InlineData=None, ByteRangeStart=None,
ByteRangeEnd=None, IfModifiedSince=None, IfUnmodifiedSince=None, IfMatch=None, IfNoneMatch=None,
ReturnCompleteObjectOnConditionFailure=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
Credential=None, **kwargs_):
super(GetObjectExtendedSub, self).__init__(Bucket, Key, GetMetadata, GetData, InlineData, ByteRangeStart,
ByteRangeEnd, IfModifiedSince, IfUnmodifiedSince, IfMatch,
IfNoneMatch, ReturnCompleteObjectOnConditionFailure, AWSAccessKeyId,
Timestamp, Signature, Credential, **kwargs_)
supermod.GetObjectExtended.subclass = GetObjectExtendedSub
# end class GetObjectExtendedSub
class GetObjectExtendedResponseSub(supermod.GetObjectExtendedResponse):
def __init__(self, GetObjectResponse=None, **kwargs_):
super(GetObjectExtendedResponseSub, self).__init__(GetObjectResponse, **kwargs_)
supermod.GetObjectExtendedResponse.subclass = GetObjectExtendedResponseSub
# end class GetObjectExtendedResponseSub
class PutObjectSub(supermod.PutObject):
def __init__(self, Bucket=None, Key=None, Metadata=None, ContentLength=None, AccessControlList=None,
StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(PutObjectSub, self).__init__(Bucket, Key, Metadata, ContentLength, AccessControlList, StorageClass,
AWSAccessKeyId, Timestamp, Signature, Credential, **kwargs_)
supermod.PutObject.subclass = PutObjectSub
# end class PutObjectSub
class PutObjectResponseSub(supermod.PutObjectResponse):
def __init__(self, PutObjectResponse_member=None, **kwargs_):
super(PutObjectResponseSub, self).__init__(PutObjectResponse_member, **kwargs_)
supermod.PutObjectResponse.subclass = PutObjectResponseSub
# end class PutObjectResponseSub
class PutObjectResultSub(supermod.PutObjectResult):
def __init__(self, ETag=None, LastModified=None, **kwargs_):
super(PutObjectResultSub, self).__init__(ETag, LastModified, **kwargs_)
supermod.PutObjectResult.subclass = PutObjectResultSub
# end class PutObjectResultSub
class PutObjectInlineSub(supermod.PutObjectInline):
def __init__(self, Bucket=None, Key=None, Metadata=None, Data=None, ContentLength=None, AccessControlList=None,
StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(PutObjectInlineSub, self).__init__(Bucket, Key, Metadata, Data, ContentLength, AccessControlList,
StorageClass, AWSAccessKeyId, Timestamp, Signature, Credential,
**kwargs_)
supermod.PutObjectInline.subclass = PutObjectInlineSub
# end class PutObjectInlineSub
class PutObjectInlineResponseSub(supermod.PutObjectInlineResponse):
def __init__(self, PutObjectInlineResponse_member=None, **kwargs_):
super(PutObjectInlineResponseSub, self).__init__(PutObjectInlineResponse_member, **kwargs_)
supermod.PutObjectInlineResponse.subclass = PutObjectInlineResponseSub
# end class PutObjectInlineResponseSub
class DeleteObjectSub(supermod.DeleteObject):
def __init__(self, Bucket=None, Key=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
**kwargs_):
super(DeleteObjectSub, self).__init__(Bucket, Key, AWSAccessKeyId, Timestamp, Signature, Credential, **kwargs_)
supermod.DeleteObject.subclass = DeleteObjectSub
# end class DeleteObjectSub
class DeleteObjectResponseSub(supermod.DeleteObjectResponse):
def __init__(self, DeleteObjectResponse_member=None, **kwargs_):
super(DeleteObjectResponseSub, self).__init__(DeleteObjectResponse_member, **kwargs_)
supermod.DeleteObjectResponse.subclass = DeleteObjectResponseSub
# end class DeleteObjectResponseSub
class ListBucketSub(supermod.ListBucket):
def __init__(self, Bucket=None, Prefix=None, Marker=None, MaxKeys=None, Delimiter=None, AWSAccessKeyId=None,
Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(ListBucketSub, self).__init__(Bucket, Prefix, Marker, MaxKeys, Delimiter, AWSAccessKeyId, Timestamp,
Signature, Credential, **kwargs_)
supermod.ListBucket.subclass = ListBucketSub
# end class ListBucketSub
class ListBucketResponseSub(supermod.ListBucketResponse):
def __init__(self, ListBucketResponse_member=None, **kwargs_):
super(ListBucketResponseSub, self).__init__(ListBucketResponse_member, **kwargs_)
supermod.ListBucketResponse.subclass = ListBucketResponseSub
# end class ListBucketResponseSub
class ListVersionsResponseSub(supermod.ListVersionsResponse):
def __init__(self, ListVersionsResponse_member=None, **kwargs_):
super(ListVersionsResponseSub, self).__init__(ListVersionsResponse_member, **kwargs_)
supermod.ListVersionsResponse.subclass = ListVersionsResponseSub
# end class ListVersionsResponseSub
class ListEntrySub(supermod.ListEntry):
def __init__(self, Key=None, LastModified=None, ETag=None, Size=None, Owner=None, StorageClass=None, **kwargs_):
super(ListEntrySub, self).__init__(Key, LastModified, ETag, Size, Owner, StorageClass, **kwargs_)
supermod.ListEntry.subclass = ListEntrySub
# end class ListEntrySub
class VersionEntrySub(supermod.VersionEntry):
def __init__(self, Key=None, VersionId=None, IsLatest=None, LastModified=None, ETag=None, Size=None, Owner=None,
StorageClass=None, **kwargs_):
super(VersionEntrySub, self).__init__(Key, VersionId, IsLatest, LastModified, ETag, Size, Owner, StorageClass,
**kwargs_)
supermod.VersionEntry.subclass = VersionEntrySub
# end class VersionEntrySub
class DeleteMarkerEntrySub(supermod.DeleteMarkerEntry):
def __init__(self, Key=None, VersionId=None, IsLatest=None, LastModified=None, Owner=None, **kwargs_):
super(DeleteMarkerEntrySub, self).__init__(Key, VersionId, IsLatest, LastModified, Owner, **kwargs_)
supermod.DeleteMarkerEntry.subclass = DeleteMarkerEntrySub
# end class DeleteMarkerEntrySub
class PrefixEntrySub(supermod.PrefixEntry):
def __init__(self, Prefix=None, **kwargs_):
super(PrefixEntrySub, self).__init__(Prefix, **kwargs_)
supermod.PrefixEntry.subclass = PrefixEntrySub
# end class PrefixEntrySub
class ListBucketResultSub(supermod.ListBucketResult):
def __init__(self, Metadata=None, Name=None, Prefix=None, Marker=None, NextMarker=None, MaxKeys=None,
Delimiter=None, IsTruncated=None, Contents=None, CommonPrefixes=None, **kwargs_):
super(ListBucketResultSub, self).__init__(Metadata, Name, Prefix, Marker, NextMarker, MaxKeys, Delimiter,
IsTruncated, Contents, CommonPrefixes, **kwargs_)
supermod.ListBucketResult.subclass = ListBucketResultSub
# end class ListBucketResultSub
class ListVersionsResultSub(supermod.ListVersionsResult):
def __init__(self, Metadata=None, Name=None, Prefix=None, KeyMarker=None, VersionIdMarker=None, NextKeyMarker=None,
NextVersionIdMarker=None, MaxKeys=None, Delimiter=None, IsTruncated=None, Version=None,
DeleteMarker=None, CommonPrefixes=None, **kwargs_):
super(ListVersionsResultSub, self).__init__(Metadata, Name, Prefix, KeyMarker, VersionIdMarker, NextKeyMarker,
NextVersionIdMarker, MaxKeys, Delimiter, IsTruncated, Version,
DeleteMarker, CommonPrefixes, **kwargs_)
supermod.ListVersionsResult.subclass = ListVersionsResultSub
# end class ListVersionsResultSub
class ListAllMyBucketsSub(supermod.ListAllMyBuckets):
def __init__(self, AWSAccessKeyId=None, Timestamp=None, Signature=None, **kwargs_):
super(ListAllMyBucketsSub, self).__init__(AWSAccessKeyId, Timestamp, Signature, **kwargs_)
supermod.ListAllMyBuckets.subclass = ListAllMyBucketsSub
# end class ListAllMyBucketsSub
class ListAllMyBucketsResponseSub(supermod.ListAllMyBucketsResponse):
def __init__(self, ListAllMyBucketsResponse_member=None, **kwargs_):
super(ListAllMyBucketsResponseSub, self).__init__(ListAllMyBucketsResponse_member, **kwargs_)
supermod.ListAllMyBucketsResponse.subclass = ListAllMyBucketsResponseSub
# end class ListAllMyBucketsResponseSub
class ListAllMyBucketsEntrySub(supermod.ListAllMyBucketsEntry):
def __init__(self, Name=None, CreationDate=None, **kwargs_):
super(ListAllMyBucketsEntrySub, self).__init__(Name, CreationDate, **kwargs_)
supermod.ListAllMyBucketsEntry.subclass = ListAllMyBucketsEntrySub
# end class ListAllMyBucketsEntrySub
class ListAllMyBucketsResultSub(supermod.ListAllMyBucketsResult):
def __init__(self, Owner=None, Buckets=None, **kwargs_):
super(ListAllMyBucketsResultSub, self).__init__(Owner, Buckets, **kwargs_)
supermod.ListAllMyBucketsResult.subclass = ListAllMyBucketsResultSub
# end class ListAllMyBucketsResultSub
class ListAllMyBucketsListSub(supermod.ListAllMyBucketsList):
def __init__(self, Bucket=None, **kwargs_):
super(ListAllMyBucketsListSub, self).__init__(Bucket, **kwargs_)
supermod.ListAllMyBucketsList.subclass = ListAllMyBucketsListSub
# end class ListAllMyBucketsListSub
class PostResponseSub(supermod.PostResponse):
def __init__(self, Location=None, Bucket=None, Key=None, ETag=None, **kwargs_):
super(PostResponseSub, self).__init__(Location, Bucket, Key, ETag, **kwargs_)
supermod.PostResponse.subclass = PostResponseSub
# end class PostResponseSub
class CopyObjectSub(supermod.CopyObject):
def __init__(self, SourceBucket=None, SourceKey=None, DestinationBucket=None, DestinationKey=None,
MetadataDirective=None, Metadata=None, AccessControlList=None, CopySourceIfModifiedSince=None,
CopySourceIfUnmodifiedSince=None, CopySourceIfMatch=None, CopySourceIfNoneMatch=None,
StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(CopyObjectSub, self).__init__(SourceBucket, SourceKey, DestinationBucket, DestinationKey,
MetadataDirective, Metadata, AccessControlList, CopySourceIfModifiedSince,
CopySourceIfUnmodifiedSince, CopySourceIfMatch, CopySourceIfNoneMatch,
StorageClass, AWSAccessKeyId, Timestamp, Signature, Credential, **kwargs_)
supermod.CopyObject.subclass = CopyObjectSub
# end class CopyObjectSub
class CopyObjectResponseSub(supermod.CopyObjectResponse):
def __init__(self, CopyObjectResult=None, **kwargs_):
super(CopyObjectResponseSub, self).__init__(CopyObjectResult, **kwargs_)
supermod.CopyObjectResponse.subclass = CopyObjectResponseSub
# end class CopyObjectResponseSub
class CopyObjectResultSub(supermod.CopyObjectResult):
def __init__(self, LastModified=None, ETag=None, **kwargs_):
super(CopyObjectResultSub, self).__init__(LastModified, ETag, **kwargs_)
supermod.CopyObjectResult.subclass = CopyObjectResultSub
# end class CopyObjectResultSub
class RequestPaymentConfigurationSub(supermod.RequestPaymentConfiguration):
def __init__(self, Payer=None, **kwargs_):
super(RequestPaymentConfigurationSub, self).__init__(Payer, **kwargs_)
supermod.RequestPaymentConfiguration.subclass = RequestPaymentConfigurationSub
# end class RequestPaymentConfigurationSub
class VersioningConfigurationSub(supermod.VersioningConfiguration):
def __init__(self, Status=None, MfaDelete=None, **kwargs_):
super(VersioningConfigurationSub, self).__init__(Status, MfaDelete, **kwargs_)
supermod.VersioningConfiguration.subclass = VersioningConfigurationSub
# end class VersioningConfigurationSub
class NotificationConfigurationSub(supermod.NotificationConfiguration):
def __init__(self, TopicConfiguration=None, **kwargs_):
super(NotificationConfigurationSub, self).__init__(TopicConfiguration, **kwargs_)
supermod.NotificationConfiguration.subclass = NotificationConfigurationSub
# end class NotificationConfigurationSub
class TopicConfigurationSub(supermod.TopicConfiguration):
def __init__(self, Topic=None, Event=None, **kwargs_):
super(TopicConfigurationSub, self).__init__(Topic, Event, **kwargs_)
supermod.TopicConfiguration.subclass = TopicConfigurationSub
# end class TopicConfigurationSub
def get_root_tag(node):
tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
rootClass = None
rootClass = supermod.GDSClassesMapping.get(tag)
if rootClass is None and hasattr(supermod, tag):
rootClass = getattr(supermod, tag)
return tag, rootClass
def parse(inFilename, silence=False):
parser = None
doc = parsexml_(inFilename, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'CreateBucket'
rootClass = supermod.CreateBucket
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
pretty_print=True)
return rootObj
def parseEtree(inFilename, silence=False):
parser = None
doc = parsexml_(inFilename, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'CreateBucket'
rootClass = supermod.CreateBucket
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
if sys.version_info.major == 2:
from StringIO import StringIO
else:
from io import BytesIO as StringIO
parser = None
doc = parsexml_(StringIO(inString), parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'CreateBucket'
rootClass = supermod.CreateBucket
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"')
return rootObj
def parseLiteral(inFilename, silence=False):
parser = None
doc = parsexml_(inFilename, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'CreateBucket'
rootClass = supermod.CreateBucket
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from s3_api import *\n\n')
sys.stdout.write('import s3_api as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
infilename = args[0]
parse(infilename)
if __name__ == '__main__':
# import pdb; pdb.set_trace()
main()
| 2.09375
| 2
|
aiakos/__main__.py
|
aiakos/aiakos
| 4
|
12779903
|
<reponame>aiakos/aiakos
#!/usr/bin/env python
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aiakos.settings")
from django.core.management import execute_from_command_line # isort:skip
execute_from_command_line(sys.argv)
| 1.289063
| 1
|
lib/googlecloudsdk/core/cli.py
|
IsaacHuang/google-cloud-sdk
| 0
|
12779904
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""A module to make it easy to set up and run CLIs in the Cloud SDK."""
import os.path
import subprocess
import sys
import httplib2
from oauth2client import client
from googlecloudsdk.calliope import cli as calliope
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.credentials import store as c_store
__all__ = ['CLILoader', 'GoogleCloudSDKPackageRoot', 'Credentials', 'Http']
class Error(exceptions.Error):
"""Exceptions for the cli module."""
class CannotRefreshAuthTokenError(Error):
"""An exception raised when the auth tokens fail to refresh."""
def __init__(self, msg):
auth_command = '$ gcloud auth login'
message = ('There was a problem refreshing your current auth tokens: '
'{0}. Please run\n {1}.'.format(msg, auth_command))
super(CannotRefreshAuthTokenError, self).__init__(message)
class NoHelpFoundError(Error):
"""Raised when a help file cannot be located."""
def GetHelp(help_dir):
"""Returns a function that can display long help.
Long help is displayed using the man utility if it's available on
the user's platform. If man is not available, a plain-text version
of help is written to standard out.
Args:
help_dir: str, The path to the directory containing help documents.
Returns:
func([str]), A function that can display help if help_dir exists,
otherwise None.
"""
def Help(path, default=None):
"""Displays help for the given subcommand.
This function first attempts to display help using the man utility.
If man is unavailable, a plain-text version of the help is printed
to standard out.
Args:
path: A path representing the subcommand for which help is being
requested (e.g., ['my-prog', 'my-subcommand'] if help is being
requested for "my-prog my-subcommand").
default: str, Text to print out if no help files can be found.
Raises:
NoHelpFoundError: If man is not available and no help exists for the
given subcommand. Note that if man is available and no help exists,
error reporting is deferred to man.
"""
base = '_'.join(path)
try:
exit_code = subprocess.call(
['man',
'-M', os.path.join(help_dir, 'man'), # Sets the man search path.
base,
])
if exit_code == 0:
return
else:
log.debug('man process returned with exit code %s', exit_code)
except OSError as e:
log.debug('There was a problem launching man: %s', e)
log.debug('Falling back to plain-text help.')
text_help_file_path = os.path.join(help_dir, 'text.long', base)
try:
with open(text_help_file_path) as f:
sys.stdout.write(f.read())
except IOError:
if default:
print default
else:
raise NoHelpFoundError(
'No manual entry for command: {0}'.format(' '.join(path)))
if help_dir and os.path.exists(help_dir):
return Help
else:
return None
def CLILoader(name, command_root_directory, allow_non_existing_modules=False,
version_func=None, help_dir=None):
"""Get a ready-to-go CLI for Cloud SDK tools.
Args:
name: str, The name of your CLI. Should probably be the same as the
executable name.
command_root_directory: str, The absolute path to the tools root.
allow_non_existing_modules: bool, If true, module directories that don't
exist will be ignored rather than cause errors.
version_func: func, Function to call with -v, --version.
help_dir: str, The path to the directory containing help documents or None
if the CLI does not support man pages.
Returns:
calliope.CLILoader, An object that will run the tools from the command
line.
"""
paths = config.Paths()
return calliope.CLILoader(
name=name,
command_root_directory=command_root_directory,
load_context=None,
logs_dir=paths.logs_dir,
allow_non_existing_modules=allow_non_existing_modules,
version_func=version_func,
help_func=GetHelp(help_dir),
)
def GoogleCloudSDKPackageRoot():
return config.GoogleCloudSDKPackageRoot()
def Credentials():
"""Get the currently active credentials.
This function loads account credentials via core.account property
of core.properties module.
These credentials will be refreshed before being returned, so it makes sense
to cache the value returned for short-lived programs.
Returns:
An active, valid credentials object.
Raises:
c_store.Error: If an error loading the credentials occurs.
"""
return c_store.Load()
def Http(auth=True, creds=None, timeout=None):
"""Get an httplib2.Http object for working with the Google API.
Args:
auth: bool, True if the http object returned should be authorized.
creds: oauth2client.client.Credentials, If auth is True and creds is not
None, use those credentials to authorize the httplib2.Http object.
timeout: double, The timeout in seconds to pass to httplib2. This is the
socket level timeout. If timeout is None, timeout is infinite.
Returns:
An authorized httplib2.Http object, or a regular httplib2.Http object if no
credentials are available.
"""
# TODO(user): Have retry-once-if-denied logic, to allow client tools to not
# worry about refreshing credentials.
http = httplib2.Http(timeout=timeout)
if auth:
if not creds:
creds = Credentials()
http = creds.authorize(http)
# Take this out for now because it interferes with apitools ability to
# refresh the token for batch requests. b/18192994
# http = _WrapRequest(http)
return http
def _WrapRequest(http):
"""Wraps the original http.request method with one that wraps an exception.
We need to do this because oauth2client does similar wrapping when you
authorize the http object. Because of this, a credential refresh error
can get raised wherever someone makes an http request. With no common place
to handle this exception, we do more wrapping here so we can convert it to
one of our typed exceptions.
Args:
http: The original http object.
Returns:
http, The same http object but with the request method wrapped.
"""
orig_request = http.request
def NewRequest(*args, **kwargs):
try:
return orig_request(*args, **kwargs)
except client.AccessTokenRefreshError as e:
log.debug('Exception caught during HTTP request: %s', e.message,
exc_info=True)
raise CannotRefreshAuthTokenError(e.message)
http.request = NewRequest
return http
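# A minimal usage sketch (hypothetical request URL, not part of the original module):
# build an authorized http object from the currently active gcloud credentials.
#
#   creds = Credentials()
#   http = Http(auth=True, creds=creds, timeout=30)
#   response, content = http.request('https://www.googleapis.com/discovery/v1/apis')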
| 2.46875
| 2
|
aicsimage/processing/flip.py
|
HelmholtzAI-Consultants-Munich/pytorch_fnet
| 16
|
12779905
|
<filename>aicsimage/processing/flip.py
# Author: <NAME> <<EMAIL>>
import numpy as np
from scipy.ndimage.measurements import center_of_mass
def get_flips(img, sec, axes=(-3, -2, -1)):
"""
Calculates which axes to flip in order to have the center of mass of the image
be located in the desired sector. Meant to be passed to flip()
:param img: image as an n-dimensional numpy array to perform the calculations on.
The image will not be modified by this function
:param sec: String containing '+' and '-', same length as 'axes'. Tells the function
which side of each axis the center of mass should be on, '+' meaning the upper half and
'-' meaning the lower half
>>> get_flips(img, "++-", axes=(-3, -2, -1))
This, for example, would mean to have the center of mass be on the upper half of the z axis
(index -3 for a CZYX image), the upper half of the y axis, and the lower half of the x axis
:param axes: List or tuple of integers, specifies which axes to calculate the needed flips for.
Default is the last three axes, meant to be the 3 spatial dimensions for a ZYX, CZYX, or TCZYX image.
Must be the same length as 'sec' parameter
:return: A list of integers, representing the indices of the axes to flip the image along
Should be passed to flip()
"""
if not isinstance(img, np.ndarray):
raise ValueError("img must be a numpy array")
com = center_of_mass(img)
if len(sec) != len(axes):
raise ValueError("sec and axes must be the same length")
# return object, list of axis indices to flip on
flips = []
for side, axis in zip(sec, axes):
try:
# if we want the center of mass on the upper half
if side == '+':
if com[axis] < (img.shape[axis] // 2):
flips.append(axis)
# if we want it on the lower half
elif side == '-':
if com[axis] > (img.shape[axis] // 2):
flips.append(axis)
else:
raise ValueError("Invalid sector char '{}', must be '+' or '-'".format(side))
except IndexError:
raise ValueError("Out of range axis value " + str(axis))
except TypeError:
raise ValueError("Invalid axis value " + str(axis) + ", must be an integer")
return flips
def flip(images, flips):
"""
Flips images based on the calculations from get_flips()
:param images: Either a single n-dimensional image as a numpy array or a list of them.
The images to flip
:param flips: The output from get_flips(), tells the function which axes to flip the images along
All images will be flipped the same way
:return: Either a single flipped copy of the input image, or a list of them in the same order that they
were passed in, depending on whether the 'images' parameter was a single picture or a list
"""
if isinstance(images, (list, tuple)):
return_list = True
image_list = images
else:
return_list = False
image_list = [images]
out = []
for img in image_list:
# probably the most I've typed 'flip' in my life
flipped = img
for flip_axis in flips:
flipped = np.flip(flipped, flip_axis)
out.append(flipped.copy())
if return_list:
return out
else:
return out[0]
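# A minimal usage sketch (hypothetical array, not part of the original module):
# push the center of mass of a ZYX volume into the upper z half, upper y half and
# lower x half, then apply the same flips to the image.
#
#   img = np.random.rand(16, 64, 64)                  # ZYX volume
#   flips = get_flips(img, "++-", axes=(-3, -2, -1))
#   flipped = flip(img, flips)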
| 3.65625
| 4
|
tests/test_global_torque_driven_with_contact_ocp.py
|
Steakkk/bioptim-1
| 0
|
12779906
|
<reponame>Steakkk/bioptim-1
"""
Test for file IO.
It tests the results of an optimal control problem with the torque_driven_with_contact problem type, checking the proper functioning of:
- the maximize/minimize_predicted_height_CoM objective
- the contact_forces_inequality constraint
- the non_slipping constraint
"""
import importlib.util
from pathlib import Path
import pytest
import numpy as np
from bioptim import Data, OdeSolver
from .utils import TestUtils
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_maximize_predicted_height_CoM(ode_solver):
PROJECT_FOLDER = Path(__file__).parent / ".."
spec = importlib.util.spec_from_file_location(
"maximize_predicted_height_CoM",
str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/maximize_predicted_height_CoM.py",
)
maximize_predicted_height_CoM = importlib.util.module_from_spec(spec)
spec.loader.exec_module(maximize_predicted_height_CoM)
ocp = maximize_predicted_height_CoM.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.5,
number_shooting_points=20,
use_actuators=False,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.7592028279017864)
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (160, 1))
np.testing.assert_almost_equal(g, np.zeros((160, 1)))
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
np.testing.assert_almost_equal(q[:, -1], np.array((0.1189651, -0.0904378, -0.7999996, 0.7999996)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((1.2636414, -1.3010929, -3.6274687, 3.6274687)))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-22.1218282)))
np.testing.assert_almost_equal(tau[:, -1], np.array(0.2653957))
# save and load
TestUtils.save_and_load(sol, ocp, False)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_maximize_predicted_height_CoM_with_actuators(ode_solver):
PROJECT_FOLDER = Path(__file__).parent / ".."
spec = importlib.util.spec_from_file_location(
"maximize_predicted_height_CoM",
str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/maximize_predicted_height_CoM.py",
)
maximize_predicted_height_CoM = importlib.util.module_from_spec(spec)
spec.loader.exec_module(maximize_predicted_height_CoM)
ocp = maximize_predicted_height_CoM.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.5,
number_shooting_points=20,
use_actuators=True,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.21850679397314332)
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (160, 1))
np.testing.assert_almost_equal(g, np.zeros((160, 1)), decimal=6)
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
if ode_solver == OdeSolver.IRK:
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
np.testing.assert_almost_equal(q[:, -1], np.array((-0.2393758, 0.0612086, -0.0006739, 0.0006739)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(
qdot[:, -1], np.array((-4.87675667e-01, 3.28672149e-04, 9.75351556e-01, -9.75351556e-01))
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-0.5509092)))
np.testing.assert_almost_equal(tau[:, -1], np.array(-0.00506117))
else:
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
np.testing.assert_almost_equal(q[:, -1], np.array((-0.2393758, 0.0612086, -0.0006739, 0.0006739)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(
qdot[:, -1], np.array((-4.8768219e-01, 3.2867302e-04, 9.7536459e-01, -9.7536459e-01))
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-0.550905)))
np.testing.assert_almost_equal(tau[:, -1], np.array(-0.0050623))
# save and load
TestUtils.save_and_load(sol, ocp, False)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_contact_forces_inequality_GREATER_THAN_constraint(ode_solver):
PROJECT_FOLDER = Path(__file__).parent / ".."
spec = importlib.util.spec_from_file_location(
"contact_forces_inequality_constraint",
str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/contact_forces_inequality_constraint.py",
)
contact_forces_inequality_GREATER_THAN_constraint = importlib.util.module_from_spec(spec)
spec.loader.exec_module(contact_forces_inequality_GREATER_THAN_constraint)
min_bound = 50
ocp = contact_forces_inequality_GREATER_THAN_constraint.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.3,
number_shooting_points=10,
min_bound=min_bound,
max_bound=np.inf,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14525621569048172)
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
if ode_solver == OdeSolver.IRK:
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (100, 1))
np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
np.testing.assert_array_less(-g[80:], -min_bound)
expected_pos_g = np.array(
[
[50.76334043],
[51.42154006],
[57.79496471],
[64.29700748],
[67.01987853],
[68.32305222],
[67.91820667],
[65.26711376],
[59.57312581],
[50.1847888],
[160.1560585],
[141.16683648],
[85.1060599],
[56.33412288],
[53.32765464],
[52.21769321],
[51.63001946],
[51.2579451],
[50.98768816],
[50.21989568],
]
)
np.testing.assert_almost_equal(g[80:], expected_pos_g)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))
np.testing.assert_almost_equal(q[:, -1], np.array((-0.34054772, 0.1341555, -0.00054332, 0.00054332)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(
qdot[:, -1], np.array((-2.01096899e00, 1.09261741e-03, 4.02193851e00, -4.02193851e00))
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-54.17110048)))
np.testing.assert_almost_equal(tau[:, -1], np.array((-15.69344349)))
else:
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (100, 1))
np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
np.testing.assert_array_less(-g[80:], -min_bound)
expected_pos_g = np.array(
[
[50.76491919],
[51.42493119],
[57.79007374],
[64.29551934],
[67.01905769],
[68.3225625],
[67.91793917],
[65.26700138],
[59.57311867],
[50.18463134],
[160.14834799],
[141.15361769],
[85.13345729],
[56.33535022],
[53.32684286],
[52.21679255],
[51.62923106],
[51.25728666],
[50.9871531],
[50.21972377],
]
)
np.testing.assert_almost_equal(g[80:], expected_pos_g)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))
np.testing.assert_almost_equal(q[:, -1], np.array((-0.34054748, 0.1341555, -0.0005438, 0.0005438)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.01097559, 1.09352001e-03, 4.02195175, -4.02195175)))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-54.1684018)))
np.testing.assert_almost_equal(tau[:, -1], np.array((-15.69338332)))
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol, ocp)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_contact_forces_inequality_LESSER_THAN_constraint(ode_solver):
PROJECT_FOLDER = Path(__file__).parent / ".."
spec = importlib.util.spec_from_file_location(
"contact_forces_inequality_constraint",
str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/contact_forces_inequality_constraint.py",
)
contact_forces_inequality_LESSER_THAN_constraint = importlib.util.module_from_spec(spec)
spec.loader.exec_module(contact_forces_inequality_LESSER_THAN_constraint)
max_bound = 100
ocp = contact_forces_inequality_LESSER_THAN_constraint.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.3,
number_shooting_points=10,
min_bound=-np.inf,
max_bound=max_bound,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14525619649247054)
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))
np.testing.assert_almost_equal(
q[:, -1], np.array((-3.40655617e-01, 1.34155544e-01, -3.27530886e-04, 3.27530886e-04))
)
if ode_solver == OdeSolver.IRK:
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (100, 1))
np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
np.testing.assert_array_less(g[80:], max_bound)
expected_non_zero_g = np.array(
[
[63.27209168],
[63.02302254],
[62.13840892],
[60.38286495],
[57.31035211],
[52.1969189],
[43.95984323],
[31.14447074],
[12.4527049],
[-6.20139005],
[99.0646825],
[98.87878575],
[98.64638238],
[98.3478478],
[97.94940411],
[97.3880652],
[96.53094583],
[95.03988984],
[91.72272481],
[77.29740256],
]
)
np.testing.assert_almost_equal(g[80:], expected_non_zero_g)
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(
qdot[:, -1], np.array((-2.86544932e00, 9.38791617e-04, 5.73089895e00, -5.73089895e00))
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-32.78911887)))
np.testing.assert_almost_equal(tau[:, -1], np.array((-25.1705709)))
else:
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14525619649247054)
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (100, 1))
np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
np.testing.assert_array_less(g[80:], max_bound)
expected_non_zero_g = np.array(
[
[63.27237842],
[63.02339946],
[62.13898369],
[60.38380769],
[57.31193141],
[52.19952395],
[43.9638679],
[31.14938032],
[12.45022537],
[-6.35179034],
[99.06328211],
[98.87711942],
[98.64440005],
[98.34550037],
[97.94667107],
[97.38505013],
[96.52820867],
[95.03979128],
[91.73734926],
[77.48803304],
]
)
np.testing.assert_almost_equal(g[80:], expected_non_zero_g)
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.86650427, 9.38827988e-04, 5.73300901, -5.73300901)))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-32.78862874)))
np.testing.assert_almost_equal(tau[:, -1], np.array((-25.23729156)))
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol, ocp)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_non_slipping_constraint(ode_solver):
PROJECT_FOLDER = Path(__file__).parent / ".."
spec = importlib.util.spec_from_file_location(
"non_slipping_constraint",
str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/non_slipping_constraint.py",
)
non_slipping_constraint = importlib.util.module_from_spec(spec)
spec.loader.exec_module(non_slipping_constraint)
ocp = non_slipping_constraint.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.6,
number_shooting_points=10,
mu=0.005,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.23984490846250128)
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
np.testing.assert_almost_equal(q[:, -1], np.array((-0.02364845, 0.01211471, -0.44685185, 0.44685185)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((-0.08703131, 0.04170362, 0.1930144, -0.1930144)))
if ode_solver == OdeSolver.IRK:
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (120, 1))
np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
np.testing.assert_array_less(-g[80:], 0)
expected_pos_g = np.array(
[
[8.74337995e01],
[8.74671258e01],
[8.75687834e01],
[8.77422815e01],
[8.79913159e01],
[8.83197846e01],
[8.87318042e01],
[8.92317303e01],
[8.98241984e01],
[9.05145023e01],
[4.63475930e01],
[4.63130361e01],
[4.62075073e01],
[4.60271955e01],
[4.57680917e01],
[4.54259739e01],
[4.49963905e01],
[4.44746352e01],
[4.38556794e01],
[4.31334131e01],
[1.33775343e00],
[6.04899683e-05],
[1.33773204e00],
[6.95785710e-05],
[1.33768173e00],
[8.11784388e-05],
[1.33759829e00],
[9.64764544e-05],
[1.33747653e00],
[1.17543268e-04],
[1.33730923e00],
[1.48352207e-04],
[1.33708435e00],
[1.97600315e-04],
[1.33677502e00],
[2.88636405e-04],
[1.33628619e00],
[5.12590351e-04],
[1.33466928e00],
[1.80987563e-03],
]
)
np.testing.assert_almost_equal(g[80:], expected_pos_g)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-14.33813755)), decimal=6)
np.testing.assert_almost_equal(tau[:, -1], np.array((-13.21317493)), decimal=6)
else:
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (120, 1))
np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
np.testing.assert_array_less(-g[80:], 0)
expected_pos_g = np.array(
[
[8.74337995e01],
[8.74671258e01],
[8.75687834e01],
[8.77422814e01],
[8.79913157e01],
[8.83197844e01],
[8.87318039e01],
[8.92317298e01],
[8.98241976e01],
[9.05145013e01],
[4.63475930e01],
[4.63130361e01],
[4.62075073e01],
[4.60271956e01],
[4.57680919e01],
[4.54259742e01],
[4.49963909e01],
[4.44746357e01],
[4.38556802e01],
[4.31334141e01],
[1.33775343e00],
[6.04899894e-05],
[1.33773204e00],
[6.95785950e-05],
[1.33768173e00],
[8.11784641e-05],
[1.33759829e00],
[9.64764869e-05],
[1.33747653e00],
[1.17543301e-04],
[1.33730923e00],
[1.48352248e-04],
[1.33708435e00],
[1.97600363e-04],
[1.33677502e00],
[2.88636453e-04],
[1.33628619e00],
[5.12590377e-04],
[1.33466928e00],
[1.80987419e-03],
]
)
np.testing.assert_almost_equal(g[80:], expected_pos_g)
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-14.33813755)))
np.testing.assert_almost_equal(tau[:, -1], np.array((-13.21317493)))
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol, ocp)
| 2.453125
| 2
|
rostran/core/outputs.py
|
aliyun/alibabacloud-ros-tool-transformer
| 9
|
12779907
|
from .exceptions import InvalidTemplateOutput
class Output:
TYPES = (STRING, NUMBER, LIST, MAP, BOOLEAN) = (
"String",
"Number",
"CommaDelimitedList",
"Json",
"Boolean",
)
def __init__(self, name, value, description=None, condition=None):
self.name = name
self.value = value
self.description = description
self.condition = condition
class Outputs(dict):
def add(self, output: Output):
if output.name is None:
raise InvalidTemplateOutput(
                name=output.name, reason="Output name should not be None"
)
self[output.name] = output
def as_dict(self) -> dict:
data = {}
for key, output in self.items():
value = {"Value": output.value}
if output.description is not None:
value.update({"Description": output.description})
if output.condition is not None:
value.update({"Condition": output.condition})
data[key] = value
return data
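if __name__ == "__main__":
    # Minimal usage sketch; the output names and values below are made up
    # purely to illustrate Outputs.add() and as_dict().
    outputs = Outputs()
    outputs.add(Output("VpcId", value={"Ref": "Vpc"}, description="Created VPC"))
    outputs.add(Output("Endpoint", value="http://example.com", condition="IsProd"))
    print(outputs.as_dict())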
| 2.890625
| 3
|
gccaps/config/prediction.py
|
turab95/gccaps
| 15
|
12779908
|
prediction_epochs = 'val_f1_score'
"""Specification for which models (epochs) to select for prediction.
Either a list of epoch numbers or a string specifying how to select the
epochs. Valid string values include ``'val_acc'``, ``'val_eer'``, and ``'val_f1_score'``.
"""
at_threshold = 0.35
"""number: Number for thresholding audio tagging predictions.
A value of -1 indicates that thresholds should be loaded from disk.
See Also:
:func:`evaluation.compute_thresholds`
"""
sed_threshold = 0.6
"""number: Number for thresholding sound event detection predictions.
A value of -1 indicates that thresholds should be loaded from disk.
See Also:
:func:`evaluation.compute_thresholds`
"""
sed_dilation = 10
"""int: Dilation parameter for binarizing predictions.
See Also:
:func:`inference.binarize_predictions_3d`
"""
sed_erosion = 5
"""int: Erosion parameter for binarizing predictions.
See Also:
:func:`inference.binarize_predictions_3d`
"""
| 1.71875
| 2
|
Day 55 - Higher Lower Flask Game/server.py
|
atulmkamble/100DaysOfCode
| 2
|
12779909
|
<reponame>atulmkamble/100DaysOfCode<gh_stars>1-10
from flask import Flask
from random import randint
app = Flask(__name__)
guess = randint(0, 9)
@app.route('/')
def guess_number():
return '<h1>Guess a number between 0 and 9</h1>' \
'<img src="https://media.giphy.com/media/3o7aCSPqXE5C6T8tBC/giphy.gif">'
@app.route('/guess/<int:number>')
def user_guess(number):
# print(f'The guess is: {guess}')
if number < guess:
return '<h1 style="color: purple">Too low, try again!</h1>' \
'<img src="https://media.giphy.com/media/jD4DwBtqPXRXa/giphy.gif">'
elif number > guess:
return '<h1 style="color: red">Too high, try again!</h1>' \
'<img src="https://media.giphy.com/media/3o6ZtaO9BZHcOjmErm/giphy.gif">'
else:
return '<h1 style="color: green">You found me!</h1>' \
'<img src="https://media.giphy.com/media/4T7e4DmcrP9du/giphy.gif">'
if __name__ == '__main__':
app.run(debug=True)
| 3.21875
| 3
|
mlutils/pt/modules.py
|
linshaoxin-maker/taas
| 4
|
12779910
|
from os import path
import numpy as np
from torch import nn
import torch
def get_embedding(embedding_path=None,
embedding_np=None,
num_embeddings=0, embedding_dim=0, freeze=True, **kargs):
"""Create embedding from:
1. saved numpy vocab array, embedding_path, freeze
2. numpy embedding array, embedding_np, freeze
3. raw embedding n_vocab, embedding_dim
"""
if isinstance(embedding_path, str) and path.exists(embedding_path):
embedding_np = np.load(embedding_path)
if embedding_np is not None:
return nn.Embedding.from_pretrained(torch.Tensor(embedding_np), freeze=freeze)
return nn.Embedding(num_embeddings, embedding_dim, **kargs)
# extract last output in last time step
def extract_last_timestep(output, lengths, batch_first):
"""Get the output of last time step.
output: seq_len x batch_size x dim if not batch_first. Else batch_size x seq_len x dim
length: one dimensional torch.LongTensor of lengths in a batch.
"""
idx = (lengths - 1).view(-1, 1).expand(len(lengths), output.size(2))
time_dimension = 1 if batch_first else 0
idx = idx.unsqueeze(time_dimension)
if output.is_cuda:
idx = idx.cuda(output.data.get_device())
return output.gather(time_dimension, idx).squeeze(time_dimension)
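if __name__ == "__main__":
    # Minimal usage sketch with made-up shapes: build an embedding from a raw
    # vocabulary size, then pick the last valid time step of each sequence.
    emb = get_embedding(num_embeddings=10, embedding_dim=4)
    output = torch.randn(7, 3, 5)           # seq_len x batch_size x dim
    lengths = torch.LongTensor([7, 2, 5])   # valid length of each sequence
    last = extract_last_timestep(output, lengths, batch_first=False)
    print(emb.weight.shape, last.shape)     # torch.Size([10, 4]) torch.Size([3, 5])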
| 2.5
| 2
|
usaspending_api/search/v2/urls_search.py
|
animatecitizen/usaspending-api
| 0
|
12779911
|
<reponame>animatecitizen/usaspending-api<filename>usaspending_api/search/v2/urls_search.py
from django.conf.urls import url
from usaspending_api.search.v2.views import search
from usaspending_api.search.v2.views.spending_by_category import SpendingByCategoryVisualizationViewSet
urlpatterns = [
url(r'^spending_over_time', search.SpendingOverTimeVisualizationViewSet.as_view()),
url(r'^spending_by_category', SpendingByCategoryVisualizationViewSet.as_view()),
url(r'^spending_by_geography', search.SpendingByGeographyVisualizationViewSet.as_view()),
url(r'^spending_by_award_count', search.SpendingByAwardCountVisualizationViewSet.as_view()),
url(r'^spending_by_award', search.SpendingByAwardVisualizationViewSet.as_view()),
url(r'^spending_by_transaction_count', search.SpendingByTransactionCountVisualizaitonViewSet.as_view()),
url(r'^spending_by_transaction', search.SpendingByTransactionVisualizationViewSet.as_view()),
url(r'^transaction_spending_summary', search.TransactionSummaryVisualizationViewSet.as_view())
]
| 1.648438
| 2
|
Amiga/loadseg.py
|
thanasisk/binja-amiga
| 5
|
12779912
|
# coding=utf-8
"""
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import binaryninja
from .amigahunk import AmigaHunk
from .constants import HUNKTYPES
class AmigaLoadSeg(AmigaHunk):
name :str = 'AmigaLoadSeg'
long_name :str = 'Amiga 500 LoadSeg format'
loadseg_magic :bytes = b"\x00\x00\x03\xf3"
def __init__(self, data)->None:
super().__init__(data)
if self.is_valid_for_data(self.data):
self.create_segments()
def create_segments(self)->None:
hunktypes :list = []
numhunks :int = 0
self.br.seek(0x08)
numhunks = self.br.read32be()
first_hunk :int = self.br.read32be()
last_hunk :int = self.br.read32be()
self.br.seek_relative(0x04)
binaryninja.log_debug("%d %d %d %d" % (len(self.data),numhunks, first_hunk, last_hunk))
for i in range(0, numhunks):
hunktypes.append(self.br.read32be())
self.parse_hunktype(hunktypes[i])
@classmethod
def is_valid_for_data(self, data)->bool:
header :bytes = data.read(0,8)
strings :bytes = header[4:8]
self.is_loadseg :bool = header[0:4] == b"\x00\x00\x03\xf3"
if strings != b"\x00\x00\x00\x00" and self.is_loadseg == True:
binaryninja.log_error("λ - Unsupported LOADSEG file")
return False
return self.is_loadseg
| 2.234375
| 2
|
pddm/envs/fetch/__init__.py
|
Ray006/PDDM_FetchEnv
| 0
|
12779913
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gym.envs.registration import register
register(
id='MB_FetchSlide-v1',
entry_point='pddm.envs.fetch.slide:FetchSlideEnv',
max_episode_steps=50,
)
register(
id='MB_FetchPush-v1',
entry_point='pddm.envs.fetch.push:FetchPushEnv',
max_episode_steps=50,
)
register(
id='MB_FetchPickAndPlace-v1',
entry_point='pddm.envs.fetch.pick_and_place:FetchPickAndPlaceEnv',
max_episode_steps=50,
)
register(
id='MB_FetchReach-v1',
entry_point='pddm.envs.fetch.reach:FetchReachEnv',
max_episode_steps=50,
)
| 1.71875
| 2
|
inconsistent_system_solver.py
|
AntonBazdyrev/SystemAnalysis_Lab2
| 0
|
12779914
|
<reponame>AntonBazdyrev/SystemAnalysis_Lab2
from numpy import linalg
import numpy as np
class InconsistentSystemSolver:
def __init__(self, method='conjugate'):
self.method = method if method in ['lstsq', 'conjugate'] else 'conjugate'
def conjugate_grad(self, A, b, x=None, eps=1e-5):
"""
Description
-----------
Solve a linear equation Ax = b with conjugate gradient method.
Parameters
----------
A: 2d numpy.array of positive semi-definite (symmetric) matrix
b: 1d numpy.array
x: 1d numpy.array of initial point
Returns
-------
1d numpy.array x such that Ax = b
"""
n = A.shape[1]
        if x is None:
x = np.ones(n)
r = A @ x - b
p = - r
r_k_norm = np.dot(r, r)
for i in range(2 * n):
Ap = np.dot(A, p)
alpha = r_k_norm / np.dot(p, Ap)
x += alpha * p
r += alpha * Ap
r_kplus1_norm = np.dot(r, r)
beta = r_kplus1_norm / r_k_norm
r_k_norm = r_kplus1_norm
if r_kplus1_norm < eps:
# print ('Itr:', i)
break
p = beta * p - r
return x
def nonlinear_conjugate_grad(self, A, b, eps=1e-5):
return self.conjugate_grad(A.T @ A, A.T @ b, eps=eps)
def lstsq(self, A, b):
return linalg.lstsq(A, b)[0]
def solve(self, A, b):
return self.lstsq(A, b) if self.method == 'lstsq' else self.nonlinear_conjugate_grad(A, b)
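if __name__ == "__main__":
    # Minimal usage sketch: an overdetermined 3x2 system solved in the
    # least-squares sense with both solver configurations.
    A = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
    b = np.array([1.0, 2.0, 2.0])
    print(InconsistentSystemSolver(method='conjugate').solve(A, b))
    print(InconsistentSystemSolver(method='lstsq').solve(A, b))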
| 3.078125
| 3
|
JSONtemplate.py
|
JacobParrott/OccultationProfiler
| 4
|
12779915
|
# FORM A TEMPLATE JSON FILE
# Profile beginning
JSONstart = '''
{
"version": "1.0",
"name": "Occultation Profile",
"items": [
'''
# empty string required to grow from
JSONiterated = ''
# this one has a trailing comma; the length of the profile sets how many times it is iterated
JSONiterable = '''
{
"name": "Profile Iterate",
"center": "Mars",
"trajectory": {
"type": "FixedPoint",
"position": [
0.0,
0.0,
0.0
]
},
"bodyFrame": {
"type": "BodyFixed",
"body": "Mars"
},
"geometry": {
"type": "ParticleSystem",
"emitters": [
{
"texture": "gaussian.jpg",
"generator": {
"type": "Strip",
"states": []
},
"velocityVariation": 8,
"trace": 2,
"spawnRate": 20,
"lifetime": 30,
"startSize": 5,
"endSize": 10,
"colors": [
"#00ff00",
0.000,
"#ff0000",
0.00,
"#0000ff",
0.005,
"#fffff",
0.005
],
"startTime":"2020-01-01 12:00:00",
"endTime": "2020-01-01 14:30:00"
}
]
}
},
'''
# must be placed at the end of the JSON stack (no trailing comma on the last item)
JSONend = '''
{
"name": "Final Profile [high in the ionosphere]",
"center": "Mars",
"trajectory": {
"type": "FixedPoint",
"position": [
0.0,
0.0,
0.0
]
},
"bodyFrame": {
"type": "BodyFixed",
"body": "Mars"
},
"geometry": {
"type": "ParticleSystem",
"emitters": [
{
"texture": "gaussian.jpg",
"generator": {
"type": "Strip",
"states": []
},
"velocityVariation": 2,
"trace": 2,
"spawnRate": 100,
"lifetime": 30,
"startSize": 5,
"endSize": 100,
"colors": [
"#111111",
0.0,
"#ffffff",
0.00,
"#111111",
0.045,
"#0000ff",
0.0
],
"startTime":"2020-01-01 12:00:00",
"endTime": "2020-01-01 14:30:00"
}
]
}
}
]
}
'''
| 2.28125
| 2
|
modules/lastline.py
|
nidsche/viper
| 0
|
12779916
|
lastlineKEY = ""
lastlineTOKEN = ""
lastlinePORTALACCOUNT = ""
import json
try:
import requests
HAVE_REQUESTS = True
except ImportError:
HAVE_REQUESTS = False
from viper.common.abstracts import Module
from viper.core.session import __sessions__
BASE_URL = 'https://analysis.lastline.com'
SUBMIT_URL = '/analysis/submit/file'
class LastLine(Module):
cmd = 'lastline'
description = 'Submit files and retrieve reports from LastLine (default will print short summary) '
authors = ['gelos']
def __init__(self):
super(LastLine, self).__init__()
self.parser.add_argument('-s', '--submit', action='store_true', help='Submit file to LastLine')
self.parser.add_argument('-r','--report', action='store_true', help='Get report from LastLine')
def run(self):
super(LastLine, self).run()
if self.args is None:
return
if not HAVE_REQUESTS:
self.log('error', "Missing dependency, install requests (`pip install requests`)")
return
if not __sessions__.is_set():
self.log('error', "No session opened")
return
if self.args.submit:
try:
file = {'file' : open(__sessions__.current.file.path, 'rb').read()}
data = {'key':lastlineKEY, 'api_token':lastlineTOKEN,'push_to_portal_account':lastlinePORTALACCOUNT}
response = requests.post(BASE_URL+SUBMIT_URL,data=data,files=file)
response = response.json()
if response['success'] == 0:
self.log('error',response['error'])
return
if response['success'] == 1:
self.log('info','Successfully submitted file to LastLine, task UUID: '+response['data']['task_uuid'])
return
except Exception as e:
self.log('error', "Failed performing request: {0}".format(e))
return
try:
data = {'key':lastlineKEY, 'api_token':lastlineTOKEN,'md5':__sessions__.current.file.md5,'push_to_portal_account':lastlinePORTALACCOUNT}
response = requests.post(BASE_URL+SUBMIT_URL,data=data)
response = response.json()
if response['success'] == 0:
self.log('error',response['error'])
return
if response['success'] == 1:
self.log('info', "LastLine Report:")
if self.args.report:
self.log('',json.dumps(response,indent=4,sort_keys=False))
return
#file malicious scoring
if 'score' in response['data']:
self.log('info','Malicious score: '+str(response['data']['score']))
if 'submission' in response['data']:
self.log('info','Submission date: '+str(response['data']['submission']))
                # generating malicious activity list
if 'malicious_activity' in response['data']:
malicous_activity = []
i = 0
while(i < len(response['data']['malicious_activity'])):
malicous_activity.append([i,response['data']['malicious_activity'][i]])
i += 1
self.log('table', dict(header=['id', 'Malicious Activity'], rows=malicous_activity))
#generating url_summary list
if 'url_summary' in response['data']['report']:
url_summary = []
i = 0
while (i < len(response['data']['report']['url_summary'])):
url_summary.append([i,response['data']['report']['url_summary'][i]])
i += 1
self.log('table', dict(header=['id', 'URL Found'], rows=url_summary))
return
except Exception as e:
self.log('error', "Failed performing request: {0}".format(e))
| 2.4375
| 2
|
modality/sysml14/modelelements/__init__.py
|
bmjjr/modality
| 1
|
12779917
|
# -*- coding: utf-8 -*-
from .modelelements import getEClassifier, eClassifiers
from .modelelements import name, nsURI, nsPrefix, eClass
from .modelelements import (
Conform,
ElementGroup,
Expose,
Problem,
Rationale,
Stakeholder,
View,
Viewpoint,
)
from modality.pyuml2.uml import (
Generalization,
Class,
Comment,
Behavior,
Dependency,
Classifier,
Element,
)
from . import modelelements
from .. import sysml14
__all__ = [
"Conform",
"ElementGroup",
"Expose",
"Problem",
"Rationale",
"Stakeholder",
"View",
"Viewpoint",
]
eSubpackages = []
eSuperPackage = sysml14
modelelements.eSubpackages = eSubpackages
modelelements.eSuperPackage = eSuperPackage
otherClassifiers = []
for classif in otherClassifiers:
eClassifiers[classif.name] = classif
classif.ePackage = eClass
for classif in eClassifiers.values():
eClass.eClassifiers.append(classif.eClass)
for subpack in eSubpackages:
eClass.eSubpackages.append(subpack.eClass)
| 2.09375
| 2
|
primes.py
|
chapman-phys220-2018f/cw04-ben-jessica
| 0
|
12779918
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import primes
# <NAME>
# <EMAIL>
# 2300326
# Phys 220
# Cw04
###
# Name: <NAME>
# Student ID: 2262810
# Email: <EMAIL>
# Course: PHYS220/MATH220/CPSC220 Fall 2018
# Assignment: CW03
###
"""primes.py Test Module
Verifies that the implementations for prime number generators are correct.
Here are the primes less than n=200 for completeness:
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199
"""
def eratosthenes_last():
"""Verify the largest prime number under 200.
Note: The equality test for the assert here only works because the
integers inside the list support exact equality.
Question: What data structures work with this sort of test?
"""
assert primes.eratosthenes(200)[-1] == 199
def eratosthenes(n):
    """Verify the number of primes less than 200.
    """
    # Build a reference sieve locally. The list is named 'sieve' so it does
    # not shadow the imported 'primes' module used in the assertion below.
    sieve = []
    candidates = list(range(2, n))
    while candidates:
        p = candidates.pop(0)
        sieve.append(p)
        candidates = [c for c in candidates if c % p != 0]
    assert len(primes.eratosthenes(200)) == 46
    print(sieve)
    return sieve
def gen_eratosthenes_last():
"""Verify the largest prime number under 200.
"""
g = primes.gen_eratosthenes()
p = next(g)
p2 = next(g)
while p2 < 200:
p, p2 = p2, next(g)
assert p == 199
def gen_eratosthenes(n):
"""Verify the number of primes less than 200.
"""
g = primes.gen_eratosthenes()
ps = [next(g)]
while ps[-1] < 200:
ps.append(next(g))
assert len(ps[:-1]) == 46
| 4.34375
| 4
|
src/spoofbot/util/__init__.py
|
raember/spoofbot
| 0
|
12779919
|
"""Utility module for handy features to be used in conjunction with the core modules"""
from .archive import load_flows
from .common import TimelessRequestsCookieJar
from .common import dict_to_dict_list, url_to_query_dict_list
from .common import dict_to_tuple_list, dict_list_to_dict, dict_list_to_tuple_list, \
dict_to_str, cookie_header_to_dict
from .common import encode_form_data, header_to_snake_case, coerce_content
from .file import load_response, to_filepath, to_url, get_symlink_path
from .har import HarFile, Har, Log, Entry, Creator, Browser, PageTimings, Page, Cache, \
CacheStats, Timings, JsonObject
from .http import ReferrerPolicy
from .http import are_same_origin, are_same_origin_domain, are_schemelessly_same_site, \
are_same_site
from .http import is_ip, is_domain
from .http import opaque_origin, origin_tuple, OriginTuple
from .http import sort_dict
from .versions import get_latest, get_firefox_versions, get_chrome_versions, get_versions_since, random_version
| 1.757813
| 2
|
online_judge/helpers/session.py
|
ashimaathri/online-judge
| 0
|
12779920
|
from functools import wraps
from flask import session, url_for, request, redirect
def is_authenticated():
return 'username' in session
def login_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if is_authenticated():
return f(*args, **kwargs)
else:
return redirect(url_for('auth.display_login_form') + '?next={}'.format(request.path))
return wrapper
def redirect_if_authenticated(f):
@wraps(f)
def wrapper(*args, **kwargs):
if is_authenticated():
try:
return redirect(request.args['next'])
except KeyError:
return redirect(url_for('home_page.display_problem_list'))
else:
return f(*args, **kwargs)
return wrapper
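if __name__ == "__main__":
    # Minimal sketch (throwaway app, not part of the judge): exercise
    # is_authenticated() inside a test request context.
    from flask import Flask
    app = Flask(__name__)
    app.secret_key = "dev-only"
    with app.test_request_context('/'):
        print(is_authenticated())   # False: no 'username' in the session yet
        session['username'] = 'alice'
        print(is_authenticated())   # True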
| 2.828125
| 3
|
src/rotary_table_api/rotary_table_messages.py
|
CebulowyNinja/rotary-table-antenna-measurement
| 1
|
12779921
|
<filename>src/rotary_table_api/rotary_table_messages.py<gh_stars>1-10
from abc import ABC, abstractmethod
from typing import Type
import crc8
def round_to(val: float, precision: float):
if val % precision < precision/2:
val -= val % precision
else:
val += precision - val % precision
return val
ADDRESS_LENGTH = 4
PREAMBLE = b"\x5D"
class Request(ABC):
def __init__(self, address: int):
self.address = address
@property
def address(self) -> int:
return self.__address
@address.setter
def address(self, address: int):
if address > 2**ADDRESS_LENGTH-1:
raise ValueError("Rotary table message address must be lower than 16.")
else:
self.__address = address
@abstractmethod
def get_command(self) -> int:
        raise NotImplementedError("Method get_command() must be overridden.")
def get_body(self) -> bytes:
return bytes(3)
def get_content(self) -> bytes:
header = self.address << (8-ADDRESS_LENGTH) | self.get_command()
return PREAMBLE + header.to_bytes(1, byteorder="big") + self.get_body()
def get_CRC(self) -> bytes:
hash = crc8.crc8()
hash.update(self.get_content())
return hash.digest()
def to_bytes(self) -> bytes:
return self.get_content() + self.get_CRC()
def __eq__(self, other):
if isinstance(other, Request):
return self.get_content() == other.get_content()
return False
class RequestGetStatus(Request):
def get_command(self) -> int:
return 0
class RequestSetHome(Request):
def get_command(self) -> int:
return 1
class RequestHalt(Request):
def get_command(self) -> int:
return 2
class RequestDisable(Request):
def get_command(self) -> int:
return 3
class RequestGetConverterStatus(Request):
def get_command(self) -> int:
return 5
RPM_PRECISION = 0.25
SPEED_MAX = 2**7
RPM_MAX = SPEED_MAX*RPM_PRECISION
ANGLE_FRACTION_LENGTH = 7
ANGLE_PRECISION = 2**-3
def rpm_to_bytes(rpm: float) -> bytes:
return int(rpm/RPM_PRECISION).to_bytes(1, byteorder="big", signed=True)
def rpm_from_bytes(data: bytes) -> float:
if len(data) != 1:
raise ValueError("rpm_from_bytes() argument must has length = 1")
return int.from_bytes(data, byteorder="big", signed=True)*RPM_PRECISION
def angle_to_bytes(angle: float) -> bytes:
angle_fp = int(angle * 2**ANGLE_FRACTION_LENGTH)
return angle_fp.to_bytes(2, byteorder="big")
def angle_from_bytes(data: bytes) -> float:
if len(data) != 2:
raise ValueError("angle_from_bytes() argument must has length = 2")
return int.from_bytes(data, byteorder="big") * 2**-ANGLE_FRACTION_LENGTH
class RequestRotate(Request):
def __init__(self, address: int, angle: float, rpm: float):
self.address = address
self.rpm = rpm
self.angle = angle
@property
def rpm(self) -> float:
return self.__rpm
@rpm.setter
def rpm(self, rpm: float):
if abs(rpm) > RPM_MAX:
raise ValueError(f"Rotary table message rpm must be between {-RPM_MAX} and {RPM_MAX} .")
self.__rpm = round_to(rpm, RPM_PRECISION)
@property
def angle(self) -> float:
return self.__angle
@angle.setter
def angle(self, angle: float):
angle = angle % 360
self.__angle = round_to(angle, ANGLE_PRECISION)
def get_command(self) -> int:
return 4
def get_body(self) -> bytes:
return angle_to_bytes(self.angle) + rpm_to_bytes(self.rpm)
REPONSE_LENGTH = 9
class Response():
def __init__(self, data: bytes):
if len(data) != REPONSE_LENGTH:
raise ValueError(f"Response data must be {REPONSE_LENGTH} bits length.")
self.__preamble = data[0:1]
self.__payload = data[1:-1]
self.__crc = data[-1:]
@property
def payload(self) -> bytes:
return self.__payload
@property
def preamble(self) -> bytes:
return self.__preamble
@property
def crc(self) -> bytes:
return self.__crc
@property
def address(self) -> int:
if len(self.payload) < 1:
return None
return self.payload[0] >> ADDRESS_LENGTH
@property
def response_header(self) -> int:
if len(self.payload) < 1:
return None
return self.payload[0] & (2**ADDRESS_LENGTH-1)
def calc_CRC(self) -> bytes:
hash = crc8.crc8()
hash.update(self.preamble)
hash.update(self.payload)
return hash.digest()
@property
def is_valid(self) -> bool:
return self.preamble == PREAMBLE and self.calc_CRC() == self.crc
def __eq__(self, other):
if isinstance(other, Response):
return self.payload == other.payload
return False
IS_VOLTAGE_OK_MASK = 0b1
IS_MOTOR_OK_MASK = 0b1
IS_ROTATING_MASK = 0b1<<1
IS_ENABLED_MASK = 0b1<<2
IS_CRC_VALID_MASK = 0b1<<3
VOLTAGE_FRACTION_LENGTH = 4
class ResponseMotorStatus(Response):
@property
def status(self) -> int:
return self.payload[1]
@property
def is_motor_OK(self) -> bool:
return (self.status & IS_MOTOR_OK_MASK) > 0
@property
def is_rotating(self) -> bool:
return (self.status & IS_ROTATING_MASK) > 0
@property
def is_enabled(self) -> bool:
return (self.status & IS_ENABLED_MASK) > 0
@property
def is_crc_valid(self) -> bool:
return (self.status & IS_CRC_VALID_MASK) > 0
@property
def current_angle(self) -> float:
return angle_from_bytes(self.payload[2:4])
@property
def target_angle(self) -> float:
return angle_from_bytes(self.payload[4:6])
@property
def rpm(self) -> float:
return rpm_from_bytes(self.payload[6:7])
class ResponseConverterStatus(Response):
@property
def status(self) -> int:
return self.payload[1]
@property
def is_voltage_OK(self) -> bool:
return (self.status & IS_VOLTAGE_OK_MASK) > 0
@property
def voltage(self) -> float:
return self.payload[2] * 2**-VOLTAGE_FRACTION_LENGTH
@property
def reserved_data(self) -> bytes:
return self.payload[3:]
def parse_response(data: bytes) -> Type[Response]:
resp = Response(data)
if resp.response_header == 0xE:
return ResponseConverterStatus(data)
elif resp.response_header == 0xF:
return ResponseMotorStatus(data)
return resp
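if __name__ == "__main__":
    # Minimal usage sketch (illustrative values only): build a rotate request
    # for table address 1 and print the raw frame, then parse a fabricated
    # motor-status frame (header 0xF) to show the round trip.
    req = RequestRotate(address=1, angle=90.0, rpm=1.5)
    print(req.to_bytes().hex())
    frame = PREAMBLE + bytes([1 << ADDRESS_LENGTH | 0xF]) + bytes(6)
    hasher = crc8.crc8()
    hasher.update(frame)
    resp = parse_response(frame + hasher.digest())
    print(type(resp).__name__, resp.is_valid, resp.current_angle)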
| 2.9375
| 3
|
data/data_split/label_split_utils.py
|
z1021190674/GMAUResNeXt_RS
| 1
|
12779922
|
<filename>data/data_split/label_split_utils.py
import numpy as np
import cv2
import os
# from osgeo import gdal_array
def color2label(img, color2label_dict):
"""
Description:
transform an colored label image to label image
Params:
img -- image of shape (height, width, channel)
color2label_dict -- a dict of the following type
color2label_dict = {
"255,255,255": 0,
"0,0,255": 1,
"0,255,255": 2,
"0,255,0": 3,
"255,255,0": 4,
"255,0,0": 5,
}
Return:
label -- label of the image according to the color2label_dict,
a nparray of shape(height, width)
"""
label = np.zeros((img.shape[0],img.shape[1]))
# iterate over the image
for i in range(img.shape[0]):
for j in range(img.shape[1]):
# turn one pixel into label
pixel_str = str(img[i,j][0]) + ',' + str(img[i,j][1]) + ',' + str(img[i,j][2])
label[i,j] = color2label_dict[pixel_str]
print(i)
return label
def save_color2label(data_list, root_path, target_dir, color2label_dict):
"""
Description:
transform the image to label according to the dict, and save it to the target directory
Params:
data_list -- data list of the split data
root_path -- root of the data_list
target_dir -- the directory to save the label image
color2label_dict -- the dict of the colored segmentation map to the label
Notice:
the data in the data list
"""
if not os.path.exists(target_dir):
# os.system('mkdir -p ' + root_path)
os.makedirs(target_dir)
for i in range(len(data_list)):
# get the path
path = os.path.join(root_path, data_list[i])
target_path = os.path.join(target_dir, data_list[i])
# transform color to label
img = cv2.imread(path)[:,:,[2,1,0]]
# img = gdal_array.LoadFile(path).transpose(1,2,0)
label = color2label(img, color2label_dict)
# for test
# img =label2color(label.astype(np.uint8))
# cv2.imshow("img",img)
# cv2.waitKey()
cv2.imwrite(target_path, label)
def label2color(label, label2color_dict={}):
"""
Description:
transform a label image to a colored label rgb image
Params:
label -- label of shape (height, width)
label2color_dict -- a dict of the following type
{
"0": [255,255,255],
"1": [0,0,255],
"2": [0,255,255],
"3": [0,255,0],
"4": [255,255,0],
"5": [255,0,0],
}
Return:
image -- image of the label according to the label2color_dict,
a nparray of shape(height, width, 3)
"""
if len(label2color_dict) == 0:
# Potsdam
label2color_dict = {
"0": [255,255,255],
"1": [0,0,255],
"2": [0,255,255],
"3": [0,255,0],
"4": [255,255,0],
"5": [255,0,0],
}
img = np.zeros((label.shape[0],label.shape[1], 3), dtype=np.uint8)
# iterate over the image
for i in range(label.shape[0]):
for j in range(label.shape[1]):
# turn one pixel into rgb image
pixel_str = str(label[i,j])
img[i,j] = np.array(label2color_dict[pixel_str])
return img
if __name__ == '__main__':
# the default color2label_dict is from Vaihingen and Potsdam datasets
# 1. Impervious surfaces (RGB: 255, 255, 255)
# 2. Building (RGB: 0, 0, 255)
# 3. Low vegetation (RGB: 0, 255, 255)
# 4. Tree (RGB: 0, 255, 0)
# 5. Car (RGB: 255, 255, 0)
# 6. Clutter/background (RGB: 255, 0, 0)
color2label_dict = {
"255,255,255": 0,
"0,0,255": 1,
"0,255,255": 2,
"0,255,0": 3,
"255,255,0": 4,
"255,0,0": 5,
}
x =color2label_dict["255,255,255"]
# from osgeo import gdal_array
# img = gdal_array.LoadFile(r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\5_Labels_all\top_potsdam_2_10_label.tif').transpose(1,2,0)
# label = color2label(img, color2label_dict)
# y = []
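    # Hedged round-trip sketch using the default Potsdam palette above:
    # colorize a tiny label map, then map it back to class indices.
    demo_label = np.array([[0, 1], [4, 5]], dtype=np.uint8)
    demo_img = label2color(demo_label)
    demo_back = color2label(demo_img, color2label_dict)
    print((demo_back == demo_label).all())  # -> True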
| 3.359375
| 3
|
PyPoll/Script_election.py
|
Egallego77/python-challenge
| 0
|
12779923
|
# This will allow us to create file paths across operating systems
import pathlib
# Path to collect data from the Resources folder
election_csvpath = pathlib.Path('PyPoll/Resources/election_data.csv')
#Module for reading CSV files
import csv
with open(election_csvpath, mode='r') as csvfile:
#CSV reader specifies delimiter and variable that holds content
reader = csv.reader(csvfile, delimiter= ',')
header = next(csvfile)
votes = {}
for row in reader:
#complete list of canditates who received votes
#candidates vote count
candidate_name = row[2]
if candidate_name in votes:
votes[candidate_name] += 1
else:
votes[candidate_name] = 1
print (votes)
vote_counts = (list(votes.values()))
# Total number of votes cast
total_count = sum(vote_counts)
print(total_count)
winner = list(votes.keys())[0]
votes_summary = {}
for candidate in votes.keys():
if votes[candidate] >votes[winner]:
winner = candidate
votes_summary[candidate] = {'votes': votes[candidate], 'vote_pct': round((votes[candidate]/total_count)*100,2)}
if candidate== winner:
votes_summary[candidate]["is_winner"] = True
else:
votes_summary[candidate]["is_winner"] = False
print(votes_summary)
election_result = pathlib.Path('PyPoll/Analysis/election_results.txt')
with open(election_result,'w') as outputfile:
csvwriter = csv.writer(outputfile)
election_result = (
f"\n\nElection Results\n"
f"-------------------------\n"
f"Total Votes: {total_count}\n"
f"-------------------------\n"
)
print(election_result, end="")
outputfile.write(election_result)
for candidate in votes_summary.keys():
voter_output = f"{candidate}: {votes_summary[candidate]['vote_pct']}% ({votes_summary[candidate]['votes']})\n"
print(voter_output, end="")
outputfile.write(voter_output)
winning_candidate_summary = (
f"-------------------------\n"
f"Winner: {winner}\n"
f"-------------------------\n"
)
outputfile.write(winning_candidate_summary)
print(winning_candidate_summary)
| 3.421875
| 3
|
bingo_board/tastypie/__init__.py
|
colinsullivan/bingo-board
| 2
|
12779924
|
__author__ = '<NAME>, <NAME>, <NAME>'
__version__ = (0, 9, 5)
| 1.148438
| 1
|
packages/SwingSet/misc-tools/count-mint-exports.py
|
danwt/agoric-sdk
| 4
|
12779925
|
#!/usr/bin/env python3
# goal: of the 6230 objects exported by v5 (vat-mints), how many are Purses vs Payments vs other?
import sys, json, time, hashlib, base64
from collections import defaultdict
exports = {} # kref -> type
double_spent = set()
unspent = set() # kref
died_unspent = {}
def find_interfaces(body):
if isinstance(body, list):
for item in body:
yield from find_interfaces(item)
elif isinstance(body, dict):
if "@qclass" in body:
if body["@qclass"] == "slot":
iface = body.get("iface", None)
index = body["index"]
yield (index, iface)
else:
for item in body.values():
yield from find_interfaces(item)
for line in sys.stdin:
data = json.loads(line)
if data.get("vatID", None) != "v5":
continue
if data["type"] == "syscall":
if data["ksc"][0] == "send":
raise Error("vat-mints never exports anything")
if data["ksc"][0] == "resolve":
resolutions = data["ksc"][2]
for (kref, rejection, capdata) in resolutions:
slots = capdata["slots"]
for (index, iface) in find_interfaces(json.loads(capdata["body"])):
kref = slots[index]
#print("export", kref, iface)
exports[kref] = iface
unspent.add(kref)
if data["type"] == "deliver":
if data["kd"][0] == "message" and data["kd"][2]["method"] in ["deposit", "burn"]:
kref = data["kd"][2]["args"]["slots"][0]
if kref not in unspent:
double_spent.add(kref)
unspent.discard(kref)
if data["kd"][0] == "dropExports":
for kref in data["kd"][1]:
#print("delete", kref)
if kref in unspent:
print("died unspent:", kref)
died_unspent[kref] = exports[kref]
unspent.remove(kref)
del exports[kref]
counts = defaultdict(int)
for kref in sorted(exports):
iface = exports[kref].removeprefix("Alleged: ")
counts[iface] += 1
#print(kref, exports[kref])
for iface in sorted(counts):
print("%20s : %4d" % (iface, counts[iface]))
print("total:", sum(counts.values()))
print("live unspent:", len(unspent))
counts = defaultdict(int)
for kref in unspent:
iface = exports[kref].removeprefix("Alleged: ")
counts[iface] += 1
for iface in sorted(counts):
print(" %20s : %4d" % (iface, counts[iface]))
print("died unspent:", len(died_unspent))
counts = defaultdict(int)
for kref,iface in died_unspent.items():
iface = iface.removeprefix("Alleged: ")
counts[iface] += 1
for iface in sorted(counts):
print(" %20s : %4d" % (iface, counts[iface]))
print("double spent:", len(double_spent))
| 2.234375
| 2
|
deeppavlov/tasks/coreference_scorer_model/agents.py
|
deepmipt/kpi2017
| 3
|
12779926
|
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import numpy as np
import tensorflow as tf
from parlai.core.agents import Teacher
from . import utils
from .build import build
from ...utils import coreference_utils
class CoreferenceTeacher(Teacher):
"""Teacher for coreference resolution task"""
@staticmethod
def add_cmdline_args(argparser):
"""Parameters of agent and default values"""
group = argparser.add_argument_group('Coreference Teacher')
group.add_argument('--language', type=str, default='ru')
group.add_argument('--predictions_folder', type=str, default='predicts',
help='folder where to dump conll predictions, scorer will use this folder')
group.add_argument('--scorer_path', type=str, default='scorer/reference-coreference-scorers/v8.01/scorer.pl',
help='path to CoNLL scorer perl script')
group.add_argument('--valid_ratio', type=float,
default=0.2, help='valid_set ratio')
group.add_argument('--test_ratio', type=float,
default=0.2, help='test_set ratio')
group.add_argument('--teacher_seed', type=int, default=42, help='seed')
group.add_argument('--raw-dataset-path', type=str, default=None,
help='Path to folder with two subfolders: dataset and scorer. '
'These two folders are extracted rucoref_29.10.2015.zip and '
'reference-coreference-scorers.v8.01.tar.gz')
def __init__(self, opt, shared=None):
"""Initialize the parameters for CoreferenceTeacher"""
super().__init__(opt, shared)
self.last_observation = None
self.id = 'two-step-coref'
self.seed = opt['teacher_seed']
np.random.seed(seed=self.seed)
random.seed(a=self.seed)
tf.set_random_seed(seed=self.seed)
if shared:
raise RuntimeError('Additional batching is not supported')
build(opt)
self.dt = opt['datatype'].split(':')[0]
self.datapath = os.path.join(opt['datapath'], 'coreference_scorer_model', opt['language'])
self.valid_path = None
self.train_path = None
self.predictions_folder = os.path.join(self.datapath, opt['predictions_folder'], self.dt)
self.scorer_path = os.path.join(self.datapath, opt['scorer_path'])
# in train mode we use train dataset to train model
# and valid dataset to adjust threshold
# in valid and test mode we use test dataset
if self.dt == 'train':
self.valid_path = os.path.join(self.datapath, 'valid')
self.train_path = os.path.join(self.datapath, 'train')
elif self.dt in ['test', 'valid']:
self.valid_path = os.path.join(self.datapath, 'test')
else:
raise ValueError('Unknown mode: {}. Available modes: train, test, valid.'.format(self.dt))
self.train_documents = [] if self.train_path is None else list(sorted(os.listdir(self.train_path)))
self.valid_documents = [] if self.valid_path is None else list(sorted(os.listdir(self.valid_path)))
self.len = 1
self.epoch = 0
self._epoch_done = False
def act(self):
"""reads all documents and returns them"""
self._epoch_done = True
train_conll = [open(os.path.join(self.train_path, file), 'r').readlines() for file in self.train_documents]
valid_conll = [open(os.path.join(self.valid_path, file), 'r').readlines() for file in self.valid_documents]
return {'id': self.id, 'conll': train_conll, 'valid_conll': valid_conll}
def observe(self, observation):
"""saves observation"""
self.last_observation = observation
self.epoch += 1
def report(self):
"""calls scorer on last observation and reports result"""
utils.save_observations(self.last_observation['valid_conll'], self.predictions_folder)
res = coreference_utils.score(self.scorer_path, self.valid_path, self.predictions_folder)
return {'f1': res['conll-F-1']}
def reset(self):
self._epoch_done = False
def epoch_done(self):
return self._epoch_done
def __len__(self):
return self.len
| 2.03125
| 2
|
5_distance_analysis/nbconverted/0_process_park.py
|
jjc2718/mutation-fn
| 0
|
12779927
|
#!/usr/bin/env python
# coding: utf-8
# ## Load and process Park et al. data
#
# For each sample, we want to compute:
#
# * (non-silent) binary mutation status in the gene of interest
# * binary copy gain/loss status in the gene of interest
# * what "class" the gene of interest is in (more detail on what this means below)
#
# We'll save this to a file since the preprocessing takes a few minutes, so we can load it quickly in downstream analysis scripts.
# In[1]:
from pathlib import Path
import pickle as pkl
import pandas as pd
import sys; sys.path.append('..')
import config as cfg
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[2]:
# park et al. geneset info
park_loss_data = cfg.data_dir / 'park_loss_df.tsv'
park_gain_data = cfg.data_dir / 'park_gain_df.tsv'
# park et al. significant gene info
park_loss_sig_data = cfg.data_dir / 'park_loss_df_sig_only.tsv'
park_gain_sig_data = cfg.data_dir / 'park_gain_df_sig_only.tsv'
# mutation and copy number data
pancancer_pickle = Path('/home/jake/research/mpmp/data/pancancer_data.pkl')
# ### Load data from Park et al. supp. info
# In[3]:
park_loss_df = pd.read_csv(park_loss_data, sep='\t', index_col=0)
park_loss_df.head()
# In[4]:
park_gain_df = pd.read_csv(park_gain_data, sep='\t', index_col=0)
park_gain_df.head()
# ### Load mutation and CNV info
# In[5]:
with open(pancancer_pickle, 'rb') as f:
pancancer_data = pkl.load(f)
# In[6]:
# get (binary) mutation data
# 1 = observed non-silent mutation in this gene for this sample, 0 otherwise
mutation_df = pancancer_data[1]
print(mutation_df.shape)
mutation_df.iloc[:5, :5]
# In[7]:
# we use the data source and preprocessing code from the pancancer repo, here:
# https://github.com/greenelab/pancancer/blob/d1b3de7fa387d0a44d0a4468b0ac30918ed66886/scripts/initialize/process_copynumber.py#L21
copy_thresh_df = (
pd.read_csv(cfg.data_dir / 'pancan_GISTIC_threshold.tsv',
sep='\t', index_col=0)
.drop(columns=['Locus ID', 'Cytoband'])
)
copy_thresh_df.columns = copy_thresh_df.columns.str[0:15]
# thresholded copy number includes 5 values [-2, -1, 0, 1, 2], which
# correspond to "deep loss", "moderate loss", "no change",
# "moderate gain", and "deep gain", respectively.
print(copy_thresh_df.shape)
copy_thresh_df.iloc[:5, :5]
# In[8]:
sample_freeze_df = pancancer_data[0]
copy_samples = list(
set(sample_freeze_df.SAMPLE_BARCODE)
.intersection(set(copy_thresh_df.columns))
)
print(len(copy_samples))
# In[9]:
# make sure we're not losing too many samples, a few is fine
print(sorted(set(sample_freeze_df.SAMPLE_BARCODE) - set(copy_thresh_df.columns)))
# In[10]:
copy_thresh_df = (copy_thresh_df
.T
.loc[sorted(copy_samples)]
.fillna(0)
.astype(int)
)
print(copy_thresh_df.shape)
copy_thresh_df.iloc[:5, :5]
# In[11]:
# here, we want to use "moderate" and "deep" loss/gain to define CNV
# loss/gain (to match Park et al.)
#
# note that this is different to the more conservative approach of using
# "deep loss/gain" only as in our classifiers
copy_loss_df = (copy_thresh_df
.replace(to_replace=[1, 2], value=0)
.replace(to_replace=[-1, -2], value=1)
)
print(copy_loss_df.shape)
copy_loss_df.iloc[:5, :5]
# In[12]:
copy_gain_df = (copy_thresh_df
.replace(to_replace=[-1, -2], value=0)
.replace(to_replace=[1, 2], value=1)
)
print(copy_gain_df.shape)
copy_gain_df.iloc[:5, :5]
# ### Classify genes/cancer types into "classes"
#
# In [the Park et al. paper](https://www.nature.com/articles/s41467-021-27242-3#Sec4), they describe 4 "classes" of driver genes:
#
# 1. Genes that function exclusively as one-hit drivers, no significant co-occurrence with CNAs
# 2. Genes that interact with CNA loss in at least one cancer type - "two-hit loss" drivers (i.e. classical tumor suppressors)
# 3. Genes that interact with CNA gain in at least one cancer type - "two-hit gain" drivers (for some examples/explanation of "two-hit" oncogenes, see [this paper](https://www.nature.com/articles/s41586-020-2175-2))
# 4. Genes that interact with both CNA loss and CNA gain across multiple cancer types - "two-hit loss and gain" drivers
#
# Here, we label each of the genes from the Park et al. data with their "class", since we want to segment our analyses in this way too.
# In[13]:
park_loss_sig_df = pd.read_csv(park_loss_sig_data, sep='\t', index_col=0)
park_gain_sig_df = pd.read_csv(park_gain_sig_data, sep='\t', index_col=0)
class_4_ids = (
set(park_loss_sig_df.index.unique()).intersection(
set(park_gain_sig_df.index.unique())
))
class_2_ids = set(park_loss_sig_df.index.unique()) - class_4_ids
class_3_ids = set(park_gain_sig_df.index.unique()) - class_4_ids
class_1_ids = (
set(park_loss_df.index.unique()) - (
class_4_ids.union(class_2_ids, class_3_ids)
)
)
print(len(park_loss_df.index.unique()))
print('class 1:', len(class_1_ids))
print('class 2:', len(class_2_ids))
print('class 3:', len(class_3_ids))
print('class 4:', len(class_4_ids))
print(sorted(class_4_ids))
# In[14]:
def id_to_class(i):
if i in class_2_ids:
return 'class 2'
elif i in class_3_ids:
return 'class 3'
elif i in class_4_ids:
return 'class 4'
else:
return 'class 1'
loss_class = {i: id_to_class(i) for i in park_loss_df.index.unique()}
park_loss_df['class'] = park_loss_df.index.map(loss_class)
print(park_loss_df['class'].unique())
park_loss_df.head()
# In[15]:
gain_class = {i: id_to_class(i) for i in park_gain_df.index.unique()}
park_gain_df['class'] = park_gain_df.index.map(gain_class)
print(park_gain_df['class'].unique())
park_gain_df.head()
# In[16]:
sample_freeze_df.head()
# ### Retrieve and format per-sample information
#
# We want to generate a dataframe with the following information:
#
# * Sample ID, gene/tissue
# * Mutation status (binary) for sample in gene
# * CNV status (binary) for sample in gene, gain/loss for oncogene/TSG respectively
# * Park et al. gene "class" (class 1/2/3/4 as defined above)
# * Sample "number of hits" (none/one/both)
# In[17]:
def get_info_for_gene_and_tissue(identifier, all_info_df, copy_change):
"""Given a gene and tissue, load the relevant mutation information.
'status' is what we will segment our plots by: 'none' == neither a point
mutation or CNV observed for the given sample, 'one' == either a point
mutation or CNV but not both, 'both' == both point mutation and CNV
"""
info_df = {}
gene, tissue = identifier.split('_')
if tissue == 'COADREAD':
tissue_samples = (
sample_freeze_df[sample_freeze_df.DISEASE.isin(['COAD', 'READ'])]
.SAMPLE_BARCODE
)
else:
tissue_samples = (
sample_freeze_df[sample_freeze_df.DISEASE == tissue]
.SAMPLE_BARCODE
)
# TODO: not sure why these don't match
tissue_samples = (
mutation_df.index.intersection(tissue_samples)
.intersection(copy_loss_df.index)
.intersection(copy_gain_df.index)
)
class_name = (all_info_df
.loc[all_info_df.index == identifier, ['class']]
).values[0]
info_df['class_name'] = class_name
# get mutation status for samples
info_df['mutation_status'] = mutation_df.loc[tissue_samples, gene].values
# get copy status for samples
    # gain drivers (oncogenes) take copy gain calls; loss drivers (TSGs) take copy loss calls
    if copy_change == 'gain':
        info_df['cnv_status'] = copy_gain_df.loc[tissue_samples, gene].values
    elif copy_change == 'loss':
        info_df['cnv_status'] = copy_loss_df.loc[tissue_samples, gene].values
info_df = pd.DataFrame(info_df, index=tissue_samples)
def hits_from_mut_info(row):
if row['mutation_status'] == 1 and row['cnv_status'] == 1:
return 'both'
elif row['mutation_status'] == 1 or row['cnv_status'] == 1:
return 'one'
else:
return 'none'
info_df['num_hits'] = info_df.apply(hits_from_mut_info, axis=1)
return info_df
get_info_for_gene_and_tissue('TP53_BRCA', park_loss_df, 'loss')
# ### Format and pickle all per-sample info
#
# We'll end up pickling a dict that maps each identifier (gene/cancer type combination) to a dataframe, assigning a "num_hits" class to each sample for that gene.
#
# We'll create two of these, one for copy gains and one for copy losses, to be used downstream in our distance/similarity analyses.
# In[18]:
cfg.distance_data_dir.mkdir(exist_ok=True)
park_gain_num_hits = {}
for identifier in park_gain_df.index:
park_gain_num_hits[identifier] = get_info_for_gene_and_tissue(identifier,
park_gain_df,
'gain')
park_gain_num_hits['TP53_BRCA'].head()
# In[19]:
with open(cfg.distance_gain_info, 'wb') as f:
pkl.dump(park_gain_num_hits, f)
# In[20]:
park_loss_num_hits = {}
for identifier in park_loss_df.index:
park_loss_num_hits[identifier] = get_info_for_gene_and_tissue(identifier,
park_loss_df,
'loss')
park_loss_num_hits['TP53_BRCA'].head()
# In[21]:
with open(cfg.distance_loss_info, 'wb') as f:
pkl.dump(park_loss_num_hits, f)
| 2.390625
| 2
|
pystyx/parser.py
|
styx-dev/pystyx
| 0
|
12779928
|
"""
Parse, don't validate. - <NAME>
"""
from munch import Munch
from .functions import TomlFunction
from .shared import OnThrowValue
def parse_on_throw(from_obj, to_obj):
"""
Expects "or_else" to already have been processed on "to_obj"
"""
throw_action = {
"or_else": OnThrowValue.OrElse,
"throw": OnThrowValue.Throw,
"skip": OnThrowValue.Skip,
}.get(from_obj.on_throw, None)
if not throw_action:
raise TypeError(f"Unknown 'on_throw' action given: {from_obj.on_throw}")
if throw_action == OnThrowValue.OrElse and not hasattr(to_obj, "or_else"):
raise TypeError(
"If 'on_throw' action is 'or_else', then 'or_else' must be defined."
)
return throw_action
class ProcessParser:
def process(self, process):
process_obj = Munch()
for action_name, action in process.items():
process_obj[action_name] = self.process_action(action)
return process_obj
def process_action(self, action):
action_obj = Munch()
if isinstance(action.input_paths, list) and all(
isinstance(element, str) for element in action.input_paths
):
action_obj.input_paths = action.input_paths
else:
raise TypeError("input_paths must be a list of strings.")
if isinstance(action.output_path, str):
action_obj.output_path = action.output_path
else:
raise TypeError("output_path must be a string.")
if action.function in TomlFunction._functions:
action_obj.function = TomlFunction._functions[action.function]
else:
raise TypeError(f"unknown function: {action.function}")
many = action.pop("many", False) is True
action_obj.many = many
if action.get("or_else"):
action_obj.or_else = action.or_else
if action.get("on_throw"):
throw_action = parse_on_throw(action, action_obj)
action_obj.on_throw = throw_action
return action_obj
class PreprocessParser(ProcessParser):
def parse(self, preprocess):
return self.process(preprocess)
class PostprocessParser(ProcessParser):
def parse(self, postprocess):
return self.process(postprocess)
class FieldsParser:
reserved_words = {
"input_paths",
"possible_paths",
"path_condition",
"from_type",
"to_type",
"function",
"or_else",
"on_throw",
"mapping",
}
def parse(self, fields):
field_objs = Munch()
many = fields.pop("many", False) is True
field_objs.many = many
if not fields:
raise TypeError("'fields' cannot be empty (what are we mapping?)")
for field_name, field in fields.items():
field_obj = self.parse_field(field)
field_objs[field_name] = self.parse_extra_fields(
field_name, field, field_obj
)
return field_objs
def parse_field(self, field):
field_obj = Munch()
field_obj = self.parse_paths(field, field_obj)
if hasattr(field, "or_else"):
field_obj.or_else = field.or_else
if field.get("on_throw"):
throw_action = parse_on_throw(field, field_obj)
field_obj.on_throw = throw_action
if field.get("from_type"):
# TODO: Is it possible to check valid definitions during parse?
field_obj.from_type = field.from_type
if field.get("mapping"):
# TODO: 'mapping' and 'from_type' should not both be possible
field_obj.mapping = field.mapping
if field.get("function"):
if field.function in TomlFunction._functions:
field_obj.function = TomlFunction._functions[field.function]
else:
raise TypeError(f"unknown function: {field.function}")
return field_obj
def parse_paths(self, field, field_obj):
if not hasattr(field, "input_paths") and not hasattr(field, "possible_paths"):
raise TypeError(
"Either 'input_paths' or 'possible_paths' must be declared. Aborting."
)
if hasattr(field, "input_paths") and hasattr(field, "possible_paths"):
raise TypeError(
"Either 'input_paths' or 'possible_paths' must be declared, but not both."
)
if hasattr(field, "input_paths"):
field_obj.input_paths = self.parse_input_paths(field)
else:
field_obj.possible_paths = self.parse_possible_paths(field)
field_obj.path_condition = field.path_condition
return field_obj
def parse_input_paths(self, field):
if isinstance(field.input_paths, list) and all(
isinstance(element, str) for element in field.input_paths
):
if len(field.input_paths) > 1 and not field.get("function"):
raise TypeError(
"'input_paths' must be of length 1 if 'function' is not defined"
)
return field.input_paths
else:
raise TypeError("input_paths must be a list of strings.")
def parse_possible_paths(self, field):
if isinstance(field.possible_paths, list) and all(
isinstance(element, str) for element in field.possible_paths
):
if not field.get("path_condition"):
raise TypeError(
"'path_condition' must be set if 'possible_paths' is set."
)
return field.possible_paths
else:
raise TypeError("possible_paths must be a list of strings.")
def parse_extra_fields(self, field_name, field, field_obj):
"""
Handle non-reserved keywords on the Field object
For now, the only allowed non-reserved keyword is the parent's field_name
"""
from_type = field.get("from_type")
field_obj["_copy_fields"] = []
for key, value in field.items():
if key in self.reserved_words:
continue
if key != field_name:
raise TypeError(f"Unknown key found on field definition: {field_name}")
if not from_type:
raise TypeError(
"Custom values cannot be set on a definition without declaring a nested object from_type"
)
field_obj[key] = value
for nested_key in value:
field_obj["_copy_fields"].append(nested_key)
return field_obj
class Parser:
def parse(self, toml_obj: Munch):
if not hasattr(toml_obj, "from_type"):
raise TypeError("'from_type' must be declared at the top-level.")
from_type = toml_obj.from_type
if not hasattr(toml_obj, "to_type"):
raise TypeError("'to_type' must be declared at the top-level.")
to_type = toml_obj.to_type
type_ = toml_obj.pop("__type__", "object")
include_type = toml_obj.pop("include_type", True) is True
if type_ not in ("object", "list"):
raise TypeError(
f"Only declared types available for __type__ are: object, list. Found: {type_}"
)
parsed_obj = Munch()
parsed_obj.to_type = to_type
parsed_obj.__type__ = type_
parsed_obj.include_type = include_type
if toml_obj.get("preprocess"):
parser = PreprocessParser()
parsed_obj["preprocess"] = parser.parse(toml_obj.preprocess)
if not hasattr(toml_obj, "fields"):
raise TypeError(
"'fields' is a required field for a Styx definition mapping."
)
fields_parser = FieldsParser()
parsed_obj["fields"] = fields_parser.parse(toml_obj.fields)
if toml_obj.get("postprocess"):
parser = PostprocessParser()
parsed_obj["postprocess"] = parser.parse(toml_obj.postprocess)
return from_type, to_type, parsed_obj
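# A minimal usage sketch (not part of the library itself): the mapping definition
# below is a hypothetical example shaped after the attributes Parser.parse() reads
# ('from_type', 'to_type' and a 'fields' table whose entries declare 'input_paths').
if __name__ == "__main__":
    example_definition = Munch.fromDict({
        "from_type": "UserRecord",      # hypothetical source type name
        "to_type": "UserDTO",           # hypothetical target type name
        "fields": {
            "name": {"input_paths": ["profile.full_name"]},
        },
    })
    from_type, to_type, parsed = Parser().parse(example_definition)
    print(from_type, to_type, list(parsed.fields.keys()))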
| 2.734375
| 3
|
scrapy_news/scrapy_news/spiders/news_spider.py
|
DreamCloudWalker/MySimpleServers
| 0
|
12779929
|
<filename>scrapy_news/scrapy_news/spiders/news_spider.py<gh_stars>0
import scrapy
import re
from scrapy_news.items import ScrapyNewsItem
class NewsSpiderSpider(scrapy.Spider):
name = 'news_spider'
    start_urls = ['https://www.douban.com/gallery/',  # Douban
                  'https://s.weibo.com/top/summary',  # Weibo
                  'http://tieba.baidu.com/hottopic/browse/topicList?res_type=1&red_tag=h1923737578',  # Baidu Tieba
                  'https://bbs.hupu.com/all-gambia',  # Hupu
                  'http://top.baidu.com/buzz?b=341&c=513&fr=topbuzz_b1_c513',  # Baidu today's hot searches
]
def parse(self, response):
        if response.url == self.start_urls[0]:  # Douban
            yield from self.crawl_douban(response)
        if response.url == self.start_urls[1]:  # Weibo
            yield from self.crawl_weibo(response)
        if response.url == self.start_urls[2]:  # Baidu Tieba
            yield from self.crawl_tieba(response)
        if response.url == self.start_urls[3]:  # Hupu
            yield from self.crawl_hupu(response)
        if response.url == self.start_urls[4]:  # Baidu today's hot searches
            yield from self.crawl_topbaidu(response)
def crawl_douban(self, response):
trends = response.css('ul.trend > li > a')
for trend in trends:
item = ScrapyNewsItem()
item['source'] = 'douban_spider'
item['title'] = trend.css('a::text').extract_first()
item['url'] = trend.css('a').attrib['href']
item['remark'] = ''
yield item
def crawl_weibo(self, response):
trends = response.css('td.td-02 > a')
for trend in trends:
item = ScrapyNewsItem()
item['source'] = 'weibo_spider'
item['title'] = trend.css('a::text').extract_first()
href = self.get_weibo_href(trend)
item['url'] = "https://s.weibo.com" + href
item['remark'] = ''
yield item
def crawl_tieba(self, response):
trends = response.css('div.main > ul > li a')
for trend in trends:
item = ScrapyNewsItem()
item['source'] = 'tieba_spider'
item['title'] = trend.css('a::text').extract_first()
item['url'] = trend.css('a').attrib['href']
item['remark'] = ''
yield item
def crawl_hupu(self, response):
trends = response.css('div.list> ul > li >span:nth-child(1) >a')
for trend in trends:
item = ScrapyNewsItem()
item['source'] = 'hupu_spider'
item['title'] = trend.css('a').attrib['title']
item['url'] = "https://bbs.hupu.com" + trend.css('a').attrib['href']
item['remark'] = ''
yield item
def crawl_github(self, response):
trends = response.css('div> article.Box-row ')
for trend in trends:
item = ScrapyNewsItem()
item['source'] = 'github_spider'
title = "".join(trend.css('p::text').extract())
re.sub(r'[\\*|“<>:/()()0123456789]', '', title)
title.replace('\n', '').replace(' ', '')
item['title'] = title
item['url'] = "https://github.com" + trend.css('h1>a').attrib['href']
item['remark'] = ''
yield item
def crawl_topbaidu(self, response):
trends = response.css('td.keyword >a:nth-child(1) ')
for trend in trends:
item = ScrapyNewsItem()
item['source'] = 'topbaidu_spider'
item['title'] = trend.css('a::text').extract_first()
item['url'] = trend.css('a').attrib['href']
item['remark'] = ''
yield item
def get_weibo_href(self, trend):
href = trend.css('a').attrib['href']
if href.startswith('javascript'): ##javascript:void(0)
href = trend.css('a').attrib['href_to']
return href
| 2.84375
| 3
|
plugins/clench.py
|
wffirilat/Neurohacking
| 2
|
12779930
|
# -*- coding: utf-8 -*-
"""
Project: neurohacking
File: clench.py
Author: wffirilat
"""
import numpy as np
import time
import sys
import plugin_interface as plugintypes
from open_bci_v3 import OpenBCISample
class PluginClench(plugintypes.IPluginExtended):
def __init__(self):
self.release = True
self.packetnum = -1
self.threshold = None
self.uthreshold = None
self.ticknum = None
self.storelength = 1024
self.starttime = None
self.state = 'unstarted'
self.channel = 3
self.restingmax, self.restingmin = 0, 0
self.clenchmax, self.clenchmin = 0, 0
self.unclenchmax, self.unclenchmin = 0, 0
self.rawdata = np.zeros((8, self.storelength))
self.data = np.zeros((8, self.storelength))
def activate(self):
print("clench activated")
# called with each new sample
def __call__(self, sample: OpenBCISample):
if sample.id == 0:
if self.packetnum == -1:
self.starttime = time.time()
self.packetnum += 1
self.ticknum = self.packetnum * 256 + sample.id
self.rawdata[:, (sample.id + 256 * self.packetnum) % self.storelength] = sample.channel_data
self.data[:, (sample.id + 256 * self.packetnum) % self.storelength] = [v - avg for avg, v in zip(
[sum(self.rawdata[i, :]) / self.storelength for i in range(8)],
sample.channel_data
)]
#print(np.median(self.rawdata[3,:])) #The reason this is here is because it might help our basis be better
if self.state != 'calibrated':
self.calibratetick()
else:
self.tick()
def calibratetick(self):
# print(self.data)
dt = time.time() - self.starttime
if self.state == "unstarted":
print("Prepare to calibrate")
self.state = "positioning"
elif self.state == "positioning":
if dt > 4:
print('Calibrating')
self.state = 'resting'
elif self.state == 'resting':
if dt > 6:
print("Resting data gathered; Prepare to clench")
self.state = 'clench'
return
if self.current >= self.restingmax:
self.restingmax = self.current
if self.current <= self.restingmin:
self.restingmin = self.current
elif self.state == 'clench':
if dt > 7:
print("Clench NOW!")
self.state = 'clenching'
return
elif self.state == 'clenching':
if dt > 9:
print('Unclench!!')
self.state = 'postclench'
return
if self.current > self.clenchmax:
self.clenchmax = self.current
if self.current < self.clenchmin:
self.clenchmin = self.current
elif self.state == 'postclench':
if dt > 10:
self.threshold = self.restingmax + ((self.clenchmax - self.restingmax) / 2)
if self.release:
self.uthreshold = self.restingmin + ((self.clenchmin - self.restingmin) / 2)
self.state = 'calibrated'
print ("Resting Max", self.restingmax, "Resting Min", self.restingmin, "\n")
print ("Clench Max,", self.clenchmax, "Clench Min",self.clenchmin, "\n")
if self.release:
print ("Unclench Max,", self.unclenchmax, "Unclench Min",self.unclenchmin, "\n")
return
if self.release:
if self.current > self.unclenchmax:
self.unclenchmax = self.current
if self.current < self.unclenchmin:
self.unclenchmin = self.current
@property
def current(self):
return self.data[self.channel, self.ticknum % self.storelength]
def tick(self):
if self.current > self.unclenchmax-((self.current-self.unclenchmax)/5):#watch this work!
print(f" {self.current}: Clenched!!")
...
#if self.release:
# if self.current < self.uthreshold:
# print(f" {self.ticknum}: Unclenched!!")
| 2.234375
| 2
|
Discord Bots/Meme6-BOT/cogs/money.py
|
LUNA761/Code-Archive
| 1
|
12779931
|
import discord, time, os, praw, random, json
from discord.ext import commands, tasks
from discord.ext.commands import has_permissions, MissingPermissions
from discord.utils import get
from itertools import cycle
import datetime as dt
from datetime import datetime
done3 = []
beg_lim_users = []
timers = {}
done = []
steal_lim_users = []
timers2 = {}
done2 = []
embeddata = {}
embeddata["icon"] = "http://luna-development.orgfree.com/data/discord/meme6/logo.jpg"
embeddata["name"] = "Meme6"
embeddata["version"] = "2.0"
class App(commands.Cog):
def __init__(self, client):
self.client = client
print("Loading Cog: MONEY")
@commands.command(aliases=["resetmoney", "moneyreset"])
@has_permissions(administrator=True)
async def moneysetup(self, ctx):
os.chdir("money/")
folder = str(str(ctx.guild.id)+"/")
try:
os.chdir(folder)
except:
os.mkdir(folder)
os.chdir(folder)
usr_num = 0
bot_num = 0
for user in ctx.guild.members:
if user.bot == True:
bot_num += 1
elif user.id in done3:
pass
else:
done3.append(user.id)
f = open(f"__{user.id}__.json", "w+")
f.write("500")
f.close()
usr_num += 1
embed=discord.Embed(title="SETUP", description="Running Setup", color=0x00eeff)
embed.set_author(name=embeddata["name"], icon_url=embeddata["icon"])
embed.add_field(name="Guild id:", value=str(ctx.guild.id), inline=False)
embed.add_field(name="Users", value=str(usr_num), inline=False)
embed.add_field(name="Bots", value=str(bot_num), inline=True)
embed.set_footer(text=embeddata["name"]+" ["+embeddata["version"]+"]")
await ctx.send(embed=embed)
os.chdir("..")
os.chdir("..")
@commands.command(aliases=["bal", "bank"])
async def balance(self, ctx, who=None):
os.chdir("money/")
folder = str(str(ctx.guild.id)+"/")
try:
os.chdir(folder)
except:
os.mkdir(folder)
os.chdir(folder)
if who == None:
who = int(ctx.message.author.id)
else:
who = who.replace("@", "").replace("!", "").replace(">", "").replace("<", "")
who = int(who)
f = open(f"__{who}__.json", "r")
bal = f.read()
f.close()
embed=discord.Embed(title="Balance", color=0x00eeff)
embed.set_author(name=embeddata["name"], icon_url=embeddata["icon"])
embed.add_field(name="Total", value="£"+str(bal), inline=False)
embed.set_footer(text=embeddata["name"]+" ["+embeddata["version"]+"]")
await ctx.send(embed=embed)
os.chdir("..")
os.chdir("..")
@tasks.loop(seconds = 1)
async def begtimer():
for user_id in beg_lim_users:
if user_id in done:
pass
old = timers[user_id]
new = old - 1
timers[user_id] = new
if timers[user_id] == 0:
beg_lim_users.remove(user_id)
timers.pop(user_id)
done.remove(user_id)
else:
done.append(user_id)
timers[user_id] = 50
@tasks.loop(seconds = 1)
async def stealtimer():
for user_id in steal_lim_users:
if user_id in done2:
pass
old = timers2[user_id]
new = old - 1
timers2[user_id] = new
if timers2[user_id] == 0:
steal_lim_users.remove(user_id)
timers2.pop(user_id)
done2.remove(user_id)
else:
done2.append(user_id)
timers2[user_id] = 50
@commands.command()
async def beg(self, ctx):
os.chdir("money/")
folder = str(str(ctx.guild.id)+"/")
try:
os.chdir(folder)
except:
os.mkdir(folder)
os.chdir(folder)
if ctx.message.author.id in beg_lim_users:
left = timers[ctx.message.author.id]
await ctx.send(f"You need to wait {left} seconds before you can use this again!")
os.chdir("..")
os.chdir("..")
return
else:
beg_lim_users.append(ctx.message.author.id)
x = random.randint(0, 100)
if x > 25:
c = True
else:
c = False
await ctx.send("No Coins for you!")
if c == True:
amm = random.randint(50, 300)
if amm > 295:
amm = random.randint(400, 500)
await ctx.send(f"Here have {amm} coins!")
f = open(f"__{ctx.message.author.id}__.json")
c_b = f.read()
f.close()
c_b =int(c_b)+int(amm)
f = open(f"__{ctx.message.author.id}__.json", "w+")
f.write(str(c_b))
f.close()
os.chdir("..")
os.chdir("..")
@commands.command()
async def passive(self, ctx, choice):
if choice == "on":
with open("passive.json") as f:
passive_list = json.load(f)
passive_list[ctx.message.author.id] = True
with open("passive.json", "w+") as f:
json.dump(passive_list, f)
await ctx.send(f"Passive mode now {choice}")
elif choice == "off":
with open("passive.json") as f:
passive_list = json.load(f)
passive_list[ctx.message.author.id] = False
with open("passive.json", "w+") as f:
json.dump(passive_list, f)
await ctx.send(f"Passive mode now {choice}")
else:
await ctx.send(f"{choice} is not a valid option, please choose from on or off")
@commands.command()
async def steal(self, ctx, who=None):
with open("passive.json") as f:
passive_list = json.load(f)
p = passive_list[str(ctx.message.author.id)]
if p == True:
await ctx.send("You can't steal, your in passive mode you can change that using the passive command")
return
if ctx.message.author.id in steal_lim_users:
left = timers2[ctx.message.author.id]
await ctx.send(f"You need to wait {left} seconds before you can use this again!")
os.chdir("..")
os.chdir("..")
return
else:
steal_lim_users.append(ctx.message.author.id)
os.chdir("money/")
folder = str(str(ctx.guild.id)+"/")
try:
os.chdir(folder)
except:
os.mkdir(folder)
os.chdir(folder)
w = who
if who == None:
await ctx.send("You need to tell me who to steal from!")
else:
who = who.replace("@", "").replace("!", "").replace(">", "").replace("<", "")
who = int(who)
f = open(f"__{who}__.json")
ee = f.read()
if int(ee) < 400:
await ctx.send("This person does not have more than 400 in there bank its not worth it!")
f.close()
return
f.close()
chance = random.randint(0, 100)
if chance > 30:
x = True
else:
await ctx.send("Oh no, you have been caught!")
x = False
if x == True:
amm = random.randint(1, 400)
await ctx.send(f"You stole {amm} from {w}")
f = open(f"__{ctx.message.author.id}__.json")
c = f.read()
f.close()
c = int(c)+amm
f = open(f"__{ctx.message.author.id}__.json", "w+")
f.write(str(c))
f.close()
f = open(f"__{who}__.json")
c = f.read()
f.close()
c = int(c)-amm
f = open(f"__{who}__.json", "w+")
f.write(str(c))
f.close()
os.chdir("..")
os.chdir("..")
begtimer.start()
stealtimer.start()
def setup(client):
client.add_cog(App(client))
| 2.328125
| 2
|
forager_server/scripts/load_val.py
|
jeremyephron/forager
| 1
|
12779932
|
<reponame>jeremyephron/forager
from google.cloud import storage
from forager_server_api.models import Dataset, DatasetItem
import os
DATASET_NAME = "waymo"
VAL_IMAGE_DIR = "gs://foragerml/waymo/val"
if not VAL_IMAGE_DIR.startswith("gs://"):
raise RuntimeError(
"Directory only supports Google Storage bucket paths. "
'Please specify as "gs://bucket-name/path/to/data".'
)
# Similar to /create_dataset endpoint
split_dir = VAL_IMAGE_DIR[len("gs://") :].split("/")
bucket_name = split_dir[0]
bucket_path = "/".join(split_dir[1:])
client = storage.Client()
bucket = client.get_bucket(bucket_name)
all_blobs = client.list_blobs(bucket, prefix=bucket_path)
dataset = Dataset.objects.get(name=DATASET_NAME)
dataset.val_directory = VAL_IMAGE_DIR
dataset.save()
# Create all the DatasetItems for this dataset
paths = [blob.name for blob in all_blobs]
paths = [
path
for path in paths
if (path.endswith(".jpg") or path.endswith(".jpeg") or path.endswith(".png"))
]
items = [
DatasetItem(
dataset=dataset,
identifier=os.path.basename(path).split(".")[0],
path=path,
is_val=True,
)
for path in paths
]
print(f"Creating {len(items)} new entries")
DatasetItem.objects.bulk_create(items)
| 2.9375
| 3
|
tests/test_cli.py
|
mossblaser/bulk_photo_print
| 0
|
12779933
|
<filename>tests/test_cli.py
import pytest
import os
import sys
from typing import Set, Any
from bulk_photo_print.cli import (
parse_dimension,
ARGUMENTS,
parse_arguments,
ArgumentParserError,
main,
)
TEST_DIR = os.path.dirname(__file__)
TEST_JPEG_PORTRAIT = os.path.join(TEST_DIR, "portrait.jpg")
TEST_JPEG_LANDSCAPE = os.path.join(TEST_DIR, "landscape.jpg")
TEST_JPEG_SQUARE = os.path.join(TEST_DIR, "square.jpg")
class TestParseDimension:
@pytest.mark.parametrize(
"example, exp",
[
# Different number formats
("1", 1.0),
("1.", 1.0),
("123", 123.0),
("1.25", 1.25),
(".25", 0.25),
# Units
("1mm", 1.0),
("1cm", 10.0),
("1 m", 1000.0),
],
)
def test_valid_cases(self, example: str, exp: float) -> None:
assert parse_dimension(example) == exp
@pytest.mark.parametrize(
"example",
[
# Empty string
"",
# No digits
".",
# Just unit
"mm",
# Unknown unit
"100 foo",
],
)
def test_invalid_cases(self, example: str) -> None:
with pytest.raises(ValueError):
parse_dimension(example)
def test_no_duplicate_arguments() -> None:
names: Set[str] = set()
for argument in ARGUMENTS.values():
assert len(argument.argument_names) >= 1
for name in argument.argument_names:
assert name not in names
names.add(name)
class TestParseArguments:
def test_empty(self) -> None:
args = parse_arguments([""])
assert args.page_width == 210
assert args.page_height == 297
assert args.margin == 5
assert args.pictures == []
assert args.output_filename == "out.pdf"
def test_page_dimensions(self) -> None:
args = parse_arguments(["", "--page-dimensions", "100", "200"])
assert args.page_width == 100
assert args.page_height == 200
def test_page_dimensions_bad_args(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--page-dimensions"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--page-dimensions", "100"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--page-dimensions", "100", "nope"])
def test_page_dimensions_after_picture(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", TEST_JPEG_SQUARE, "--page-dimensions", "100", "200"])
def test_margin(self) -> None:
args = parse_arguments(["", "--margin", "12"])
assert args.margin == 12
def test_margin_bad_args(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--margin"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--margin", "nope"])
def test_margin_after_picture(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", TEST_JPEG_SQUARE, "--margin", "100"])
def test_output(self) -> None:
args = parse_arguments(["", "--output", "foo.pdf"])
assert args.output_filename == "foo.pdf"
def test_output_missing(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--output"])
def test_picture_dimensions(self) -> None:
args = parse_arguments(
[
"",
"--picture-dimensions",
"10",
"20",
TEST_JPEG_SQUARE,
"--picture-dimensions",
"30",
"40",
TEST_JPEG_SQUARE,
]
)
assert len(args.pictures) == 2
assert args.pictures[0].width == 10
assert args.pictures[0].height == 20
assert args.pictures[1].width == 30
assert args.pictures[1].height == 40
def test_picture_dimensions_bad_dimensions(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--picture-dimensions"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--picture-dimensions", "100"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--picture-dimensions", "100", "nope"])
def test_scale_or_crop(self) -> None:
args = parse_arguments(
[
"",
"--picture-dimensions",
"10",
"20",
TEST_JPEG_SQUARE,
"--scale",
TEST_JPEG_SQUARE,
"--crop",
TEST_JPEG_SQUARE,
]
)
assert len(args.pictures) == 3
assert args.pictures[0].width == 10
assert args.pictures[0].height == 20
assert args.pictures[1].width == 10
assert args.pictures[1].height == 10
assert args.pictures[2].width == 10
assert args.pictures[2].height == 20
def test_alignment(self) -> None:
args = parse_arguments(
[
"",
"--picture-dimensions",
"1",
"2",
TEST_JPEG_PORTRAIT,
"--alignment",
"0.5",
"0.5",
TEST_JPEG_PORTRAIT,
"--alignment",
"0.0",
"0.0",
TEST_JPEG_PORTRAIT,
]
)
assert len(args.pictures) == 3
assert args.pictures[0].x_offset or args.pictures[0].y_offset
assert args.pictures[0].x_offset == args.pictures[1].x_offset
assert args.pictures[0].y_offset == args.pictures[1].y_offset
assert args.pictures[2].x_offset == 0
assert args.pictures[2].y_offset == 0
def test_alignment_bad(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--alignment"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--alignment", "1"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--alignment", "1", "nope"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--alignment", "1", "2"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--alignment", "1", "-1"])
def test_rotate_for_best_fit(self) -> None:
args = parse_arguments(
[
"",
"--scale",
"--picture-dimensions",
"60",
"90",
TEST_JPEG_LANDSCAPE,
"--rotate-for-best-fit",
TEST_JPEG_LANDSCAPE,
"--no-rotate-for-best-fit",
TEST_JPEG_LANDSCAPE,
]
)
assert len(args.pictures) == 3
assert args.pictures[0].rotate_image
assert args.pictures[1].rotate_image
assert not args.pictures[2].rotate_image
def test_dpi(self) -> None:
args = parse_arguments(
[
"",
"--picture-dimensions",
"100",
"100",
"--max-dpi",
"25.4", # 1 dot per mm
TEST_JPEG_SQUARE,
"--max-dpi",
"0", # unlimited dpi
TEST_JPEG_SQUARE,
]
)
assert len(args.pictures) == 2
assert args.pictures[0].image_width == 100
assert args.pictures[0].image_height == 100
assert args.pictures[1].image_width == 256
assert args.pictures[1].image_height == 256
def test_dpi_bad(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--max-dpi"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--max-dpi", "nope"])
def test_unknown_argument(self) -> None:
with pytest.raises(ArgumentParserError):
parse_arguments(["", "-?"])
with pytest.raises(ArgumentParserError):
parse_arguments(["", "--foo"])
class TestMain:
def test_empty(self, tmpdir: Any, monkeypatch: Any) -> None:
output_filename = str(tmpdir.join("out.pdf"))
monkeypatch.setattr(sys, "argv", ["", "-o", output_filename])
main() # Shouldn't crash
assert os.path.isfile(output_filename)
def test_pictures(self, tmpdir: Any, monkeypatch: Any) -> None:
output_filename = str(tmpdir.join("out.pdf"))
monkeypatch.setattr(
sys,
"argv",
[
"",
"-o",
output_filename,
TEST_JPEG_PORTRAIT,
TEST_JPEG_SQUARE,
TEST_JPEG_LANDSCAPE,
],
)
main() # Shouldn't crash
assert os.path.isfile(output_filename)
def test_argument_error(self, monkeypatch: Any) -> None:
monkeypatch.setattr(sys, "argv", ["", "--foo"])
with pytest.raises(SystemExit):
main()
def test_non_fitting_error(self, monkeypatch: Any) -> None:
monkeypatch.setattr(
sys,
"argv",
[
"",
"--picture-dimensions",
"1000",
"1000",
TEST_JPEG_PORTRAIT,
],
)
with pytest.raises(SystemExit):
main()
| 2.375
| 2
|
get_mismatch.py
|
TuanjieNew/SeqDesignTool
| 0
|
12779934
|
#!/usr/bin/env python
#fn: get_mismatch.py
#ACTGCAGCGTCATAGTTTTTGAG
import os
import copy
# For each reference base, the three alternative bases to substitute, in order.
SUBSTITUTIONS = {'A': 'TGC', 'T': 'AGC', 'G': 'TAC', 'C': 'TGA'}
def getMismatch(start, seq, name, end):
    # Recursively write every mismatch variant of seq (positions start+1 .. end-1)
    # to mis_test.fastq; each variant is written twice, first with position 21
    # forced to 'G', then with position 22 additionally forced to 'A'.
    quality = 'IIIIIIIIIIIIIIIIIIIIII'
    OUTFILE = open('./mis_test.fastq', 'a')
    ls_1 = copy.deepcopy(list(seq))
    ii = start + 1
    for base in ls_1[ii:end]:
        for sub in SUBSTITUTIONS.get(base, ''):
            ls_1[ii] = sub
            ls_1[21] = 'G'
            OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
            ls_1[22] = 'A'
            OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
            getMismatch(ii, ls_1, name, end - 1)
        ii += 1
seq = 'GCTGCGTCGTCGTAGTTTTTTGG'
getMismatch(-1, seq, seq, 21)
| 2.65625
| 3
|
cride/circles/migrations/0004_auto_20200525_1916.py
|
mariogonzcardona/platzi-cride
| 0
|
12779935
|
# Generated by Django 2.0.10 on 2020-05-25 19:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('circles', '0003_auto_20200525_1531'),
]
operations = [
migrations.RemoveField(
model_name='membership',
name='is_Active',
),
migrations.AddField(
model_name='membership',
name='is_active',
field=models.BooleanField(default=True, help_text='Only active users are allowed to interact in the circle.', verbose_name='active status'),
),
migrations.AlterField(
model_name='membership',
name='is_admin',
field=models.BooleanField(default=False, help_text="Circle admins can update the circle's data and manage its members.", verbose_name='circle admin'),
),
migrations.AlterField(
model_name='membership',
name='remaining_invitations',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.AlterField(
model_name='membership',
name='used_invitations',
field=models.PositiveSmallIntegerField(default=0),
),
]
| 1.75
| 2
|
saveauth.py
|
jimfenton/notif-notifier
| 0
|
12779936
|
<filename>saveauth.py<gh_stars>0
#!/usr/bin/python
# saveauth.py - Save notif address for use by clockwatcherd
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
__version__="0.1.0"
import cgi
print """Content-Type: text/html
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Clockwatcher</title>
</head>
<body>
"""
form = cgi.FieldStorage()
if "addr" not in form:
print "<h1>Error</h1>"
print "Authorization ID not present"
exit()
if "maxpri" not in form:
print "<h1>Error</h1>"
print "Max priority not present"
exit()
with open("/etc/clockwatcher/newwatchers.cfg", "a") as config:
config.write(form["addr"].value)
print "<h1>Nōtif authorized</h1>"
print "<p>"
print "Have a good time!"
print "</p>"
print "</body></html>"
| 2.046875
| 2
|
Module2/Day15/module2_day_15_dictionaries.py
|
sydneybeal/100DaysPython
| 2
|
12779937
|
<gh_stars>1-10
"""
Author: <REPLACE>
Project: 100DaysPython
File: module2_day_15_dictionaries.py
Creation Date: <REPLACE>
Description: <REPLACE>
"""
conversion = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
print(conversion)
print(conversion["a"])
menu = {"item1": ["egg", "spam", "bacon"],
"item2": ["egg", "sausage", "spam"],
"item3": ["egg", "spam"],
"item4": ["egg", "bacon", "spam"],
"item5": ["egg", "bacon", "sausage", "spam"],
"item6": ["spam", "bacon", "sausage", "spam"],
"item7": ["spam", "egg", "spam", "spam", "bacon", "spam"],
"item8": ["spam", "egg", "sausage", "spam"]}
print(menu["item1"])
print(type(menu["item7"]))
menu["item2"][2] = "spam"
print(menu["item2"])
# List Comparison
l_ministry1 = ["silly", "walks"]
l_ministry2 = ["walks", "silly"]
print(l_ministry1 == l_ministry2)
# Dictionary Comparison
d_ministry1 = {"a": "silly", "b": "walks"}
d_ministry2 = {"b": "walks", "a": "silly"}
print(d_ministry1 == d_ministry2)
print(menu.keys())
print(menu.values())
ordered_keys = list(menu.keys())
print(ordered_keys)
ordered_keys.sort(reverse=True)
print(ordered_keys)
menu_tuple = tuple(menu.items())
print(menu_tuple)
print(type(menu_tuple))
print(menu_tuple[0])
print(type(menu_tuple[0]))
# Slicing the key/value tuple to obtain the key.
print(menu_tuple[0][0])
print(type(menu_tuple[0][0]))
# Slicing the key/value tuple to obtain the value.
print(menu_tuple[0][1])
print(type(menu_tuple[0][1]))
# Slicing the second item in the value list.
print(menu_tuple[0][1][1])
print(type(menu_tuple[0][1][1]))
print(menu["item1"][1])
print(menu_tuple[0][1][1] == menu["item1"][1])
order = "item9"
if menu.get(order, 0) == 0:
print("{} is not a valid dish. Please try again.".format(order))
else:
print("{} contains {}".format(order, menu.get(order)))
transportation = {"name": "coconut", "color": "brown"}
print(transportation.items())
transportation.setdefault("received_by", "swallow")
print(transportation.items())
transportation.setdefault("received_by", "found_on_ground")
print(transportation.items())
| 2.734375
| 3
|
src/ci_systems/travis.py
|
nifadyev/build-status-notifier
| 6
|
12779938
|
<reponame>nifadyev/build-status-notifier
"""Module for dealing with Travis CI API."""
import time
from typing import Dict, Any, List, Tuple, Optional
import requests
from requests import Response
from ..notifiers.slack import Slack
from ..custom_types import BUILD
class Travis():
"""Class for sending requests via Travis API and parsing their response."""
def __init__(self, config: Dict[str, Any]) -> None:
"""Initialize class instance.
Args:
config: Travis API specific credentials.
Attributes:
token: unique key.
author: author alias.
repository_id: repository ID in Travis.
frequency: interval in seconds between consecutive requests to API.
"""
self.token = config['token']
self.author = config['author']['login']
self.repository_id = config['repositories'][0]['id']
self.frequency = config['request_frequency']
def execute_command(self, command: str, args: List = None) -> None:
"""Execute supported command with optional arguments.
Args:
command: valid keyword.
args: optional arguments for some commands.
"""
if command == 'monitor':
print(f'Executing command {command} {args or ""}')
self.monitor_builds()
else:
print('Unsupported command')
def monitor_builds(self) -> None:
"""Check build statuses for specified author and repository with set frequency.
Check if there are any active builds of specific author.
If yes, run endless loop until all active builds are finished.
"""
initial_builds = self.get_builds_in_progress()
if not initial_builds:
print('There are no builds in progress')
return
while True:
builds_in_progress = self.get_builds_in_progress()
print(f'{len(builds_in_progress)} builds are still in progress')
if builds_in_progress == initial_builds:
time.sleep(self.frequency)
continue
for build in self.get_finished_builds(initial_builds, builds_in_progress):
is_build_finished = self.handle_build(build)
if not is_build_finished:
continue
if not builds_in_progress:
print('Finishing monitoring')
Slack.notify(message='All builds are finished')
break
initial_builds = builds_in_progress
time.sleep(self.frequency)
def get_builds_in_progress(self) -> Tuple[BUILD]:
"""Return list of active builds for specified repository and author.
Build is active if its status is `created` or `started`.
Returns:
tuple: sequence of active author's builds.
"""
request = self.request_factory(f'repo/{self.repository_id}/builds')
builds = request.json()['builds']
return tuple(
self.parse_build(build) for build in builds if self._is_build_in_progress(build))
def _is_build_in_progress(self, build: Dict[str, Any]) -> bool:
"""Check build status and author.
Args:
build: raw information about build from response.
Returns:
bool: indicates if build is in progress.
"""
return (
build['created_by']['login'] == self.author
and build['state'] in ('started', 'created', 'pending', 'running')
)
def handle_build(self, build: BUILD) -> bool:
"""Deal with build and call Slack method if build is finished.
Arguments:
build: necessary information about build.
Returns:
bool: indicates if message is sent successfully.
"""
build_response = self.request_factory(f'build/{build["id"]}')
build = self.parse_build(build=build_response.json())
# Do not send message if build status has changed from `created` to `started`
if build['status'] in ('started', 'created', 'pending', 'running'):
return False
if build['status'] == 'failed':
jobs_response = self.request_factory(f'build/{build["id"]}/jobs')
failed_job_id = self.get_failed_job_id(jobs_response.json())
log_response = self.request_factory(f'job/{failed_job_id}/log')
log = log_response.json()['content']
# Last 3 strings are useless
error_strings = log.splitlines()[-8:-3]
status_code = Slack.notify(build, error_strings, failed_job_id)
else:
status_code = Slack.notify(build)
return status_code == '200'
@staticmethod
def get_finished_builds(
initial_builds: Tuple[BUILD], builds_in_progress: Tuple[BUILD]) -> Tuple[BUILD]:
"""Return finished builds by comparing builds in progress with initial builds.
Args:
initial_builds: sequence of initial builds.
builds_in_progress: sequence of builds in progress.
Returns:
tuple: sequence of finished builds.
"""
return tuple(build for build in initial_builds if build not in builds_in_progress)
@staticmethod
def parse_build(build: Dict) -> BUILD:
"""Retrieve necessary information from raw build response.
Arguments:
build: raw response.
Returns:
dict: necessary information about build.
"""
return {
'id': build['id'],
'event': build['event_type'],
'status': build['state'],
'branch': build['branch']['name'],
'message': build['commit']['message'],
'duration': build['duration'],
'pr_url': build['commit']['compare_url'],
'commit_sha': build['commit']['sha']
}
@staticmethod
def get_failed_job_id(jobs: Dict) -> Optional[str]:
"""Return ID of failed Travis job.
Args:
jobs: information about all build jobs.
Returns:
str: job ID.
"""
for job in jobs['jobs']:
if job['state'] == 'failed':
return job['id']
def request_factory(self, path: str) -> Response:
"""Make request to Travis API with provided path.
Args:
path: path to specific resource.
Returns:
Response: response from API.
"""
return requests.get(
f'https://api.travis-ci.com/{path}',
headers={
'Travis-API-Version': '3',
'User-Agent': 'API Explorer',
'Authorization': f'token {self.token}'
}
)
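# A minimal usage sketch (not part of the module): the config keys below mirror
# the ones read in Travis.__init__; the token, author login, repository id and
# polling frequency are hypothetical placeholder values.
if __name__ == '__main__':
    example_config = {
        'token': 'XXXXXXXX',                   # hypothetical API token
        'author': {'login': 'some-author'},    # hypothetical Travis login
        'repositories': [{'id': 123456}],      # hypothetical repository id
        'request_frequency': 30,               # poll every 30 seconds
    }
    Travis(example_config).execute_command('monitor')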
| 2.515625
| 3
|
quantum_compiler/drawing.py
|
Debskij/QuantumCompiler
| 2
|
12779939
|
<reponame>Debskij/QuantumCompiler<filename>quantum_compiler/drawing.py<gh_stars>1-10
import matplotlib.pyplot
import typing
def draw_axes() -> None:
"""Draw axes on the plane."""
points = [[1.2, 0], [0, 1.2], [-1.2, 0], [0, -1.2]] # dummy points for zooming out
arrows = [[1.1, 0], [0, 1.1], [-1.1, 0], [0, -1.1]] # coordinates for the axes
for p in points:
matplotlib.pyplot.plot(p[0], p[1] + 0.1) # drawing dummy points
for a in arrows:
matplotlib.pyplot.arrow(0, 0, a[0], a[1], head_width=0.04, head_length=0.08) # drawing the axes
def draw_unit_circle() -> None:
"""Draw unit circle on the plane."""
unit_circle = matplotlib.pyplot.Circle((0, 0), 1, color="black", fill=False)
matplotlib.pyplot.gca().add_patch(unit_circle)
def draw_quantum_state(coords: typing.List[int], name: str, color: str = "blue") -> None:
"""
Draw quantum state of qbit.
:param coords: coordinates of quantum state on the plane.
:param name: name of the quantum state to plot
:param color: color of drawn arrow
:return: None
"""
x, y = list(coords)
x1 = 0.92 * x
y1 = 0.92 * y
matplotlib.pyplot.arrow(0, 0, x1, y1, head_width=0.04, head_length=0.08, color=color)
x2 = 1.15 * x
y2 = 1.15 * y
matplotlib.pyplot.text(x2, y2, name)
def draw_qbit() -> None:
"""Draw sample qbits on the plane."""
matplotlib.pyplot.figure(figsize=(6, 6), dpi=60) # draw a figure
# draw the origin
matplotlib.pyplot.plot(0, 0, "ro") # a point in red color
draw_axes() # drawing the axes by using one of our predefined function
draw_unit_circle() # drawing the unit circle by using one of our predefined function
# drawing |0>
matplotlib.pyplot.plot(1, 0, "o")
matplotlib.pyplot.text(1.05, 0.05, "|0>")
# drawing |1>
matplotlib.pyplot.plot(0, 1, "o")
matplotlib.pyplot.text(0.05, 1.05, "|1>")
# drawing -|0>
matplotlib.pyplot.plot(-1, 0, "o")
matplotlib.pyplot.text(-1.2, -0.1, "-|0>")
# drawing -|1>
matplotlib.pyplot.plot(0, -1, "o")
matplotlib.pyplot.text(-0.2, -1.1, "-|1>")
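# A minimal usage sketch (not part of the module): draw the qubit plane and one
# extra, hypothetical state half-way between |0> and |1>.
if __name__ == "__main__":
    import math
    draw_qbit()
    draw_quantum_state([math.cos(math.pi / 4), math.sin(math.pi / 4)], "|+>", color="red")
    matplotlib.pyplot.show()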
| 3.03125
| 3
|
tests/pyspark_testing/integration/test_driver.py
|
msukmanowsky/pyspark-testing
| 24
|
12779940
|
<reponame>msukmanowsky/pyspark-testing
from ... import relative_file
from . import PySparkIntegrationTest
from pyspark_testing import driver
from pyspark_testing.models import BroadbandCoverageInfo
class TestDriver(PySparkIntegrationTest):
def setUp(self):
self.data = (self.sc.textFile(driver.data_path(), use_unicode=False)
.map(lambda l: l.decode('latin_1'))
.map(BroadbandCoverageInfo.from_csv_line))
# def test_top_unserved(self):
# driver.top_unserved()
def test_summary_stats(self):
expected_stats = {
'broadband_available': 8714,
'broadband_unavailable': 41285,
'dsl_available': 14858,
'dsl_unavailable': 35141,
'wireless_available': 30971,
'wireless_unavailable': 19028
}
self.assertDictEqual(expected_stats, driver.summary_stats(self.data))
| 2.546875
| 3
|
cpu_bound.py
|
nagibinau/Multi-Group-26
| 0
|
12779941
|
<gh_stars>0
import concurrent.futures
import timeit
from hashlib import md5
from random import choice
def mining_money(worker_id):
    # Time how long each "coin" (an md5 digest ending in five zeros) takes to find.
    last_found = timeit.default_timer()
    while True:
        s = "".join([choice("0123456789") for _ in range(50)])
        h = md5(s.encode('utf8')).hexdigest()
        if h.endswith("00000"):
            print(s, h)
            now = timeit.default_timer()
            search_time = now - last_found
            last_found = now
            print(f'The coin was found in {search_time} seconds, worker id: {worker_id}\n')
def main():
max_workers = int(input('Set count workers: '))
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
for i in range(1, max_workers + 1):
executor.submit(mining_money, i)
if __name__ == '__main__':
main()
| 2.875
| 3
|
0004-Median-of-Two-Sorted-Arrays.py
|
Sax-Ted/Practicing-with-Leetcode
| 0
|
12779942
|
<reponame>Sax-Ted/Practicing-with-Leetcode
import math
from typing import List
class Solution:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
data = nums1 + nums2
data = sorted(data)
if len(data) % 2 != 0:
return data[math.ceil(len(data)/2) - 1]
else:
return (data[int(len(data) / 2 - 1)] + data[int(len(data) / 2)]) / 2
| 3.359375
| 3
|
test/example_3nu_vacuum_coeffs.py
|
mbustama/NuOscProbExact
| 14
|
12779943
|
# -*- coding: utf-8 -*-
r"""Run the vacuum coefficients 3nu example shown in README.md.
Runs the three-neutrino example of coefficients for oscillations in
vacuum shown in README.md
References
----------
.. [1] <NAME>, "Exact neutrino oscillation probabilities:
a fast general-purpose computation method for two and three neutrino
flavors", arXiv:1904.XXXXX.
Created: 2019/04/29 23:48
Last modified: 2019/04/29 23:48
"""
from __future__ import print_function
__version__ = "1.0"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
sys.path.append('../src')
import numpy as np
import oscprob3nu
import hamiltonians3nu
from globaldefs import *
energy = 1.e9 # Neutrino energy [eV]
baseline = 1.3e3 # Baseline [km]
h_vacuum_energy_indep = \
hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent( S12_NO_BF,
S23_NO_BF,
S13_NO_BF,
DCP_NO_BF,
D21_NO_BF,
D31_NO_BF)
h_vacuum = np.multiply(1./energy, h_vacuum_energy_indep)
h1, h2, h3, h4, h5, h6, h7, h8 = \
oscprob3nu.hamiltonian_3nu_coefficients(h_vacuum)
print('h1: {:.4e}'.format(h1))
print('h2: {:.4e}'.format(h2))
print('h3: {:.4e}'.format(h3))
print('h4: {:.4e}'.format(h4))
print('h5: {:.4e}'.format(h5))
print('h6: {:.4e}'.format(h6))
print('h7: {:.4e}'.format(h7))
print('h8: {:.4e}'.format(h8))
print()
u0, u1, u2, u3, u4, u5, u6, u7, u8 = \
oscprob3nu.evolution_operator_3nu_u_coefficients( \
h_vacuum,
baseline*CONV_KM_TO_INV_EV)
print('u0: {:.4f}'.format(u0))
print('u1: {:.4f}'.format(u1))
print('u2: {:.4f}'.format(u2))
print('u3: {:.4f}'.format(u3))
print('u4: {:.4f}'.format(u4))
print('u5: {:.4f}'.format(u5))
print('u6: {:.4f}'.format(u6))
print('u7: {:.4f}'.format(u7))
print('u8: {:.4f}'.format(u8))
print()
evol_operator = \
oscprob3nu.evolution_operator_3nu(h_vacuum, baseline*CONV_KM_TO_INV_EV)
print('U3 = ')
with np.printoptions(precision=3, suppress=True):
print(np.array(evol_operator))
| 2.4375
| 2
|
answers/AkshajV1309/Day14/D14Q1.py
|
arc03/30-DaysOfCode-March-2021
| 22
|
12779944
|
<reponame>arc03/30-DaysOfCode-March-2021
def remdupli(S):
if len(S)==1:
return S
if S[0]==S[1]:
return remdupli(S[1:])
return S[0]+remdupli(S[1:])
S=input('Enter string: ')
print('Output:',remdupli(S))
| 3.609375
| 4
|
atom/nucleus/python/test/test_roundup_api.py
|
sumit4-ttn/SDK
| 0
|
12779945
|
# coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.api.roundup_api import RoundupApi # noqa: E501
from nucleus_api.rest import ApiException
class TestRoundupApi(unittest.TestCase):
"""RoundupApi unit test stubs"""
def setUp(self):
self.api = nucleus_api.api.roundup_api.RoundupApi() # noqa: E501
def tearDown(self):
pass
def test_create_roundup_settings_using_post(self):
"""Test case for create_roundup_settings_using_post
Create a Roundup Settings # noqa: E501
"""
pass
def test_create_roundup_using_post(self):
"""Test case for create_roundup_using_post
Create a roundup # noqa: E501
"""
pass
def test_delete_roundup_settings_using_delete(self):
"""Test case for delete_roundup_settings_using_delete
Delete a roundup settings # noqa: E501
"""
pass
def test_get_roundup_all_using_get(self):
"""Test case for get_roundup_all_using_get
List all roundups # noqa: E501
"""
pass
def test_get_roundup_settings_all_using_get(self):
"""Test case for get_roundup_settings_all_using_get
List all roundup settings # noqa: E501
"""
pass
def test_get_roundup_settings_using_get(self):
"""Test case for get_roundup_settings_using_get
Retrieve a Roundup Setting # noqa: E501
"""
pass
def test_get_roundup_using_get(self):
"""Test case for get_roundup_using_get
Retrieve a Roundup # noqa: E501
"""
pass
def test_update_roundup_settings_using_put(self):
"""Test case for update_roundup_settings_using_put
Update a roundup settings # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 1.703125
| 2
|
outingbox/views.py
|
kartikanand/outing-box
| 0
|
12779946
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.utils import timezone
from watson import search
from .models import Box, Activity, Category, SubZone, UserBookmark, UserRating, FeaturedActivity, UserReview
from .forms import FeedbackForm
from .decorators import require_user_authenticated, require_activity
import logging
logger = logging.getLogger(__name__)
def get_paginated_list(lst, num_objects_on_page, page):
paginator = Paginator(lst, num_objects_on_page)
try:
paginated_list = paginator.page(page)
except PageNotAnInteger:
paginated_list = paginator.page(1)
except EmptyPage:
paginated_list = paginator.page(paginator.num_pages)
return paginated_list
def handler404(request):
response = render(request, 'outingbox/404.html', {})
response.status_code = 404
return response
def handler500(request):
response = render(request, 'outingbox/500.html', {})
response.status_code = 500
return response
def index_view(request):
boxes = Box.objects.all()
featured_set = FeaturedActivity.objects.all()
featured = []
if featured_set.count() > 0:
featured = featured_set[0]
return render(request, 'outingbox/index.html', {'boxes': boxes, 'featured': featured})
def contact_us_view(request):
if request.method == 'GET':
form = FeedbackForm()
else:
form = FeedbackForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('feedback-thanks'))
return render(request, 'outingbox/contact-us.html', {'form': form})
def contact_us_thanks(request):
return render(request, 'outingbox/contact-us.html', {'thanks': True})
def about_us_view(request):
return render(request, 'outingbox/about-us.html')
def box_view(request, id=None, title=None):
box = get_object_or_404(Box, pk=id)
categories = box.category_set.all()
activities = Activity.objects.filter(category__in=categories).distinct()
# Default to page 1
page = request.GET.get('page', 1)
activities = get_paginated_list(activities, 12, page)
url_prev_page_number = None
if activities.has_previous():
url_prev_page_number = add_page_to_request_url(request, 'box', {'page': activities.previous_page_number()}, kwargs={'id':id, 'title':box.title})
url_next_page_number = None
if activities.has_next():
url_next_page_number = add_page_to_request_url(request, 'box', {'page': activities.next_page_number()}, kwargs={'id':id, 'title':box.title})
return render(request, 'box/box.html', {
'box': box,
'activities': activities,
'url_next_page_number': url_next_page_number,
'url_prev_page_number': url_prev_page_number
})
@login_required
def profile_bookmarks_view(request):
try:
user_bookmark_inst = UserBookmark.objects.get(user=request.user)
bookmarks = user_bookmark_inst.bookmarks.all()
except UserBookmark.DoesNotExist:
bookmarks = []
page = request.GET.get('page', 1)
bookmarks = get_paginated_list(bookmarks, 12, page)
url_prev_page_number = None
if bookmarks.has_previous():
url_prev_page_number = add_page_to_request_url(request, 'profile_bookmarks', {'page': bookmarks.previous_page_number()})
url_next_page_number = None
if bookmarks.has_next():
url_next_page_number = add_page_to_request_url(request, 'profile_bookmarks', {'page': bookmarks.next_page_number()})
return render(request, 'account/bookmarks.html', {
'bookmarks': bookmarks,
'url_next_page_number': url_next_page_number,
'url_prev_page_number': url_prev_page_number
})
def activity_view(request, id=None, title=None):
activity = get_object_or_404(Activity, pk=id)
user_bookmarks = None
user_rating = 0
user_review = None
if request.user.is_authenticated():
try:
user_bookmark_inst = UserBookmark.objects.get(user=request.user)
user_bookmarks = user_bookmark_inst.bookmarks.all()
except UserBookmark.DoesNotExist:
pass
try:
user_rating_inst = UserRating.objects.get(user=request.user, activity=activity)
user_rating = user_rating_inst.rating
except UserRating.DoesNotExist:
pass
try:
user_review = UserReview.objects.get(user=request.user, activity=activity)
except UserReview.DoesNotExist:
pass
reviews = UserReview.objects.filter(activity=activity)
context = {
'activity': activity,
'bookmarks': user_bookmarks,
'photos': activity.photos.all(),
'reviews': reviews,
'user_rating': user_rating,
'user_review': user_review
}
return render(request, 'activity/activity.html', context)
@login_required
def profile_view(request):
try:
user_bookmark_inst = UserBookmark.objects.get(user=request.user)
bookmarks = user_bookmark_inst.bookmarks.all()[:3]
except UserBookmark.DoesNotExist:
bookmarks = []
return render(request, 'account/profile.html', {
'bookmarks': bookmarks
    })
@csrf_protect
@require_POST
@require_user_authenticated
@require_activity
def rate_activity(request, activity):
delete_rating = request.POST.get('delete', None)
if delete_rating:
try:
user_rating_inst = UserRating.objects.get(user=request.user, activity=activity)
old_rating = user_rating_inst.rating
if activity.votes == 1:
activity.rating = 0
else:
activity.rating = (activity.rating*activity.votes - old_rating)/(activity.votes-1)
activity.votes = activity.votes - 1
activity.save()
user_rating_inst.delete()
except UserRating.DoesNotExist:
pass
return JsonResponse({'msg': 'ok', 'status': '0'})
rating_str = request.POST.get('new_rating', None)
if not rating_str:
res = JsonResponse({'msg': 'invalid rating', 'status': '1'})
res.status_code = 400
return res
# query string params are always string; coerce to int
try:
rating = int(rating_str)
except ValueError:
res = JsonResponse({'msg': 'invalid rating', 'status': '1'})
res.status_code = 400
return res
if (rating > 5) or (rating <= 0):
res = JsonResponse({'msg': 'invalid rating', 'status': '1'})
res.status_code = 400
return res
old_rating = None
try:
user_rating_inst = UserRating.objects.get(user=request.user, activity=activity)
old_rating = user_rating_inst.rating
except UserRating.DoesNotExist:
user_rating_inst = UserRating(user=request.user, activity=activity)
if old_rating is None:
user_rating_inst.rating = rating
user_rating_inst.save()
activity.rating = (activity.rating*activity.votes+rating)/(activity.votes+1)
activity.votes = activity.votes + 1
activity.save()
elif old_rating != rating:
user_rating_inst.rating = rating
user_rating_inst.save()
activity.rating = (activity.rating*activity.votes - old_rating + rating)/(activity.votes)
activity.save()
return JsonResponse({'msg': 'ok', 'status': '0'})
@csrf_protect
@require_POST
@require_user_authenticated
@require_activity
def bookmark_activity(request, activity):
user_bookmark_inst, created = UserBookmark.objects.get_or_create(user=request.user)
delete_bookmark = request.POST.get('delete', None)
if delete_bookmark:
user_bookmark_inst.bookmarks.remove(activity)
else:
user_bookmark_inst.bookmarks.add(activity)
return JsonResponse({'msg': 'ok', 'status': '0'})
@csrf_protect
@require_POST
@require_user_authenticated
@require_activity
def comment_activity(request, activity):
delete_review = request.POST.get('delete', None)
if delete_review:
try:
user_review_inst = UserReview.objects.get(user=request.user, activity=activity)
user_review_inst.delete()
except UserReview.DoesNotExist:
pass
return JsonResponse({
'msg': 'ok',
'status': '0'
})
review = request.POST.get('review', '')
if not review or len(review) > 512:
        res = JsonResponse({'msg': 'review must be between 1 and 512 characters', 'status': '1'})
        res.status_code = 400
        return res
try:
user_review_inst = UserReview.objects.get(user=request.user, activity=activity)
except UserReview.DoesNotExist:
user_review_inst = UserReview(user=request.user, activity=activity)
user_review_inst.review = review
user_review_inst.pub_date = timezone.now()
user_review_inst.is_published = True
user_review_inst.save()
date_format = '%b. %d, %Y'
return JsonResponse({
'msg': 'ok',
'status': '0',
'date': user_review_inst.pub_date.strftime(date_format),
'username': request.user.username
})
# _new_params must be a dictionary
def add_page_to_request_url(request, view_name, _new_params, kwargs=None):
_dict = request.GET.copy()
    # QueryDict.update() appends to an existing key instead of replacing its value,
    # so drop any existing 'page' key before adding the new one.
try:
_dict.pop('page')
except KeyError:
pass
_dict.update(_new_params)
return reverse(view_name, kwargs=kwargs)+'?'+_dict.urlencode()
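# Build a search URL that keeps the current query string, resets to page 1
# and applies the given ordering key ('ob').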
def get_search_filter_urls(request, order_by):
_dict = request.GET.copy()
_dict['page'] = 1
_dict['ob'] = order_by
return reverse('search')+'?'+_dict.urlencode()
def search_view(request):
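    # GET parameters: 'query' free-text search, 'page' page number, 'ob' ordering
    # key (see order_dict below), 'sz' sub-zone ids, 'c' category ids.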
query = request.GET.get('query', '')
page = request.GET.get('page', 1)
order_by = request.GET.get('ob', '')
sub_zones_selected_list = request.GET.getlist('sz', [])
categories_selected_list = request.GET.getlist('c', [])
sub_zone_list = SubZone.objects.all_name_value_list()
category_list = Category.objects.all_name_value_list()
activities = Activity.objects.all()
int_sub_zones_selected_list = []
if sub_zones_selected_list:
for sub_zone in sub_zones_selected_list:
try:
int_sub_zone = int(sub_zone)
int_sub_zones_selected_list.append(int_sub_zone)
except ValueError:
raise Http404("No results")
activities = activities.filter(address__sub_zone__in=int_sub_zones_selected_list)
int_categories_selected_list = []
if categories_selected_list:
for category in categories_selected_list:
try:
int_category = int(category)
int_categories_selected_list.append(int_category)
except ValueError:
raise Http404("No results")
activities = activities.filter(category__in=int_categories_selected_list)
if query:
activities = search.filter(activities, query)
activities = activities.distinct()
order_dict = {
'raa': 'rating', # Rating ascending
'rad': '-rating', # Rating descending
'pra': 'cost', # Price ascending
'prd': '-cost' # Price descending
}
    if order_by in order_dict:
        activities = activities.order_by(order_dict[order_by])
results_paginator = Paginator(activities, 10)
try:
results_page = results_paginator.page(page)
except PageNotAnInteger:
results_page = results_paginator.page(1)
except EmptyPage:
results_page = results_paginator.page(results_paginator.num_pages)
activities = results_page
order_by_relevance_url = get_search_filter_urls(request, '')
order_by_rating_url = get_search_filter_urls(request, 'rad')
order_by_price_url = get_search_filter_urls(request, 'pra')
url_prev_page_number = None
url_next_page_number = None
if activities.has_previous():
url_prev_page_number = add_page_to_request_url(request, 'search', {'page': activities.previous_page_number()})
if activities.has_next():
url_next_page_number = add_page_to_request_url(request, 'search', {'page': activities.next_page_number()})
bookmarks = None
if request.user.is_authenticated():
try:
user_bookmark_inst = UserBookmark.objects.get(user=request.user)
bookmarks = user_bookmark_inst.bookmarks.all()
except UserBookmark.DoesNotExist:
pass
context = {
'activities': activities,
'order_by_relevance_url': order_by_relevance_url,
'order_by_rating_url': order_by_rating_url,
'order_by_price_url': order_by_price_url,
'url_next_page_number': url_next_page_number,
'url_prev_page_number': url_prev_page_number,
'sub_zone_list': sub_zone_list,
'category_list': category_list,
'sub_zones_selected_list': int_sub_zones_selected_list,
'categories_selected_list': int_categories_selected_list,
'query': query,
'page': page,
'bookmarks': bookmarks
}
return render(request, 'search/search.html', context)
| 1.953125
| 2
|
examples/example.py
|
jsphon/flumine
| 0
|
12779947
|
import time
import logging
import betfairlightweight
from betfairlightweight.filters import streaming_market_filter
from pythonjsonlogger import jsonlogger
from flumine import Flumine, clients, BaseStrategy
from flumine.order.trade import Trade
from flumine.order.ordertype import LimitOrder
from flumine.order.order import OrderStatus
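# Configure the root logger to emit JSON log lines with UTC timestamps.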
logger = logging.getLogger()
custom_format = "%(asctime) %(levelname) %(message)"
log_handler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter(custom_format)
formatter.converter = time.gmtime
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
logger.setLevel(logging.INFO)
class ExampleStrategy(BaseStrategy):
def start(self):
# subscribe to streams
print("starting strategy 'ExampleStrategy'")
def check_market_book(self, market, market_book):
# process_market_book only executed if this returns True
if market_book.status != "CLOSED":
return True
def process_market_book(self, market, market_book):
# process marketBook object
for runner in market_book.runners:
if (
runner.status == "ACTIVE"
and runner.last_price_traded
and runner.selection_id == 11982403
):
trade = Trade(
market_id=market_book.market_id,
selection_id=runner.selection_id,
handicap=runner.handicap,
strategy=self,
)
order = trade.create_order(
side="LAY", order_type=LimitOrder(price=1.01, size=2.00)
)
self.place_order(market, order)
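    # Order management sketch: if an order is still executable and fully unmatched
    # (size_remaining == 2.00) after more than 5 seconds, cancel 1.51 of the stake.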
def process_orders(self, market, orders):
for order in orders:
if order.status == OrderStatus.EXECUTABLE:
if order.elapsed_seconds and order.elapsed_seconds > 5:
# print(order.bet_id, order.average_price_matched, order.size_matched)
if order.size_remaining == 2.00:
self.cancel_order(market, order, size_reduction=1.51)
# self.update_order(market, order, "PERSIST")
# if order.order_type.price == 1.01 and order.size_remaining == 0.49:
# self.replace_order(market, order, 1.02)
# if order.order_type.price == 1.02:
# self.replace_order(market, order, 1.03)
# if order.order_type.price == 1.03:
# self.replace_order(market, order, 1.05)
pass
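# NOTE: a real APIClient typically also needs a password, app key and certificates
# to log in; only a placeholder username is shown here.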
trading = betfairlightweight.APIClient("username")
client = clients.BetfairClient(trading)
framework = Flumine(client=client)
strategy = ExampleStrategy(
market_filter=streaming_market_filter(market_ids=["1.170378175"]),
streaming_timeout=2,
)
framework.add_strategy(strategy)
framework.run()
| 2.140625
| 2
|
exercise/Exercism/python/largest-series-product/largest_series_product.py
|
orca-j35/python-notes
| 1
|
12779948
|
from operator import mul
from functools import reduce
def largest_product(series, size):
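    # Slide a window of `size` digits across `series` and track the largest product.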
if size == 0:
return 1
if any((size > len(series), not series.isdecimal(), size < 0)):
        raise ValueError('size must be between 0 and the length of the series, and the series must contain only digits')
max_v = 0
for i in range(0, len(series) - size + 1):
max_v = max(reduce(mul, [int(j) for j in series[i:i + size]]), max_v)
return max_v
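# Example: largest_product("1027839564", 3) == 270  (from the window "956")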
| 3.1875
| 3
|
tests/unit_test/chat/chat_test.py
|
rit1200/kairon
| 9
|
12779949
|
from unittest.mock import patch
from urllib.parse import urlencode, quote_plus
from kairon.shared.utils import Utility
import pytest
import os
from mongoengine import connect, ValidationError
from kairon.shared.chat.processor import ChatDataProcessor
from re import escape
import responses
class TestChat:
@pytest.fixture(autouse=True, scope='class')
def setup(self):
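        # Load the test environment config and open a mongoengine connection
        # to the configured test database.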
os.environ["system_file"] = "./tests/testing_data/system.yaml"
Utility.load_environment()
db_url = Utility.environment['database']["url"]
pytest.db_url = db_url
connect(**Utility.mongoengine_connection(Utility.environment['database']["url"]))
def test_save_channel_config_invalid(self):
with pytest.raises(ValidationError, match="Invalid channel type custom"):
ChatDataProcessor.save_channel_config({"connector_type": "custom",
"config": {
"bot_user_oAuth_token": "<PASSWORD>",
"slack_signing_secret": "<KEY>"}},
"test",
"test")
with pytest.raises(ValidationError,
match=escape("Missing ['bot_user_oAuth_token', 'slack_signing_secret'] all or any in config")):
ChatDataProcessor.save_channel_config({"connector_type": "slack",
"config": {
"slack_signing_secret": "<KEY>"}},
"test",
"test")
with pytest.raises(ValidationError,
match=escape("Missing ['bot_user_oAuth_token', 'slack_signing_secret'] all or any in config")):
ChatDataProcessor.save_channel_config({"connector_type": "slack",
"config": {
"bot_user_oAuth_token": "<PASSWORD>01939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K",
}},
"test",
"test")
def test_save_channel_config(self):
ChatDataProcessor.save_channel_config({"connector_type": "slack",
"config": {
"bot_user_oAuth_token": "<PASSWORD>",
"slack_signing_secret": "<KEY>"}},
"test",
"test")
def test_update_channel_config(self):
ChatDataProcessor.save_channel_config({"connector_type": "slack",
"config": {
"bot_user_oAuth_token": "<KEY>",
"slack_signing_secret": "<KEY>"}},
"test",
"test")
slack = ChatDataProcessor.get_channel_config("slack", "test", mask_characters=False)
assert slack.get("connector_type") == "slack"
assert str(slack["config"].get("bot_user_oAuth_token")).startswith("Test")
        assert "***" not in str(slack["config"].get("slack_signing_secret"))
def test_list_channel_config(self):
channels = list(ChatDataProcessor.list_channel_config("test"))
slack = channels[0]
        assert len(channels) == 1
        assert slack.get("connector_type") == "slack"
        assert "***" in str(slack["config"].get("bot_user_oAuth_token"))
        assert "***" in str(slack["config"].get("slack_signing_secret"))
channels = list(ChatDataProcessor.list_channel_config("test", mask_characters=False))
slack = channels[0]
        assert len(channels) == 1
        assert slack.get("connector_type") == "slack"
        assert "***" not in str(slack["config"].get("bot_user_oAuth_token"))
        assert "***" not in str(slack["config"].get("slack_signing_secret"))
def test_get_channel_config_slack(self):
slack = ChatDataProcessor.get_channel_config("slack", "test")
assert slack.get("connector_type") == "slack"
        assert "***" in str(slack["config"].get("bot_user_oAuth_token"))
        assert "***" in str(slack["config"].get("slack_signing_secret"))
slack = ChatDataProcessor.get_channel_config("slack", "test", mask_characters=False)
assert slack.get("connector_type") == "slack"
        assert "***" not in str(slack["config"].get("bot_user_oAuth_token"))
        assert "***" not in str(slack["config"].get("slack_signing_secret"))
def test_delete_channel_config_slack(self):
ChatDataProcessor.delete_channel_config("slack", "test")
        assert len(list(ChatDataProcessor.list_channel_config("test"))) == 0
@responses.activate
def test_save_channel_config_telegram(self):
access_token = "<PASSWORD>"
webhook = urlencode({'url': "https://test@test.com/api/bot/telegram/tests/test"}, quote_via=quote_plus)
responses.add("GET",
json={'result': True},
url=f"{Utility.system_metadata['channels']['telegram']['api']['url']}/bot{access_token}/setWebhook?{webhook}")
def __mock_endpoint(*args):
return f"https://test@test.com/api/bot/telegram/tests/test"
with patch('kairon.shared.data.utils.DataUtility.get_channel_endpoint', __mock_endpoint):
ChatDataProcessor.save_channel_config({"connector_type": "telegram",
"config": {
"access_token": access_token,
"webhook_url": webhook,
"username_for_bot": "test"}},
"test",
"test")
@responses.activate
def test_save_channel_config_telegram_invalid(self):
access_token = "<PASSWORD>"
webhook = {'url': "https://test@test.com/api/bot/telegram/tests/test"}
webhook = urlencode(webhook, quote_via=quote_plus)
responses.add("GET",
json={'result': False, 'error_code': 400, 'description': "Invalid Webhook!"},
url=f"{Utility.system_metadata['channels']['telegram']['api']['url']}/bot{access_token}/setWebhook?{webhook}")
with pytest.raises(ValidationError, match="Invalid Webhook!"):
def __mock_endpoint(*args):
return f"https://test@test.com/api/bot/telegram/tests/test"
with patch('kairon.shared.data.utils.DataUtility.get_channel_endpoint', __mock_endpoint):
ChatDataProcessor.save_channel_config({"connector_type": "telegram",
"config": {
"access_token": access_token,
"webhook_url": webhook,
"username_for_bot": "test"}},
"test",
"test")
| 1.835938
| 2
|
detector/attributes/const_functions_state.py
|
caomingpei/smart-contract-vulnerability-detector
| 2
|
12779950
|
"""
Module detecting constant functions
Recursively check the called functions
"""
from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
from slither.formatters.attributes.const_functions import custom_format
class ConstantFunctionsState(AbstractDetector):
"""
Constant function detector
"""
ARGUMENT = "constant-function-state" # run the detector with slither.py --ARGUMENT
HELP = "Constant functions changing the state" # help information
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#constant-functions-changing-the-state"
WIKI_TITLE = "Constant functions changing the state"
# region wiki_description
WIKI_DESCRIPTION = """
Functions declared as `constant`/`pure`/`view` change the state.
`constant`/`pure`/`view` was not enforced prior to Solidity 0.5.
Starting from Solidity 0.5, a call to a `constant`/`pure`/`view` function uses the `STATICCALL` opcode, which reverts in case of state modification.
As a result, a call to an [incorrectly labeled function may trap a contract compiled with Solidity 0.5](https://solidity.readthedocs.io/en/develop/050-breaking-changes.html#interoperability-with-older-contracts)."""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Constant{
uint counter;
function get() public view returns(uint){
counter = counter +1;
       return counter;
}
}
```
`Constant` was deployed with Solidity 0.4.25. Bob writes a smart contract that interacts with `Constant` in Solidity 0.5.0.
All the calls to `get` revert, breaking Bob's smart contract execution."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = (
"Ensure that attributes of contracts compiled prior to Solidity 0.5.0 are correct."
)
def _detect(self):
"""Detect the constant function changing the state
Recursively visit the calls
Returns:
list: {'vuln', 'filename,'contract','func','#varsWritten'}
"""
results = []
if self.compilation_unit.solc_version and self.compilation_unit.solc_version >= "0.5.0":
return results
for c in self.contracts:
for f in c.functions:
if f.contract_declarer != c:
continue
if f.view or f.pure:
variables_written = f.all_state_variables_written()
if variables_written:
attr = "view" if f.view else "pure"
info = [
f,
f" is declared {attr} but changes state variables:\n",
]
for variable_written in variables_written:
info += ["\t- ", variable_written, "\n"]
res = self.generate_result(info, {"contains_assembly": False})
results.append(res)
return results
@staticmethod
def _format(slither, result):
custom_format(slither, result)
| 2.421875
| 2
|