max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
models/multi_emnist.py | OpenXAIProject/dac | 17 | 12769451 |
import os
import argparse
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision.utils import make_grid
from torch.nn.utils import weight_norm
from utils.misc import add_args
from utils.paths import datasets_path, benchmarks_path
from utils.tensor import to_numpy
from data.base import sample_anchors
from data.multi_emnist import ClusteredMultiEMNIST, sample_idxs, \
get_train_loader, get_test_loader
from modules.attention import StackedISAB, PMA, MAB
from modules.misc import Flatten, View
from flows.autoregressive import MAF
from flows.distributions import FlowDistribution, Normal, Bernoulli
from models.base import AnchoredFilteringModel, MinFilteringModel
parser = argparse.ArgumentParser()
# for training
parser.add_argument('--B', type=int, default=10)
parser.add_argument('--N', type=int, default=100)
parser.add_argument('--K', type=int, default=4)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--num_steps', type=int, default=20000)
parser.add_argument('--filtering_benchmark', type=str, default=None)
parser.add_argument('--clustering_benchmark', type=str, default=None)
parser.add_argument('--vB', type=int, default=1)
parser.add_argument('--vN', type=int, default=100)
parser.add_argument('--vK', type=int, default=4)
sub_args, _ = parser.parse_known_args()
class FilteringNetwork(nn.Module):
def __init__(self, num_filters=32, dim_lats=128,
dim_hids=256, dim_context=256, num_inds=32):
super().__init__()
C = num_filters
self.enc = nn.Sequential(
nn.Conv2d(3, C, 3, stride=2),
nn.BatchNorm2d(C),
nn.ReLU(),
nn.Conv2d(C, 2*C, 3, stride=2),
nn.BatchNorm2d(2*C),
nn.ReLU(),
nn.Conv2d(2*C, 4*C, 3),
Flatten())
self.isab1 = StackedISAB(4*C*4*4, dim_hids, num_inds, 4)
self.pma = PMA(dim_hids, dim_hids, 1)
self.fc1 = nn.Linear(dim_hids, dim_context)
self.posterior = Normal(dim_lats, use_context=True,
context_enc=nn.Linear(4*C*4*4 + dim_context, 2*dim_lats))
self.prior = FlowDistribution(
MAF(dim_lats, dim_hids, 6, dim_context=dim_context, inv_linear=True),
Normal(dim_lats))
self.dec = nn.Sequential(
nn.Linear(dim_lats + dim_context, 4*C*4*4),
nn.ReLU(),
View(-1, 4*C, 4, 4),
nn.ConvTranspose2d(4*C, 2*C, 3, stride=2, padding=1),
nn.BatchNorm2d(2*C),
nn.ReLU(),
nn.ConvTranspose2d(2*C, C, 3, stride=2, padding=1),
nn.BatchNorm2d(C),
nn.ReLU(),
nn.ConvTranspose2d(C, 3, 3, stride=2, output_padding=1),
View(-1, 3, 28, 28))
self.likel = Bernoulli((3, 28, 28), use_context=True)
self.mab = MAB(dim_hids, dim_hids, dim_hids)
self.isab2 = StackedISAB(dim_hids, dim_hids, num_inds, 4)
self.fc2 = nn.Linear(dim_hids, 1)
def forward(self, X, mask=None, return_z=False):
B, N, C, H, W = X.shape
x = X.view(B*N, C, H, W)
h_enc = self.enc(x)
H_X = self.isab1(h_enc.view(B, N, -1), mask=mask)
H_theta = self.pma(H_X, mask=mask)
theta = self.fc1(H_theta)
theta_ = theta.repeat(1, N, 1).view(B*N, -1)
z, logq = self.posterior.sample(context=torch.cat([h_enc, theta_], -1))
logp = self.prior.log_prob(z, context=theta_)
kld = (logq - logp).view(B, N)
h_dec = self.dec(torch.cat([z, theta_], -1))
ll = self.likel.log_prob(x, context=h_dec).view(B, N) - kld
ll /= H*W
H_dec = self.mab(H_X, H_theta)
logits = self.fc2(self.isab2(H_dec, mask=mask)).squeeze(-1)
outputs = {'ll':ll, 'theta':theta, 'logits':logits}
if return_z:
outputs['z'] = z
return outputs
class Model(MinFilteringModel):
def __init__(self, args):
super().__init__(args)
self.filtering_benchmark = os.path.join(benchmarks_path, 'memnist_10_100_4.tar') \
if self.filtering_benchmark is None \
else os.path.join(benchmarks_path, self.filtering_benchmark)
self.clustering_benchmark = os.path.join(benchmarks_path, 'memnist_10_300_12.tar') \
if self.clustering_benchmark is None \
else os.path.join(benchmarks_path, self.clustering_benchmark)
self.net = FilteringNetwork()
self.train_metrics = ['ll', 'bcent']
self.test_metrics = ['ll', 'bcent']
def sample(self, B, N, K, **kwargs):
dataset = ClusteredMultiEMNIST(train=False)
batch = sample_idxs(dataset.idx_to_class, B, N, K, **kwargs)
return dataset[batch]
def build_train_loader(self):
self.train_loader = get_train_loader(self.B, self.N, self.K, self.num_steps,
rand_N=True, rand_K=True)
def build_test_loader(self, filename=None):
filename = self.filtering_benchmark if filename is None else filename
self.test_loader = get_test_loader(filename)
def gen_benchmarks(self, force=False):
if not os.path.isfile(self.filtering_benchmark) or force:
print('generating benchmark {}...'.format(self.filtering_benchmark))
idx_to_class = ClusteredMultiEMNIST(train=False).idx_to_class
bench = [sample_idxs(idx_to_class, 10, 100, 4, rand_N=True, rand_K=True) \
for _ in range(100)]
torch.save(bench, self.filtering_benchmark)
print('generating benchmark {}...'.format(self.clustering_benchmark))
bench = [sample_idxs(idx_to_class, 10, 300, 12, rand_N=True, rand_K=True) \
for _ in range(100)]
torch.save(bench, self.clustering_benchmark)
def combine_digits(self, X):
B, N, C, H, W = X.shape
cX = torch.zeros(B, N, 1, 2*H, 2*W)
cX[:,:,0,:H,:W] = X[:,:,0,:,:]
cX[:,:,0,:H,W:] = X[:,:,1,:,:]
cX[:,:,0,H:,:W] = X[:,:,2,:,:]
return cX
def plot_clustering(self, X, results):
X = self.combine_digits(X)[0]
labels = results['labels'][0]
ulabels = torch.unique(labels)
K = len(ulabels)
fig, axes = plt.subplots(1, K, figsize=(50, 50))
for k, l in enumerate(ulabels):
Xk = X[labels==l]
Xk = Xk[: Xk.shape[0] - Xk.shape[0] % 4]
I = to_numpy(make_grid(1-Xk, nrow=4, pad_value=0)).transpose(1, 2, 0)
axes[k].set_title('cluster {}'.format(k+1), fontsize=100)
axes[k].imshow(I)
axes[k].axis('off')
plt.tight_layout()
def plot_filtering(self, batch):
X = batch['X'].cuda()
B, N, C, H, W = X.shape
net = self.net
net.eval()
with torch.no_grad():
outputs = net(X, return_z=True)
theta = outputs['theta']
theta_ = theta.repeat(1, N, 1).view(B*N, -1)
labels = (outputs['logits'] > 0.0).long()
# conditional generation
z, _ = net.prior.sample(B*N, device='cuda', context=theta_)
h_dec = net.dec(torch.cat([z, theta_], -1))
gX, _ = net.likel.sample(context=h_dec)
gX = gX.view(B, N, C, H, W)
z = outputs['z']
h_dec = net.dec(torch.cat([z, theta_], -1))
rX, _ = net.likel.sample(context=h_dec)
rX = rX.view(B, N, C, H, W)
fig, axes = plt.subplots(1, 2, figsize=(40, 40))
X = self.combine_digits(X)[0]
labels = labels[0]
X1 = X[labels==1]
X1 = X1[: X1.shape[0] - X1.shape[0] % 8]
I = to_numpy(make_grid(1-X1, nrow=8, pad_value=0)).transpose(1, 2, 0)
axes[0].imshow(I)
axes[0].set_title('Filtered out images', fontsize=60, pad=20)
axes[0].axis('off')
X0 = X[labels==0]
X0 = X0[: X0.shape[0] - X0.shape[0] % 8]
I = to_numpy(make_grid(1-X0, nrow=8, pad_value=0)).transpose(1, 2, 0)
axes[1].imshow(I)
axes[1].set_title('Remaining images', fontsize=60, pad=20)
axes[1].axis('off')
plt.tight_layout()
#plt.savefig('figures/emnist_filtering.png', bbox_inches='tight')
gX = self.combine_digits(gX)[0][:32]
plt.figure()
I = to_numpy(make_grid(1-gX, nrow=8, pad_value=0)).transpose(1, 2, 0)
plt.imshow(I)
plt.title('Generated images', fontsize=15, pad=5)
plt.axis('off')
#plt.savefig('figures/emnist_gen.png', bbox_inches='tight')
fig, axes = plt.subplots(1, 2, figsize=(40, 40))
rX = self.combine_digits(rX)[0]
X1 = rX[labels==1]
X1 = X1[: X1.shape[0] - X1.shape[0] % 8]
I = to_numpy(make_grid(1-X1, nrow=8, pad_value=0)).transpose(1, 2, 0)
axes[0].imshow(I)
axes[0].set_title('Reconstructions of filtered out images', fontsize=60, pad=20)
axes[0].axis('off')
X0 = rX[labels==0]
X0 = X0[: X0.shape[0] - X0.shape[0] % 8]
I = to_numpy(make_grid(1-X0, nrow=8, pad_value=0)).transpose(1, 2, 0)
axes[1].imshow(I)
axes[1].set_title('Reconstructions of remaining images', fontsize=60, pad=20)
axes[1].axis('off')
plt.tight_layout()
#plt.savefig('figures/emnist_recon.png', bbox_inches='tight')
def load(args):
add_args(args, sub_args)
return Model(args)
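# A minimal, illustrative sketch of how `load` might be driven from a
# training harness (the exact fields expected on `args` are defined by
# MinFilteringModel elsewhere in the repo, so they are assumptions here):
#
#   import argparse
#   args, _ = argparse.ArgumentParser().parse_known_args()
#   model = load(args)   # add_args() merges the module-level sub_args into args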
| 1.953125 | 2 |
app/file_helpers.py | lokalise-document-preview/docx-to-pdf | 0 | 12769452 | from io import BytesIO
from zipfile import ZipFile
import urllib.request
ALLOWED_EXTENSIONS = set(['docx'])
def is_file_allowed(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def unzip_into_bytes(url):
    # Download a zip archive and return the raw bytes of its first regular
    # (non-directory) member; directory entries end with "/" in the listing.
    response = urllib.request.urlopen(url)
    archive = ZipFile(BytesIO(response.read()))
    for file in archive.filelist:
        if not file.filename.endswith("/"):
            return archive.read(file)
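# A small usage sketch (the URL is a placeholder, not a real endpoint):
#
#   if is_file_allowed("report.docx"):
#       data = unzip_into_bytes("https://example.com/report.zip")
#       # `data` now holds the raw bytes of the first file in the archive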
| 3.359375 | 3 |
pysot/models/non_local/__init__.py | eldercrow/tracking-pytorch | 0 | 12769453 | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from pysot.models.non_local.non_local import APNB, MultiAPNB
NONLOCAL = {
'NonLocal': APNB,
'MultiNonLocal': MultiAPNB
}
def get_nonlocal(name, **kwargs):
return NONLOCAL[name](**kwargs)
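# Hypothetical usage; the constructor keyword arguments depend on APNB's
# actual signature and are illustrative only:
#
#   block = get_nonlocal('NonLocal', in_channels=256, out_channels=256)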
| 1.710938 | 2 |
ETL/north_south_etl.py | Jonas2019/car-accident-analysis | 0 | 12769454 | import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import *
import os
# Configure spark session
# ?readPreference=primaryPreferred
spark = SparkSession\
.builder\
.master('local[2]')\
.appName('accidents_etl')\
.config("spark.mongodb.input.uri", 'mongodb+srv://dbAdmin:<EMAIL>.net/CMPT732.Project')\
.config('spark.mongodb.output.uri', 'mongodb+srv://dbAdmin:cmpt732@<EMAIL>.jfbfw.mongodb.net/CMPT732.Project')\
.config('spark.jars.packages', 'org.mongodb.spark:mongo-spark-connector_2.12:3.0.1')\
.getOrCreate()
accidents_schema = StructType([
StructField('ID', StringType()),
StructField('Severity', IntegerType()),
StructField('Start_Time', TimestampType()),
StructField('End_Time', TimestampType()),
StructField('Start_Lat', DoubleType()),
StructField('Start_Lng', DoubleType()),
StructField('End_Lat', DoubleType()),
StructField('End_Lng', DoubleType()),
StructField('Distance(mi)', DoubleType()),
StructField('Description', StringType()),
StructField('Number', DoubleType()),
StructField('Street', StringType()),
StructField('Side', StringType()),
StructField('City', StringType()),
StructField('County', StringType()),
StructField('State', StringType()),
StructField('Zipcode', StringType()),
StructField('Country', StringType()),
StructField('Timezone', StringType()),
StructField('Airport_Code', StringType()),
StructField('Weather_Timestamp', StringType()),
StructField('Temperature(F)', DoubleType()),
StructField('Wind_Chill(F)', DoubleType()),
StructField('Humidity(%)', DoubleType()),
StructField('Pressure(in)', DoubleType()),
StructField('Visibility(mi)', DoubleType()),
StructField('Wind_Direction', StringType()),
StructField('Wind_Speed(mph)', DoubleType()),
StructField('Precipitation(in)', DoubleType()),
StructField('Weather_Condition', StringType()),
StructField('Amenity', StringType()),
StructField('Bump', StringType()),
StructField('Crossing', StringType()),
StructField('Give_Way', StringType()),
StructField('Junction', StringType()),
StructField('No_Exit', StringType()),
StructField('Railway', StringType()),
StructField('Roundabout', StringType()),
StructField('Station', StringType()),
StructField('Stop', StringType()),
StructField('Traffic_Calming', StringType()),
StructField('Traffic_Signal', StringType()),
StructField('Turning_Loop', StringType()),
StructField('Sunrise_Sunset', StringType()),
StructField('Civil_Twilight', StringType()),
StructField('Nautical_Twilight', StringType()),
StructField('Astronomical_Twilight', StringType()),
])
# Change the current working directory to the project root.
# Note: str.rstrip("/ETL") strips a *set of characters*, not the "/ETL"
# suffix, so it can mangle the path; go up one directory instead.
path = os.path.dirname(os.path.abspath(__file__))
path = os.path.dirname(path)
os.chdir(path)
# Load df
df = spark.read.csv("Accident_No_NA.csv", schema=accidents_schema,header=True)
df = df.withColumn('date', to_date(df['Start_Time'], "yyyy-MM-dd"))  # convert timestamp to date
df=df.select(df['State'],df['start_lat'],df['date'],year(df['date']).alias('Year'), month(df['date']).alias('Month'),dayofmonth(df['date']),df['Timezone']).cache()
df=df.filter((df['Year']=='2017')|(df['Year']=='2018')|(df['Year']=='2019')).cache()
# 1. month:
df1=df.groupBy(df['Month']).count().orderBy(df['Month'])
df1.show()
df1.write.format('mongo')\
.mode('overwrite')\
.option('spark.mongodb.output.uri', 'mongodb+srv://dbAdmin:cmpt732@cluster732.jfbfw.mongodb.net/CMPT732.monthCount').save()
# 2. weekday:
df2 = df.select(dayofweek(df['date']).alias('day_of_Week'))
df2 = df2.groupBy(df2['day_of_Week']).count().orderBy(df2['day_of_Week'])
df2.show()
#df2=df1.filter(df1['year(date)']=='2020')
#df2=df1.filter(df1['state_name']=='FL')
#df2=df2.filter(df1['start_lat']>45)
df2.write.format('mongo')\
.mode('overwrite')\
.option('spark.mongodb.output.uri', 'mongodb+srv://dbAdmin:cmpt732@<EMAIL>32.jfbfw.mongodb.net/CMPT732.dayofWeek').save()
# 3. north vs south:
df3=df.filter(df['start_lat']>37)
df4=df.filter(df['start_lat']<30)
df3=df3.filter(df3['Timezone']=='US/Eastern')
df4=df4.filter(df4['Timezone']=='US/Eastern')
df3=df3.groupBy(df3['Month']).count().orderBy(df3['Month'])
df4=df4.groupBy(df4['Month']).count().orderBy(df4['Month'])
df3.show()
df4.show()
df3.write.format('mongo')\
.mode('overwrite')\
.option('spark.mongodb.output.uri', 'mongodb+srv://dbAdmin:cmpt732@<EMAIL>732.jfbfw.mongodb.net/CMPT732.northCount').save()
df4.write.format('mongo')\
.mode('overwrite')\
.option('spark.mongodb.output.uri', 'mongodb+srv://dbAdmin:cmpt732@cluster732.jfbfw.mongodb.net/CMPT732.southCount').save()
| 2.5 | 2 |
tensorflow/python/ops/ragged/ragged_split_op_test.py | TheRakeshPurohit/tensorflow | 7 | 12769455 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.split."""
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSplitOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([
#=========================================================================
# Uniform splits.
#=========================================================================
dict(
descr='Uniform splits, rank-2 inputs, axis=0',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=2,
expected=[
[[1]],
[[2, 3, 4]]]),
dict(
descr='Uniform 3 splits, rank-2 inputs, axis=0',
pylist=[1, 2, 3, 4],
row_lengths=[1, 2, 1], # shape=(3, None)
num_or_size_splits=3,
expected=[
[[1]],
[[2, 3]],
[[4]]]),
dict(
descr='Uniform 5 splits, rank-2 inputs, axis=0',
pylist=[1, 2, 3, 4, 5],
row_lengths=[1, 1, 1, 1, 1], # shape=(5, None)
num_or_size_splits=5,
expected=[
[[1]],
[[2]],
[[3]],
[[4]],
[[5]]]),
dict(
descr='Uniform 2 splits, rank-2 inputs(empty), axis=0',
pylist=[1, 2, 3, 4],
row_lengths=[4, 0], # shape=(2, None)
num_or_size_splits=2,
expected=[
[[1, 2, 3, 4]],
[[]]]),
dict(
descr='Uniform 2 splits, rank-2 inputs(all empty), axis=0',
pylist=[],
row_lengths=[0, 0], # shape=(2, None)
num_or_size_splits=2,
expected=[
[[]],
[[]]]),
dict(
descr='Uniform 1 split, rank-2 inputs, axis=0',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=1,
expected=[
[[1], [2, 3, 4]]]),
dict(
descr='Uniform 1 split, rank-2 inputs, axis=1',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=1,
axis=1,
expected=[
[[1], [2, 3, 4]]]),
dict(
descr='Uniform 2 split, rank-3 inputs, axis=0',
pylist=np.arange(4 * 2).reshape(4, 2),
row_lengths=[1, 3], # shape=(2, None, 2)
num_or_size_splits=2,
expected=[
[[[0, 1]]],
[[[2, 3], [4, 5], [6, 7]]]]),
dict(
descr='Uniform 2 splits, rank-3 inputs, axis=2',
pylist=np.arange(4 * 2).reshape(4, 2),
row_lengths=[1, 3], # shape=(2, None, 2)
num_or_size_splits=2,
axis=2,
expected=[
[[[0]], [[2], [4], [6]]],
[[[1]], [[3], [5], [7]]]]),
dict(
descr='Uniform 2 splits, rank-2 float inputs, axis=0',
pylist=[1.0, 2.0, 3.0, 4.0],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=2,
expected=[
[[1.0]],
[[2.0, 3.0, 4.0]]]),
dict(
descr='Uniform 2 splits, rank-2 string inputs, axis=0',
pylist=[b'a', b'bc', b'', b'd'],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=2,
expected=[
[[b'a']],
[[b'bc', b'', b'd']]]),
#=========================================================================
# Ragged splits.
#=========================================================================
dict(
descr='Ragged 2 splits, rank-2 inputs, axis=0',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=[1, 1],
expected=[
[[1]],
[[2, 3, 4]]]),
dict(
descr='Ragged 3 splits, rank-2 inputs, axis=0',
pylist=[1, 2, 3, 4],
row_lengths=[1, 2, 1], # shape=(3, None)
num_or_size_splits=[1, 2],
expected=[
[[1]],
[[2, 3], [4]]]),
dict(
descr='Ragged 5 splits, rank-2 inputs(empty), axis=0',
pylist=[1, 2, 3, 4, 5],
row_lengths=[1, 1, 1, 1, 1], # shape=(5, None)
num_or_size_splits=[1, 2, 2, 0],
expected=[
[[1]],
[[2], [3]],
[[4], [5]],
[]]),
dict(
descr='Ragged 2 splits, rank-2 inputs(empty), axis=0',
pylist=[1, 2, 3, 4],
row_lengths=[4, 0, 0, 0], # shape=(2, None)
num_or_size_splits=[3, 1],
expected=[
[[1, 2, 3, 4], [], []],
[[]]]),
dict(
descr='Ragged 2 splits, rank-2 inputs(all empty), axis=0',
pylist=[],
row_lengths=[0, 0], # shape=(2, None)
num_or_size_splits=[2, 0],
expected=[
[[], []],
[]]),
dict(
descr='Ragged 1 split, rank-2 inputs, axis=0',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=[2],
expected=[
[[1], [2, 3, 4]]]),
dict(
descr='Ragged 2 split, rank-3 inputs, axis=0',
pylist=np.arange(4 * 2).reshape(4, 2),
row_lengths=[1, 3], # shape=(2, None, 2)
num_or_size_splits=[1, 1],
expected=[
[[[0, 1]]],
[[[2, 3], [4, 5], [6, 7]]]]),
dict(
descr='Ragged 2 split, rank-3 inputs, axis=-3',
pylist=np.arange(4 * 2).reshape(4, 2),
row_lengths=[1, 3], # shape=(2, None, 2)
num_or_size_splits=[1, 1],
expected=[
[[[0, 1]]],
[[[2, 3], [4, 5], [6, 7]]]]),
dict(
descr='Ragged 2 splits, rank-3 inputs, axis=2',
pylist=np.arange(4 * 3).reshape(4, 3),
row_lengths=[1, 3], # shape=(2, None, 3)
num_or_size_splits=[2, 1],
axis=2,
expected=[
[[[0, 1]], [[3, 4], [6, 7], [9, 10]]],
[[[2]], [[5], [8], [11]]]]),
dict(
descr='Ragged 2 splits, rank-3 inputs, axis=-1',
pylist=np.arange(4 * 3).reshape(4, 3),
row_lengths=[1, 3], # shape=(2, None, 3)
num_or_size_splits=[2, 1],
axis=2,
expected=[
[[[0, 1]], [[3, 4], [6, 7], [9, 10]]],
[[[2]], [[5], [8], [11]]]]),
dict(
descr='Ragged 3 splits, rank-2 float inputs, axis=0',
pylist=[1.0, 2.0, 3.0, 4.0],
row_lengths=[1, 2, 1], # shape=(2, None)
num_or_size_splits=[2, 1],
expected=[
[[1.0], [2.0, 3.0]],
[[4.0]]]),
dict(
descr='Ragged 3 splits with name, rank-2 float inputs, axis=0',
pylist=[1.0, 2.0, 3.0, 4.0],
row_lengths=[1, 2, 1], # shape=(2, None)
num_or_size_splits=[2, 1],
name='ragged_split',
expected=[
[[1.0], [2.0, 3.0]],
[[4.0]]]),
dict(
descr='Ragged 3 splits with num, rank-2 float inputs, axis=0',
pylist=[1.0, 2.0, 3.0, 4.0],
row_lengths=[1, 2, 1], # shape=(2, None)
num_or_size_splits=[2, 1],
num=2,
expected=[
[[1.0], [2.0, 3.0]],
[[4.0]]]),
dict(
descr='Ragged 2 splits, rank-2 string inputs, axis=0',
pylist=[b'a', b'bc', b'', b'd'],
row_lengths=[1, 3, 0], # shape=(2, None)
num_or_size_splits=[2, 1],
expected=[
[[b'a'], [b'bc', b'', b'd']],
[[]]]),
]) # pyformat: disable
def testSplit(self,
descr,
pylist,
row_lengths,
num_or_size_splits,
expected,
axis=0,
num=None,
name=None):
rt = ragged_tensor.RaggedTensor.from_row_lengths(pylist, row_lengths)
result = ragged_array_ops.split(rt, num_or_size_splits, axis, num, name)
self.assertLen(result, len(expected))
for res, exp in zip(result, expected):
self.assertAllEqual(res, exp)
@parameterized.parameters([
#=========================================================================
# Uniform splits errors.
#=========================================================================
dict(
descr='Uniform split, can not split',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=7,
exception=errors.InvalidArgumentError,
message='Cannot exactly split'),
dict(
descr='Uniform split, ragged dimension',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=2,
axis=1,
exception=ValueError,
message='ragged dimension'),
dict(
descr='Uniform split, zero split',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=0,
exception=ValueError,
message='must be >=1'),
#=========================================================================
# Ragged splits errors.
#=========================================================================
dict(
descr='Ragged split, 2 dimensional size_splits',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=[[1, 1]],
exception=TypeError,
message='Python list'),
dict(
descr='Ragged split, ragged dimension',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=[1, 1],
axis=1,
exception=ValueError,
message='ragged dimension'),
dict(
descr='Ragged split, cannot split',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=[1, 2],
exception=errors.InvalidArgumentError,
message='Cannot exactly split'),
dict(
descr='Ragged split, num does not match',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=[1, 1],
num=3,
exception=ValueError,
message='`num` does not match'),
dict(
descr='Ragged split, negative split',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=[1, -1, 2],
num=3,
exception=errors.InvalidArgumentError,
message='must be non-negative'),
dict(
descr='Ragged split, float splits',
pylist=[1, 2, 3, 4],
row_lengths=[1, 3], # shape=(2, None)
num_or_size_splits=[1.0, 2.0],
num=2,
exception=TypeError,
message='integer'),
]) # pyformat: disable
def testSplitError(self,
descr,
pylist,
row_lengths,
num_or_size_splits,
exception,
message,
axis=0,
num=None):
rt = ragged_tensor.RaggedTensor.from_row_lengths(pylist, row_lengths)
with self.assertRaises(exception):
result = ragged_array_ops.split(rt, num_or_size_splits, axis, num)
self.evaluate(result)
@parameterized.named_parameters([
('int32', dtypes.int32),
('int64', dtypes.int64)])
def testSplitTensorDtype(self, dtype):
rt = ragged_tensor.RaggedTensor.from_row_lengths([1.0, 2.0, 3.0, 4.0],
[3, 1])
# split_lengths is a 1-D tensor
split_lengths = ops.convert_to_tensor([1, 1], dtype=dtype)
result = ragged_array_ops.split(rt, split_lengths)
expected = [
ragged_tensor.RaggedTensor.from_row_lengths([1.0, 2.0, 3.0], [3]),
ragged_tensor.RaggedTensor.from_row_lengths([4.0], [1])]
self.assertLen(result, len(expected))
for res, exp in zip(result, expected):
self.assertAllEqual(res, exp)
@parameterized.parameters([
dict(rt_shape=(2, None)),
dict(rt_shape=None),
])
def testUniformSplitDynamicShape(self, rt_shape):
rt = ragged_tensor.RaggedTensor.from_row_lengths([1.0, 2.0, 3.0, 4.0],
[3, 1])
rt_spec = ragged_tensor.RaggedTensorSpec(rt_shape, ragged_rank=1)
@def_function.function(input_signature=[rt_spec])
def split_tensors(rt):
return ragged_array_ops.split(rt, 2)
splited_rts = split_tensors(rt)
expected_rts = [
ragged_tensor.RaggedTensor.from_row_lengths([1.0, 2.0, 3.0], [3]),
ragged_tensor.RaggedTensor.from_row_lengths([4.0], [1])]
for splited_rt, expected_rt in zip(splited_rts, expected_rts):
self.assertAllEqual(splited_rt, expected_rt)
@parameterized.parameters([
dict(rt_shape=x, lengths_shape=y) for x, y in itertools.product(
[(2, None), None],
[(2,), (None,), None])
])
def testRaggedSplitDynamicShape(self, rt_shape, lengths_shape):
rt_spec = ragged_tensor.RaggedTensorSpec(rt_shape, ragged_rank=1)
lengths_spec = tensor_spec.TensorSpec(lengths_shape, dtype=dtypes.int32)
@def_function.function(input_signature=[rt_spec, lengths_spec])
def split_tensors(rt, split_lengths):
return ragged_array_ops.split(rt, split_lengths, num=2)
rt = ragged_tensor.RaggedTensor.from_row_lengths([1.0, 2.0, 3.0, 4.0],
[3, 1])
split_lengths = [1, 1]
# split_lengths matches num at runtime
splited_rts = split_tensors(rt, split_lengths)
expected_rts = [
ragged_tensor.RaggedTensor.from_row_lengths([1.0, 2.0, 3.0], [3]),
ragged_tensor.RaggedTensor.from_row_lengths([4.0], [1])]
for splited_rt, expected_rt in zip(splited_rts, expected_rts):
self.assertAllEqual(splited_rt, expected_rt)
@parameterized.parameters([
dict(
descr='lengths known rank, num and lengths mismatch',
rt_shape=(2, None),
lengths_shape=(None,),
lengths=[1, 1, 0],
num=2,
exception=errors.InvalidArgumentError,
message='inconsistent'),
dict(
descr='lengths unknown rank, num and lengths mismatch',
rt_shape=None,
lengths_shape=None,
lengths=[1, 1, 0],
num=2,
exception=errors.InvalidArgumentError,
message='inconsistent'),
dict(
descr='rt unknown rank, negative axis',
rt_shape=None,
lengths_shape=None,
lengths=[1, 1],
axis=-2,
num=2,
exception=ValueError,
message='negative'),
dict(
descr='lengths unknown rank, num is None',
rt_shape=None,
lengths_shape=None,
lengths=[1, 1],
exception=ValueError,
message='`num` must be specified'),
dict(
descr='lengths unknown rank, dynamic rank!=1',
rt_shape=None,
lengths_shape=None,
lengths=[[1, 1]],
num=2,
exception=(ValueError, errors.InvalidArgumentError)),
])
def testRaggedSplitDynamicShapeError(self,
descr,
rt_shape,
lengths_shape,
lengths,
exception,
message='',
axis=0,
num=None):
rt_spec = ragged_tensor.RaggedTensorSpec(rt_shape, ragged_rank=1)
split_lengths_spec = tensor_spec.TensorSpec(lengths_shape,
dtype=dtypes.int32)
@def_function.function(input_signature=[rt_spec, split_lengths_spec])
def split_tensors(rt, split_lengths):
return ragged_array_ops.split(rt, split_lengths, axis=axis, num=num)
rt = ragged_tensor.RaggedTensor.from_row_lengths([1.0, 2.0, 3.0, 4.0],
[3, 1])
with self.assertRaisesRegex(exception, message):
self.evaluate(split_tensors(rt=rt, split_lengths=lengths))
if __name__ == '__main__':
googletest.main()
| 2 | 2 |
lims/codonusage/serializers.py | sqilz/LIMS-Backend | 12 | 12769456 | from rest_framework import serializers
from .models import CodonUsageTable, CodonUsage
class CodonUsageTableSerializer(serializers.ModelSerializer):
species = serializers.SlugRelatedField(read_only=True, slug_field='name')
class Meta:
model = CodonUsageTable
fields = '__all__'
class CodonUsageSerializer(serializers.ModelSerializer):
class Meta:
model = CodonUsage
exclude = ('table',)
| 2.125 | 2 |
0110.balanced_binary_tree/solution.py | WZMJ/Algorithms | 5 | 12769457 | from utils import TreeNode
class Solution:
def is_balance(self, root: TreeNode):
if not root:
return True
left = self.depth(root.left)
right = self.depth(root.right)
return abs(left - right) <= 1 and self.is_balance(root.left) and self.is_balance(root.right)
def depth(self, root: TreeNode):
if not root:
return 0
return 1 + max([self.depth(root.left), self.depth(root.right)])
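# Note: the version above recomputes subtree depths at every node, which is
# O(n^2) in the worst case. A common bottom-up variant (a sketch, not part of
# the original solution) returns -1 as soon as an unbalanced subtree is seen,
# giving O(n):
class SolutionLinear:
    def is_balance(self, root: TreeNode) -> bool:
        def check(node):
            # Returns the subtree depth, or -1 if the subtree is unbalanced.
            if not node:
                return 0
            left = check(node.left)
            if left == -1:
                return -1
            right = check(node.right)
            if right == -1:
                return -1
            if abs(left - right) > 1:
                return -1
            return 1 + max(left, right)
        return check(root) != -1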
| 3.65625 | 4 |
DSAE_PBHL/Model.py | RyoOzaki/SparseAutoencoder | 1 | 12769458 |
from abc import ABCMeta
class Model(metaclass=ABCMeta):
pass
class PB_Model(Model, metaclass=ABCMeta):
pass
| 1.875 | 2 |
web/__init__.py | DJBnjack/docker-server | 0 | 12769459 |
__author__ = 'Johannes'
| 1.023438 | 1 |
packages/grid/backend/alembic/versions/2022-03-31_f712122fe780.py | jackbandy/PySyft | 0 | 12769460 | """add a boolean flag for proxy dataset in ObjectMetadata
Revision ID: f712122fe780
Revises: 70fcad0b1795
Create Date: 2022-03-31 07:20:49.411961
"""
# third party
from alembic import op # type: ignore
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f712122fe780"
down_revision = "70fcad0b1795"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column(
"obj_metadata", sa.Column("is_proxy_dataset", sa.Boolean(), default=False)
)
def downgrade() -> None:
op.drop_column("obj_metadata", "is_proxy_dataset")
| 0.867188 | 1 |
cpdb/data_importer/tests/management/test_crawl_copa_data.py | invinst/CPDBv2_backend | 25 | 12769461 | from django.test import TestCase
from django.core.management import call_command
from mock import patch
from robber import expect
from data.factories import AttachmentFileFactory
from data.constants import AttachmentSourceType
class CrawlCopaPortalDataTestCase(TestCase):
@patch('data_importer.copa_crawler.importers.CopaPortalAttachmentImporter.crawl_and_update_attachments')
@patch('data_importer.copa_crawler.importers.CopaSummaryReportsAttachmentImporter.crawl_and_update_attachments')
@patch('data_importer.management.commands.crawl_copa_data.send_cr_attachment_available_email')
def test_handle(self, send_email_mock, summary_reports_importer_mock, portal_importer_mock):
attachment_file_1 = AttachmentFileFactory(source_type=AttachmentSourceType.PORTAL_COPA)
attachment_file_2 = AttachmentFileFactory(source_type=AttachmentSourceType.SUMMARY_REPORTS_COPA)
portal_importer_mock.return_value = [attachment_file_1]
summary_reports_importer_mock.return_value = [attachment_file_2]
call_command('crawl_copa_data')
expect(send_email_mock).to.be.called_with([attachment_file_1, attachment_file_2])
| 2.109375 | 2 |
_django/migrations/0001_initial.py | zzzeek/imdbench | 50 | 12769462 | # Generated by Django 2.1 on 2018-09-21 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cast',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('list_order', models.IntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Directors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('list_order', models.IntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.CharField(max_length=200)),
('title', models.CharField(max_length=200)),
('year', models.IntegerField()),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=200)),
('middle_name', models.CharField(blank=True, max_length=200)),
('last_name', models.CharField(max_length=200)),
('image', models.CharField(max_length=200)),
('bio', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
('rating', models.IntegerField()),
('creation_time', models.DateTimeField()),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('image', models.CharField(max_length=200)),
],
),
migrations.AddField(
model_name='review',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='_django.User'),
),
migrations.AddField(
model_name='review',
name='movie',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='_django.Movie'),
),
migrations.AddField(
model_name='movie',
name='cast',
field=models.ManyToManyField(related_name='acted_in', through='_django.Cast', to='_django.Person'),
),
migrations.AddField(
model_name='movie',
name='directors',
field=models.ManyToManyField(related_name='directed', through='_django.Directors', to='_django.Person'),
),
migrations.AddField(
model_name='directors',
name='movie',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='directors_rel', to='_django.Movie'),
),
migrations.AddField(
model_name='directors',
name='person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='directed_rel', to='_django.Person'),
),
migrations.AddField(
model_name='cast',
name='movie',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cast_rel', to='_django.Movie'),
),
migrations.AddField(
model_name='cast',
name='person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='acted_in_rel', to='_django.Person'),
),
]
| 1.875 | 2 |
src/patcher.py | MustangYM/xia0LLDB | 464 | 12769463 |
#! /usr/bin/env python3
# ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______
# |______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|
# _ ___ _ _ _____ ____
# (_) / _ \| | | | | __ \| _ \
# __ ___ __ _| | | | | | | | | | | |_) |
# \ \/ / |/ _` | | | | | | | | | | | _ <
# > <| | (_| | |_| | |____| |____| |__| | |_) |
# /_/\_\_|\__,_|\___/|______|______|_____/|____/
# ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______ ______
# |______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|______|
import lldb
import os
import shlex
import optparse
import json
import re
import utils
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
'command script add -f patcher.handle_command patcher -h "patch code in lldb"')
# print('========')
# print('[patcher]: patch code in lldb')
# print('\tpatcher -a patch_addr -i instrument -s instrument_count')
# print('\tmore usage, try "patcher -h"')
def handle_command(debugger, command, exe_ctx, result, internal_dict):
command_args = shlex.split(command, posix=False)
parser = generate_option_parser()
try:
(options, _) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
_ = exe_ctx.target
_ = exe_ctx.thread
if options.patchInstrument:
if options.patchAddress:
patch_addr = int(options.patchAddress, 16)
else:
ret = utils.exe_cmd(debugger, "p/x $pc")
ret = ret.strip()
pattern = '0x[0-9a-f]+'
match = re.search(pattern, ret)
if match:
found = match.group(0)
else:
utils.ELOG("not get address:"+ret)
return
utils.ILOG("you not set patch address, default is current pc address:{}".format(found))
patch_addr = int(found, 16)
patch_ins = options.patchInstrument
# default instrument size is 1
patch_size = 0x1
patch_ins = patch_ins.replace("\"", "")
patch_ins = patch_ins.replace("'", "")
if options.patchSize:
patch_size = int(options.patchSize)
ret = patcher(debugger, patch_ins, patch_addr, patch_size)
result.AppendMessage(str(ret))
else:
result.AppendMessage("[-] args error, check it !")
return
def patch_code(debugger, addr, ins, count):
command_script = '@import Foundation;\n'
command_script += 'uint64_t x_addr = {};\n'.format(addr)
command_script += 'uint8_t patch_data[] = {};\n'.format(ins)
command_script += 'int insCount = {};\n'.format(count)
command_script += r'''
NSMutableString* retStr = [NSMutableString string];
void * patch_addr = (void*)x_addr;
//uint8_t patch_data[] = {0xc0, 0x03, 0x5f, 0xd6};
int patch_data_size = 4*insCount;
// =====================================================patch code=============================================
typedef bool (*patch_code_t)(void* patch_addr, uint8_t* patch_data, int patch_data_size);
patch_code_t patch_code = [](void* patch_addr, uint8_t* patch_data, int patch_data_size) -> bool {
#define PAGE_SIZE 0x0000000000004000
#define PAGE_MASK 0x0000000000003fff
#define RTLD_LAZY 0x1
#define RTLD_NOW 0x2
#define RTLD_LOCAL 0x4
#define RTLD_GLOBAL 0x8
#define VM_PROT_READ ((vm_prot_t) 0x01)
#define VM_PROT_WRITE ((vm_prot_t) 0x02)
#define VM_PROT_EXECUTE ((vm_prot_t) 0x04)
#define PROT_NONE 0x00 /* [MC2] no permissions */
#define PROT_READ 0x01 /* [MC2] pages can be read */
#define PROT_WRITE 0x02 /* [MC2] pages can be written */
#define PROT_EXEC 0x04 /* [MC2] pages can be executed */
#define MAP_SHARED 0x0001
#define MAP_ANON 0x1000
#define KERN_SUCCESS 0
typedef unsigned int mach_port_t;
typedef int kern_return_t;
typedef unsigned int vm_inherit_t;
typedef mach_port_t task_t;
typedef int vm_prot_t;
typedef unsigned long uintptr_t;
typedef uintptr_t vm_offset_t;
typedef vm_offset_t vm_address_t;
typedef uint64_t mach_vm_address_t;
typedef int boolean_t;
typedef int vm_behavior_t;
typedef uint32_t vm32_object_id_t;
typedef uintptr_t vm_size_t;
typedef int *vm_region_recurse_info_t;
typedef unsigned long long memory_object_offset_t;
struct vm_region_submap_short_info_64 {
vm_prot_t protection; /* present access protection */
vm_prot_t max_protection; /* max avail through vm_prot */
vm_inherit_t inheritance;/* behavior of map/obj on fork */
memory_object_offset_t offset; /* offset into object/map */
unsigned int user_tag; /* user tag on map entry */
unsigned int ref_count; /* obj/map mappers, etc */
unsigned short shadow_depth; /* only for obj */
unsigned char external_pager; /* only for obj */
unsigned char share_mode; /* see enumeration */
boolean_t is_submap; /* submap vs obj */
vm_behavior_t behavior; /* access behavior hint */
vm32_object_id_t object_id; /* obj/map name, not a handle */
unsigned short user_wired_count;
};
typedef unsigned int __darwin_natural_t;
typedef __darwin_natural_t natural_t;
typedef natural_t mach_msg_type_number_t;
typedef struct vm_region_submap_short_info_64 vm_region_submap_short_info_data_64_t;
#define VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 \
((mach_msg_type_number_t) \
(sizeof (vm_region_submap_short_info_data_64_t) / sizeof (natural_t)))
#define VM_FLAGS_OVERWRITE 0x4000 /* delete any existing mappings first */
typedef int __int32_t;
typedef __int32_t __darwin_pid_t;
typedef __darwin_pid_t pid_t;
// init value
kern_return_t kret;
task_t self_task = (task_t)mach_task_self();
/* Set platform binary flag */
#define FLAG_PLATFORMIZE (1 << 1)
// platformize_me
// https://github.com/pwn20wndstuff/Undecimus/issues/112
/*
void* handle = (void*)dlopen("/usr/lib/libjailbreak.dylib", RTLD_LAZY);
if (!handle){
//[retStr appendString:@"[-] /usr/lib/libjailbreak.dylib dlopen failed!\n"];
return false;
}
// Reset errors
(const char *)dlerror();
typedef void (*fix_entitle_prt_t)(pid_t pid, uint32_t what);
fix_entitle_prt_t ptr = (fix_entitle_prt_t)dlsym(handle, "jb_oneshot_entitle_now");
const char *dlsym_error = (const char *)dlerror();
if (dlsym_error) return;
ptr((pid_t)getpid(), FLAG_PLATFORMIZE);
//[retStr appendString:@"\n[+] platformize me success!"];
*/
void* target_addr = patch_addr;
// 1. get target address page and patch offset
unsigned long page_start = (unsigned long) (target_addr) & ~PAGE_MASK;
unsigned long patch_offset = (unsigned long)target_addr - page_start;
// map new page for patch
void *new_page = (void *)mmap(NULL, PAGE_SIZE, 0x1 | 0x2, 0x1000 | 0x0001, -1, 0);
if (!new_page ){
//[retStr appendString:@"[-] mmap failed!\n"];
return false;
}
kret = (kern_return_t)vm_copy(self_task, (unsigned long)page_start, PAGE_SIZE, (vm_address_t) new_page);
if (kret != KERN_SUCCESS){
//[retStr appendString:@"[-] vm_copy faild!\n"];
return false;
}
// 4. start patch
/*
nop -> {0x1f, 0x20, 0x03, 0xd5}
ret -> {0xc0, 0x03, 0x5f, 0xd6}
*/
// char patch_ins_data[4] = {0x1f, 0x20, 0x03, 0xd5};
// mach_vm_write(task_self, (vm_address_t)(new+patch_offset), patch_ret_ins_data, 4);
memcpy((void *)((uint64_t)new_page+patch_offset), patch_data, patch_data_size);
//[retStr appendString:@"[+] patch ret[0xc0 0x03 0x5f 0xd6] with memcpy\n"];
// set back to r-x
(int)mprotect(new_page, PAGE_SIZE, PROT_READ | PROT_EXEC);
//[retStr appendString:@"[*] set new page back to r-x success!\n"];
// remap
vm_prot_t prot;
vm_inherit_t inherit;
// get page info
vm_address_t region = (vm_address_t) page_start;
vm_size_t region_len = 0;
struct vm_region_submap_short_info_64 vm_info;
mach_msg_type_number_t info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
natural_t max_depth = 99999;
    kret = (kern_return_t)vm_region_recurse_64(self_task, &region, &region_len,
&max_depth,
(vm_region_recurse_info_t) &vm_info,
&info_count);
if (kret != KERN_SUCCESS){
//[retStr appendString:@"[-] vm_region_recurse_64 faild!\n"];
return false;
}
prot = vm_info.protection & (PROT_READ | PROT_WRITE | PROT_EXEC);
inherit = vm_info.inheritance;
//[retStr appendString:@"[*] get page info done.\n"];
vm_prot_t c;
vm_prot_t m;
mach_vm_address_t target = (mach_vm_address_t)page_start;
kret = (kern_return_t)mach_vm_remap(self_task, &target, PAGE_SIZE, 0,
VM_FLAGS_OVERWRITE, self_task,
(mach_vm_address_t) new_page, true,
&c, &m, inherit);
if (kret != KERN_SUCCESS){
//[retStr appendString:@"[-] remap mach_vm_remap faild!\n"];
return false;
}
//[retStr appendString:@"[+] remap to target success!\n"];
    // clear the instruction and data caches over the patched range
    void* clear_start_ = (void*)(page_start + patch_offset);
    sys_icache_invalidate (clear_start_, patch_data_size);
    sys_dcache_flush (clear_start_, patch_data_size);
return true;
};
// =====================================================patch code=============================================
patch_code(patch_addr, patch_data, patch_data_size);
[retStr appendString:@"patch done."];
retStr
'''
retStr = utils.exe_script(debugger, command_script)
return utils.hex_int_in_str(retStr)
def is_raw_data(data):
# pylint: disable=anomalous-backslash-in-string
pattern = "\{\s*0x[0-9a-fA-F]{2}\s*,\s*0x[0-9a-fA-F]{2}\s*,\s*0x[0-9a-fA-F]{2}\s*,\s*0x[0-9a-fA-F]{2}\s*\}"
ret = re.match(pattern, data)
if not ret:
return False
return True
def patcher(debugger, ins, addr, size):
if is_raw_data(ins):
utils.ILOG("detect you manual set ins data:{}".format(ins))
utils.ILOG("start patch text at address:{} size:{} to ins data:{}".format(hex(addr), size, ins))
patch_code(debugger, hex(addr), ins, size)
return "[x] power by xia0@2019"
supportInsList = {'nop':'0x1f, 0x20, 0x03, 0xd5 ', 'ret':'0xc0, 0x03, 0x5f, 0xd6', 'mov0':'0x00, 0x00, 0x80, 0xd2', 'mov1':'0x20, 0x00, 0x80, 0xd2'}
if ins not in supportInsList.keys():
utils.ELOG("patcher not support this ins type:{}".format(ins))
return "[x] power by xia0@2019"
utils.ILOG("start patch text at address:{} size:{} to ins:\"{}\" and data:{}".format(hex(addr), size, ins, supportInsList[ins]))
# for i in range(size):
# patch_code(debugger, hex(curPatchAddr), supportInsList[ins])
# utils.SLOG("current patch address:{} patch done".format(hex(curPatchAddr)))
# curPatchAddr += 4
ins_data = ""
for i in range(size):
ins_data += supportInsList[ins]
if i != size - 1:
ins_data += ","
build_ins_data = "{" + ins_data + "}"
utils.ILOG("make ins data:\n{}".format(build_ins_data))
patch_code(debugger, hex(addr), build_ins_data, size)
utils.SLOG("patch done")
return "[x] power by xia0@2019"
def generate_option_parser():
usage = "patcher"
parser = optparse.OptionParser(usage=usage, prog="lookup")
parser.add_option("-a", "--address",
action="store",
default=None,
dest='patchAddress',
help="need patch code address")
parser.add_option("-i", "--instrument",
action="store",
default=None,
dest='patchInstrument',
help="patch instrument type")
parser.add_option("-s", "--size",
action="store",
default=None,
dest='patchSize',
help="patch instrument count")
return parser
| 1.710938 | 2 |
python/set_pin.py | swimmadude66/RestEasy | 1 | 12769464 |
import os
import sys
import json
from Pin import pin, pin_power
def set_pin_info(pin_obj):
pin_id = pin_obj['ID']
if not (os.path.isdir('/sys/class/gpio/gpio'+str(pin_id))):
        print('{"success": false, "message": "pin ' + str(pin_id) + ' does not exist"}')
exit(-1)
pin_path = '/sys/class/gpio/gpio'+str(pin_id)+'/'
open(pin_path+'active_low', 'w').write(str(pin_obj['active_low']))
open(pin_path+'direction', 'w').write(str(pin_obj['direction']))
open(pin_path+'edge', 'w').write(str(pin_obj['edge']))
open(pin_path+'value', 'w').write(str(pin_obj['value']))
pin_power_path = pin_path +'power/'
open(pin_power_path+'async', 'w').write(str(pin_obj['power']['async']))
open(pin_power_path+'control', 'w').write(str(pin_obj['power']['control']))
open(pin_power_path+'runtime_enabled', 'w').write(str(pin_obj['power']['runtime_enabled']))
open(pin_power_path+'runtime_status', 'w').write(str(pin_obj['power']['runtime_status']))
open(pin_power_path+'runtime_active_kids', 'w').write(str(pin_obj['power']['runtime_active_kids']))
open(pin_power_path+'runtime_active_time', 'w').write(str(pin_obj['power']['runtime_active_time']))
open(pin_power_path+'runtime_suspended_time', 'w').write(str(pin_obj['power']['runtime_suspended_time']))
open(pin_power_path+'runtime_usage', 'w').write(str(pin_obj['power']['runtime_usage']))
print("{\"success\": true, \"message\": \"pin " + str(pin_id) + " updated!\"}")
if len(sys.argv) != 2:
print('Usage: python set_pin.py <pin_data_json>')
exit(1)
else:
pin_string = (sys.argv[1])
pin_data = json.loads(pin_string)
if isinstance(pin_data, list):
first = True
print('[')
for pin_dict in pin_data:
if not first:
print(',')
set_pin_info(pin_dict)
first = False
print(']')
elif isinstance(pin_data, dict):
set_pin_info(pin_data)
else:
print('{\"success\": false, \"message\": \"Json data not properly formatted as object or array\"}')
exit(1)
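# Example invocation (hypothetical pin data and values; the 'power' sub-dict
# must carry every key written by set_pin_info above):
#
#   python set_pin.py '{"ID": 18, "active_low": 0, "direction": "out",
#       "edge": "none", "value": 1, "power": {"async": "enabled",
#       "control": "auto", "runtime_enabled": "enabled",
#       "runtime_status": "active", "runtime_active_kids": 0,
#       "runtime_active_time": 0, "runtime_suspended_time": 0,
#       "runtime_usage": 0}}'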
| 2.875 | 3 |
neutron_taas/services/taas/agents/extensions/taas.py | deepak-dt/tap-as-a-service | 68 | 12769465 |
# Copyright 2017 FUJITSU LABORATORIES LTD.
# Copyright 2016 NEC Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from neutron_lib.agent import l2_extension
from neutron_taas.services.taas.agents.common import taas_agent
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.IntOpt(
'taas_agent_periodic_interval',
default=5,
help=_('Seconds between periodic task runs')
)
]
cfg.CONF.register_opts(OPTS)
@six.add_metaclass(abc.ABCMeta)
class TaasAgentDriver(object):
"""Defines stable abstract interface for TaaS Agent Driver."""
@abc.abstractmethod
def initialize(self):
"""Perform Taas agent driver initialization."""
def consume_api(self, agent_api):
"""Consume the AgentAPI instance from the TaasAgentExtension class
:param agent_api: An instance of an agent specific API
"""
@abc.abstractmethod
def create_tap_service(self, tap_service):
"""Create a Tap Service request in driver."""
@abc.abstractmethod
def create_tap_flow(self, tap_flow):
"""Create a tap flow request in driver."""
@abc.abstractmethod
def delete_tap_service(self, tap_service):
"""delete a Tap Service request in driver."""
@abc.abstractmethod
def delete_tap_flow(self, tap_flow):
"""Delete a tap flow request in driver."""
class TaasAgentExtension(l2_extension.L2AgentExtension):
def initialize(self, connection, driver_type):
"""Initialize agent extension."""
self.taas_agent = taas_agent.TaasAgentRpcCallback(
cfg.CONF, driver_type)
self.taas_agent.consume_api(self.agent_api)
self.taas_agent.initialize()
def consume_api(self, agent_api):
"""Receive neutron agent API object
Allows an extension to gain access to resources internal to the
neutron agent and otherwise unavailable to the extension.
"""
self.agent_api = agent_api
def handle_port(self, context, port):
pass
def delete_port(self, context, port):
pass
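# A minimal no-op driver satisfying the abstract interface above
# (an illustrative sketch only, not a shipped driver):
class NoopTaasAgentDriver(TaasAgentDriver):
    def initialize(self):
        LOG.debug("no-op TaaS driver initialized")

    def create_tap_service(self, tap_service):
        pass

    def create_tap_flow(self, tap_flow):
        pass

    def delete_tap_service(self, tap_service):
        pass

    def delete_tap_flow(self, tap_flow):
        pass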
| 1.828125 | 2 |
cyclicmapper.py | jcwoods/CyclicMapper | 0 | 12769466 | #!/usr/bin/python
import sys
import math
# this class is intended to map a linear set of values into (x,y) coordinates
# translated onto the circumference of a circle.
#
# In particular, this is useful for mapping data which is cyclic in nature.
#
# Consider the geometry of building vectors for k-means clustering. Some
# data which might be used in a model is not linear in nature. This is
# especially true of days of the week, hours of the day, etc. If we assign
# the values 0 to Sunday and 6 to Saturday, (depending on the intent of the
# model!) these days may not be six days apart!
#
# Here, we map a linear value (0..6) into tuples. These tuples are x and y
# coordinates representing the position of the linear value mapped onto the
# perimeter of a circle. Instead of using a single linear dimension in our
# vector, we replace it with these two dimensions.
#
# This DOES NOT preserve the absolute distance between points, but it does
# preserve a relative distance that works well for many applications.
class CyclicMapper:
def __init__(self, maxValue, r = None, c = None, d = None):
self.twopi = math.pi * 2
# regardless of the parameter we're given, we will normalize
# to a radius here.
if r is not None:
self.radius = float(r)
elif c is not None:
self.radius = float(c) / self.twopi
        elif d is not None:
            # a diameter was given; convert it to a radius
            self.radius = float(d) / 2.0
else:
raise RuntimeError('missing required parameter (r, c, or d)')
self.max_val = float(maxValue) # highest value to be passed in
self.c = self.twopi / self.max_val
# make functions a local reference (global refs are expensive!)
self.sin = math.sin
self.cos = math.cos
return
def getTuple(self, val):
val = float(val)
if val < 0 or val >= self.max_val:
errMsg = 'input value {0:f} outside of range (0, {1:f})'.format(val, self.max_val)
raise ValueError(errMsg)
v = val * self.c
x = self.cos(v) * self.radius
y = self.sin(v) * self.radius
return (x,y)
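# Usage sketch: map the days of the week (0..6) onto a unit circle. With
# this encoding, Saturday (6) and Sunday (0) land next to each other, as a
# cyclic feature should:
if __name__ == '__main__':
    days = CyclicMapper(7, r=1.0)
    for day in range(7):
        print(day, days.getTuple(day))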
| 3.78125 | 4 |
gae/handlers/__init__.py | ibagrak/algae | 3 | 12769467 | import os
import jinja2
from webapp2_extras import i18n
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader('static/templates'),extensions=['jinja2.ext.i18n'])
jinja_environment.install_gettext_callables(
i18n.gettext,
i18n.ngettext,
newstyle=True)
jinja_environment.filters.update({
'format_date' : i18n.format_date,
'format_time' : i18n.format_time,
'format_datetime' : i18n.format_datetime,
'format_timedelta': i18n.format_timedelta
})
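# Typical usage from a handler (the template name is a placeholder):
#
#   template = jinja_environment.get_template('index.html')
#   html = template.render({'title': 'Home'})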
| 2.359375 | 2 |
doc/source/EXAMPLES/mu_toworld4.py | kapteyn-astro/kapteyn | 3 | 12769468 |
from kapteyn import maputils
f = maputils.FITSimage("ngc6946.fits")
# To take an XV slice at DEC=51, uncomment the next line:
#f.set_imageaxes(1, 3, slicepos=51)
annim = f.Annotatedimage()
x = [10, 50, 300, 399]
y = [1, 44, 88, 401]
# Convert these to world coordinates
lon, velo = annim.toworld(x, y)
print("lon, velo =", lon, velo)
x, y = annim.topixel(lon, velo)
print("Back to pixel coordinates: x, y =", x, y)
| 2.78125 | 3 |
snippet/example/python/template.py | yp2800/snippet | 94 | 12769469 |
#!/usr/bin/env python3
# -*- coding: utf8 -*-
#
# Support Python 3+
#
# Install:
# $ pip3 install gevent requests sqlalchemy pymysql
#
# Run:
# $ python template.py -h
#
import gevent.pool
import gevent.monkey
gevent.monkey.patch_all(Event=True, sys=True)
taskpool = gevent.pool.Pool(size=1000)
spawn = taskpool.spawn
import sys
import logging
import logging.config
LOG = logging.getLogger()
def to_bytes(v, encoding="utf-8", errors="strict"):
if isinstance(v, bytes):
return v
elif isinstance(v, str):
return v.encode(encoding, errors)
return to_bytes(str(v), encoding=encoding, errors=errors)
def to_str(v, encoding="utf-8", errors="strict"):
if isinstance(v, bytes):
return v.decode(encoding, errors)
elif isinstance(v, str):
return v
return str(v)
def init_logging(logger=None, level="INFO", file=None, handler_cls=None,
max_num=30, propagate=True, file_config=None, dict_config=None):
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = ("%(asctime)s - %(process)d - %(pathname)s - %(funcName)s - "
"%(lineno)d - %(levelname)s - %(message)s")
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
if file:
if handler_cls:
handler = handler_cls(file, max_num)
else:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(file, when="midnight",
interval=1, backupCount=max_num)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
loggers = logger if isinstance(logger, (list, tuple)) else [logger]
for logger in loggers:
logger.propagate = propagate
logger.setLevel(level)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
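# Example: route INFO-and-above records from the module logger to a daily
# rotating file (the path is illustrative):
#
#   init_logging(logger=LOG, level="INFO", file="/tmp/template.log", max_num=7)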
##############################################################################
# Configuration
from argparse import ArgumentParser
class Configuration(object):
class Group(object):
def __init__(self, group_name):
self.__name = group_name
def __repr__(self):
attrs = []
for key, value in vars(self).items():
if key != "_Group__name":
attrs.append("{0}={1}".format(key, value))
return "{0}({1})".format(self.__class__.__name__, ", ".join(attrs))
def __contains__(self, name):
return hasattr(self, name)
def __getattr__(self, name):
e = "The group '{0}' has no the option '{1}'"
raise AttributeError(e.format(self.__name, name))
def __setitem__(self, name, value):
setattr(self, name, value)
def __getitem__(self, name):
try:
return getattr(self, name)
except AttributeError:
e = "The group '{0}' has no the option '{1}'"
raise KeyError(e.format(self.__name, name))
def items(self):
d = vars(self)
d.pop("_Group__name")
return d.items()
__slots__ = ["_default_group_name", "_default_group", "_allow_empty",
"_encoding", "_parsed", "_caches", "_opts", "_bool_true",
"_bool_false", "_description", "_version"]
def __init__(self, description=None, allow_empty=False, encoding="utf-8",
default_group="DEFAULT", version=None):
"""A simple configuration file parser based on the format INI.
When an configuration option does not exist, for getting one default
value, not raising an exception, please use the method of get(), or the
builtin function of getattr().
"""
self._parsed = False
self._description = description
self._default_group_name = default_group
self._default_group = Configuration.Group(self._default_group_name)
self._allow_empty = allow_empty
self._encoding = encoding
self._version = version if version else "Unknown"
self._caches = {self._default_group_name: self._default_group}
self._opts = {}
self._bool_true = ["t", "1", "on", "true"]
self._bool_false = ["f", "0", "off", "false"]
def __getattr__(self, name):
if not self._parsed:
raise Exception("Not parsed")
try:
return self._caches[name]
except KeyError:
pass
msg = "'{0}' object has no attribute '{1}'"
raise AttributeError(msg.format(self.__class__.__name__, name))
def __getitem__(self, name):
if not self._parsed:
raise Exception("Not parsed")
_name = self._uniformize(name)
try:
return self._caches[_name]
except KeyError:
pass
msg = "'{0}' has no key '{1}'"
raise KeyError(msg.format(self.__class__.__name__, name))
def __repr__(self):
attrs = ("%s=%s" % (k, v) for k, v in self._caches.items())
return "{0}({1})".format(self.__class__.__name__, ", ".join(attrs))
def _set_group_opt(self, group_name, opt_name, opt_value, force=False):
gname = group_name if group_name else self._default_group_name
group = self._caches[gname]
if hasattr(group, opt_name) and not force:
e = "The group '{0}' has had the option of '{1}'"
raise ValueError(e.format(gname, opt_name))
setattr(self._caches[gname], opt_name, opt_value)
def _register(self, name, parser, default=None, group=None, help=None, short=None):
if self._parsed:
raise Exception("Have been parsed")
name = self._uniformize(name)
group = self._uniformize(group if group else self._default_group_name)
self._opts.setdefault(group, {})
if name in self._opts[group]:
raise KeyError("The option {0} has been regisetered".format(name))
self._opts[group][name] = (parser, default, help, short)
self._caches.setdefault(group, Configuration.Group(group))
def _parse_int(self, value):
return int(value)
def _parse_float(self, value):
return float(value)
def _parse_bool(self, value):
if isinstance(value, bool):
return value
elif not isinstance(value, str):
return bool(value)
value = value.lower()
if value in self._bool_true:
return True
elif value in self._bool_false:
return False
raise ValueError("invalid bool value '{0}'".format(value))
def _parse_string(self, value):
return value.decode(self._encoding) if isinstance(value, bytes) else value
def _parse_ints(self, value):
return self._parse_list(self._parse_int, value)
def _parse_strings(self, value):
return self._parse_list(self._parse_string, value)
def _parse_list(self, parser, value):
if isinstance(value, (list, tuple)):
vs = value
else:
vs = (v.strip() for v in value.split(",") if v.strip())
return tuple((parser(v) for v in vs))
def _uniformize(self, name):
return name.replace("-", "_")
    def _deuniformize(self, name):
        return name.replace("_", "-")
def parsed(self):
"""Return True if it has been parsed, or False."""
return self._parsed
def parse_files(self, filenames=""):
"""Parse the INI configuration files.
The argument is either a string standing for the path of the
configuration file, or a list of them.
"""
if self._parsed:
raise Exception("Have been parsed")
self._parsed = True
if filenames:
if not isinstance(filenames, (list, tuple)):
filenames = self._parse_string(filenames).strip(", ").split(",")
for filename in filenames:
self._parse_file(filename)
self._check_and_fix()
def _check_and_fix(self):
for gname, opts in self._opts.items():
group = self._caches[gname]
for name, opt in opts.items():
if name in group:
continue
elif opt[1] is not None or opt[0] == self._parse_bool:
self._set_group_opt(gname, name, opt[1])
continue
if not self._allow_empty:
msg = "The option '{0}' in the group '{1}' has no value."
raise ValueError(msg.format(name, gname))
# Set the options in the default group into self.
group = self._caches.pop(self._default_group_name)
for key, value in group.items():
if key in self._caches:
msg = "'{0}' had has the value '{1}'"
raise ValueError(msg.format(self.__class__.__name__, key))
self._caches[key] = value
def _parse_file(self, filename):
filename = str(filename)
with open(filename) as f:
lines = f.readlines()
gname = self._default_group_name
index, max_index = 0, len(lines)
while index < max_index:
line = self._parse_string(lines[index]).strip()
index += 1
# Comment
if not line or line[0] in ("#", "=", ";"):
continue
# Group Section
if line[0] == "[":
if line[-1] != "]":
m = ("the format of the group is wrong, "
"which must start with [ and end with ]")
raise ValueError(m)
_gname = line[1:-1]
if not _gname:
raise ValueError("the group name is empty")
if _gname not in self._caches:
continue
gname = _gname
continue
# Group Option Values
items = line.split("=", 1)
if len(items) != 2:
raise ValueError("the format is wrong, must contain '=': " + line)
name, value = self._uniformize(items[0].strip()), items[1].strip()
# Handle the continuation line
if value[-1:] == "\\":
values = [value.rstrip("\\").strip()]
while index < max_index:
value = lines[index].strip()
values.append(value.rstrip("\\").strip())
index += 1
if value[-1:] != "\\":
break
value = "\n".join(values)
opt = self._opts[gname].get(name, None)
if opt:
self._set_group_opt(gname, name, opt[0](value))
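    # Example of a file accepted by _parse_file (contents are illustrative;
    # only groups with registered options are read):
    #
    #     [http]
    #     host = 127.0.0.1
    #     servers = a.example.com, \
    #               b.example.com
    #
    # Group headers use [name], options require '=', a trailing backslash
    # continues the value on the next line, and list options are
    # comma-separated.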
def register_bool(self, name, short=None, default=None, group=None, help=None):
"""Register the bool option.
The value of this option will be parsed to the type of bool.
"""
self._register(name, self._parse_bool, short=short, default=default,
group=group, help=help)
def register_int(self, name, short=None, default=None, group=None, help=None):
"""Register the int option.
The value of this option will be parsed to the type of int.
"""
self._register(name, self._parse_int, short=short, default=default,
group=group, help=help)
def register_float(self, name, short=None, default=None, group=None, help=None):
"""Register the float option.
The value of this option will be parsed to the type of float.
"""
self._register(name, self._parse_float, short=short, default=default,
group=group, help=help)
def register_str(self, name, short=None, default=None, group=None, help=None):
"""Register the str option.
The value of this option will be parsed to the type of str.
"""
self._register(name, self._parse_string, short=short, default=default,
group=group, help=help)
def register_int_list(self, name, short=None, default=None, group=None, help=None):
"""Register the int list option.
The value of this option will be parsed to the type of int list.
"""
self._register(name, self._parse_ints, short=short, default=default,
group=group, help=help)
def register_str_list(self, name, short=None, default=None, group=None, help=None):
"""Register the string list option.
The value of this option will be parsed to the type of string list.
"""
self._register(name, self._parse_strings, short=short, default=default,
group=group, help=help)
###########################################################################
# Parse CLI
def parse(self, *args, **kwargs):
return self.parse_cli(*args, **kwargs)
def parse_cli(self, args=None, config_file_name="config-file"):
"""Parse the cli options."""
if self._parsed:
raise Exception("Have been parsed")
self._parsed = True
if args is None:
args = sys.argv[1:]
if not args:
self._check_and_fix()
return None
gopts, args = self._parser_cli(args, description=self._description,
config_file_name=config_file_name)
if getattr(args, "version", False):
print(self._version)
sys.exit(0)
if config_file_name:
config_file = getattr(args, self._uniformize(config_file_name), "")
for filename in config_file.split(","):
filename = filename.strip()
if filename:
self._parse_file(filename)
for cli_opt, (gname, name) in gopts.items():
opt = self._opts[gname][name]
value = getattr(args, cli_opt, None)
if value is not None:
value = opt[0](value)
if value != opt[1]:
self._set_group_opt(gname, name, value, force=True)
self._check_and_fix()
return args
def _parser_cli(self, args, description=None, config_file_name=None):
cli = ArgumentParser(description=description)
if config_file_name:
cli.add_argument("--" + config_file_name, default="",
help="The config file path.")
cli.add_argument("--version", action="store_true",
help="Print the version and exit.")
group_opts = {}
for gname, opts in self._opts.items():
if gname == self._default_group_name:
group = cli
else:
group = cli.add_argument_group(gname)
for name, (parser, default, help, short) in opts.items():
action = None
if parser == self._parse_bool:
action = "store_false" if default else "store_true"
default = False if default is None else default
                if gname == self._default_group_name:
                    opt_name = self._deuniformize(name)
                    opt_key = self._uniformize(name)
                else:
                    opt_name = self._deuniformize("{0}-{1}".format(gname, name))
                    opt_key = self._uniformize(opt_name)
group_opts[opt_key] = (gname, name)
short = "-" + short if short and short[0] != "-" else short
names = [short, "--" + opt_name] if short else ["--" + opt_name]
group.add_argument(*names, action=action, default=default, help=help)
return group_opts, cli.parse_args(args=args)
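# Usage sketch for Configuration (option names and the file are illustrative):
#
#     conf = Configuration(description="demo", version="1.0")
#     conf.register_str("host", default="127.0.0.1", group="http")
#     conf.register_int("port", default=8080, group="http")
#     conf.parse_files("app.ini")   # or conf.parse_cli() for --http-host/--http-port
#     print(conf.http.port)         # each group becomes an attribute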
# Configuration End
###############################################################################
###############################################################################
# Common
import requests
from urllib.parse import quote as qs_quote
def send_http_get(url, quote=True, use_key=False, co="?", timeout=5, json=False,
raise404=True, has_result=True, headers=None, **ks):
if ks:
to = lambda v: qs_quote(v) if quote else v
ks = {k: to(v() if callable(v) else v) for k, v in ks.items() if v is not None}
if use_key:
url = co.join((url, "&".join(("%s=%s" % (k, v) for k, v in ks.items()))))
else:
url = url.format(**ks)
if json:
if headers:
headers["Accept"] = "application/json"
else:
headers = {"Accept": "application/json"}
resp = requests.get(url, headers=headers, timeout=timeout)
status_code = resp.status_code
if status_code == 404:
if raise404:
raise Exception("not found %s" % url)
return None
elif status_code == 200:
if has_result:
return resp.json() if json else resp.content
return None
elif status_code == 204:
return None
raise OSError("%s: status_code=%s" % (url, status_code))
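# Usage sketch for send_http_get (URL and field are illustrative; with the
# default quote=True, substituted values must be strings):
#
#     data = send_http_get("https://api.example.com/items/{id}", json=True, id="42")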
# Common End
###############################################################################
###############################################################################
# DB Common
from sqlalchemy import create_engine, text as sql_text
from sqlalchemy.orm import sessionmaker, object_mapper
from sqlalchemy.sql.elements import TextClause
class DB:
"""Manager the DB connection."""
def __init__(self, write_connection, read_connection=None, autocommit=True,
expire_on_commit=False, echo=False, encoding="utf8",
poolclass=None, pool=None, min_pool_size=2, max_pool_size=5,
pool_timeout=30, idle_timeout=3600, base=None):
write_connection = self._fix_charset(write_connection, encoding)
if read_connection:
read_connection = self._fix_charset(read_connection, encoding)
kwargs = {
"echo": echo,
"encoding": encoding,
"poolclass": poolclass,
"pool": pool,
"pool_size": min_pool_size,
"pool_timeout": pool_timeout if pool_timeout else None,
"pool_recycle": idle_timeout,
"max_overflow": max_pool_size - min_pool_size,
"convert_unicode": True,
}
self._base = base
self._autocommit = autocommit
self._expire_on_commit = expire_on_commit
self._write_engine = self._create_engine(write_connection, kwargs)
self._write_session_cls = self._get_session_cls(self._write_engine)
if read_connection:
self._read_engine = self._create_engine(read_connection, kwargs)
self._read_session_cls = self._get_session_cls(self._read_engine)
else:
self._read_engine = self._write_engine
self._read_session_cls = self._write_session_cls
def _fix_charset(self, connection, encoding):
if "mysql" in connection and "charset=" not in connection:
if "?" in connection:
return "%s&charset=%s" % (connection, encoding)
return "%s?charset=%s" % (connection, encoding)
return connection
def _create_engine(self, connection, kwargs):
if connection.startswith("sqlite:///"):
kwargs.pop("pool_size", None)
kwargs.pop("pool_timeout", None)
kwargs.pop("max_overflow", None)
return create_engine(connection, **kwargs)
def _get_session_cls(self, engine):
return sessionmaker(bind=engine, autocommit=self._autocommit,
expire_on_commit=self._expire_on_commit)
def create_tables(self, base=None):
(base or self._base).metadata.create_all(self._write_engine)
def get_write_session(self):
return self._write_session_cls()
def get_read_session(self):
return self._read_session_cls()
def get_session(self):
return self.get_write_session()
def execute(self, sql, session=None, **kwargs):
if not isinstance(sql, TextClause):
sql = sql_text(sql)
return (session or self.get_session()).execute(sql, kwargs)
def fetchall(self, sql, **kwargs):
return self.execute(sql, self.get_read_session(), **kwargs).fetchall()
def fetchone(self, sql, **kwargs):
return self.execute(sql, self.get_read_session(), **kwargs).fetchone()
def first(self, sql, **kwargs):
return self.execute(sql, self.get_read_session(), **kwargs).first()
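# Usage sketch for DB (the connection string is illustrative; assumes a
# SQLAlchemy version that still accepts the keyword arguments used above):
#
#     db = DB("sqlite:///app.db")
#     row = db.first("SELECT 1 AS one")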
class ModelBase:
"""Base class for models."""
__tablename__ = ""
__table_initialized__ = False
def save(self, session):
"""Save this object."""
        # NOTE(boris-42): This part of the code should look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __repr__(self):
attrs = ", ".join(("%s=%s" % (k, v) for k, v in self.items()))
return "%s(%s)" % (self.__tablename__.title(), attrs)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def __contains__(self, key):
# Don't use hasattr() because hasattr() catches any exception, not only
        # AttributeError. We want to pass through SQLAlchemy exceptions
# (ex: sqlalchemy.orm.exc.DetachedInstanceError).
try:
getattr(self, key)
except AttributeError:
return False
else:
return True
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
return ModelIterator(self, iter(dict(object_mapper(self).columns).keys()))
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in values.items():
setattr(self, k, v)
def _as_dict(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict((key, value) for key, value in self)
joined = dict([(k, v) for k, v in self.__dict__.items() if not k[0] == '_'])
local.update(joined)
return local
def items(self):
"""Make the model object behave like a dict."""
return self._as_dict().items()
def keys(self):
"""Make the model object behave like a dict."""
return [key for key, value in self.items()]
class ModelIterator:
def __init__(self, model, columns):
self.model = model
self.i = columns
def __iter__(self):
return self
def __next__(self):
n = next(self.i)
return n, getattr(self.model, n)
# DB Common End
###############################################################################
###############################################################################
# DB
# from datetime import datetime
# from sqlalchemy import Column, String, Boolean, Integer, DateTime
# from sqlalchemy.sql import func, expression as expr
from sqlalchemy.ext.declarative import declarative_base
BASE = declarative_base()
class Model(ModelBase, BASE):
__tablename__ = "table"
class DBAPI(DB):
pass
# DB End
###############################################################################
def main(version="1.0.0"):
conf = Configuration(description="", version=version)
conf.register_str("log_level", default="INFO",
help="The level of the log, such as debug, info, etc.")
conf.register_str("log_file", default="", help="The file path of the log.")
conf.register_int("thread_num", default=0, help="The size of the coroutine pool.")
conf.parse()
if conf.thread_num > 0:
global taskpool, spawn
taskpool = gevent.pool.Pool(size=conf.thread_num)
spawn = taskpool.spawn
init_logging(LOG, conf.log_level, conf.log_file)
# TODO:)
if __name__ == "__main__":
main()
| 2.171875 | 2 |
SimCompanies/kind_ids.py | Gunak/SimCompaniesCLI | 4 | 12769470 | <filename>SimCompanies/kind_ids.py
KIND_IDS = {
} | 1.101563 | 1 |
API_Hardware/migrations/0001_initial.py | MTC-Hack/API_Mobile | 0 | 12769471 | <filename>API_Hardware/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 04:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('rat', '0009_auto_20170929_0120'),
]
operations = [
migrations.CreateModel(
name='AccelerationExcess',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='ControlExcess',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='ControlTelemetry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.BigIntegerField(default=0, verbose_name='datetime')),
('torque', models.FloatField(default=0, verbose_name='torque')),
('breake', models.FloatField(default=0, verbose_name='breake')),
('rpm', models.FloatField(default=0, verbose_name='rpm')),
('vehicle', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='rat.Vehicle')),
],
),
migrations.CreateModel(
name='LocationTelemetry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.BigIntegerField(default=0, verbose_name='datetime')),
('spd', models.FloatField(default=0, verbose_name='spd')),
('latitude', models.FloatField(default=0, verbose_name='latitude')),
('longitude', models.FloatField(default=0, verbose_name='longitude')),
('vehicle', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='rat.Vehicle')),
],
),
migrations.CreateModel(
name='SpeedExcess',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('limit', models.IntegerField(default=0, verbose_name='limit')),
('loc_telemetry', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='API_Hardware.LocationTelemetry')),
],
),
migrations.CreateModel(
name='SpeedTask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('loc_telemetry', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='API_Hardware.LocationTelemetry')),
],
),
migrations.CreateModel(
name='Telemetry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.BigIntegerField(default=0, verbose_name='datetime')),
('fuel', models.FloatField(default=0, verbose_name='fuel')),
('acc_x', models.FloatField(default=0, verbose_name='acc_x')),
('acc_y', models.FloatField(default=0, verbose_name='acc_y')),
('acc_z', models.FloatField(default=0, verbose_name='acc_z')),
('vehicle', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='rat.Vehicle')),
],
),
migrations.AddField(
model_name='controlexcess',
name='control_telemetry',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='API_Hardware.ControlTelemetry'),
),
migrations.AddField(
model_name='accelerationexcess',
name='telemetry',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='API_Hardware.Telemetry'),
),
]
| 1.585938 | 2 |
tensorflow_1_x/7_kaggle/learntools/pandas/grouping_and_sorting.py | amitbcp/machine_learning_with_Scikit_Learn_and_TensorFlow | 11 | 12769472 | <filename>tensorflow_1_x/7_kaggle/learntools/pandas/grouping_and_sorting.py
import pandas as pd
from learntools.core import *
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
class ReviewsWritten(EqualityCheckProblem):
_var = 'reviews_written'
_expected = reviews.groupby('taster_twitter_handle').size()
_hint = "Use the `groupby` operation and `size()` (or `count()`)."
_solution = """
```python
reviews_written = reviews.groupby('taster_twitter_handle').size()
```
or
```python
reviews_written = reviews.groupby('taster_twitter_handle').taster_twitter_handle.count()
```
"""
class BestRatingPerPrice(EqualityCheckProblem):
_var = 'best_rating_per_price'
_expected = reviews.groupby('price')['points'].max().sort_index()
_hint = "Use `max()` and `sort_index()`. The relevant columns in the DataFrame are `price` and `points`."
_solution = CS("best_rating_per_price = reviews.groupby('price')['points'].max().sort_index()")
class PriceExtremes(EqualityCheckProblem):
_var = 'price_extremes'
_expected = reviews.groupby('variety').price.agg([min, max])
_hint = "Use `agg()`."
_solution = CS("price_extremes = reviews.groupby('variety').price.agg([min, max])")
class SortedVarieties(EqualityCheckProblem):
_var = 'sorted_varieties'
_expected = reviews.groupby('variety').price.agg([min, max]).sort_values(by=['min', 'max'], ascending=False)
_hint = "Use `sort_values()`, and provide a list of names to sort by."
_solution = CS("sorted_varieties = price_extremes.sort_values(by=['min', 'max'], ascending=False)")
class ReviewerMeanRatings(EqualityCheckProblem):
_var = 'reviewer_mean_ratings'
_expected = reviews.groupby('taster_name').points.mean()
_hint = "Use `mean()`."
_solution = CS("reviewer_mean_ratings = reviews.groupby('taster_name').points.mean()")
class GroupbyCountryVariety(EqualityCheckProblem):
_var = 'country_variety_counts'
_expected = reviews.groupby(['country', 'variety']).size().sort_values(ascending=False)
_hint = "Use `groupby()`, and provide a list of columns to group by. You may also find `size()` and `sort_values()` to be useful!"
_solution = CS("country_variety_counts = reviews.groupby(['country', 'variety']).size().sort_values(ascending=False)")
qvars = bind_exercises(globals(), [
ReviewsWritten,
BestRatingPerPrice,
PriceExtremes,
SortedVarieties,
ReviewerMeanRatings,
GroupbyCountryVariety,
],
tutorial_id=48,
)
__all__ = list(qvars)
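# The expected learner answer for ReviewsWritten, taken from _expected above:
#
#     reviews_written = reviews.groupby('taster_twitter_handle').size()
#
# (in the notebook this is then verified with the bound check object,
# e.g. q1.check(); the q1 name follows the learntools binding convention
# and is assumed here)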
| 3.21875 | 3 |
tests/compas/datastructures/test_halfedge.py | duchaoyu/compas | 0 | 12769473 | <reponame>duchaoyu/compas<filename>tests/compas/datastructures/test_halfedge.py
import pytest
import compas
from compas.datastructures import HalfEdge
# ==============================================================================
# Fixtures
# ==============================================================================
@pytest.fixture
def mesh():
vertices = [None, None, None, None]
faces = [[0, 1, 2], [0, 3, 1]]
he = HalfEdge()
for vertex in vertices:
he.add_vertex()
for face in faces:
he.add_face(face)
return he
@pytest.fixture
def vertex_key():
return 2
@pytest.fixture
def face_key():
return 1
@pytest.fixture
def edge_key():
return (0, 1)
# ==============================================================================
# Tests - Schema & jsonschema
# ==============================================================================
def test_edgedata_nondirectionality(mesh):
mesh.update_default_edge_attributes({'index': 0})
for index, (u, v) in enumerate(mesh.edges()):
mesh.edge_attribute((u, v), 'index', index)
assert all(mesh.edge_attribute((u, v), 'index') == mesh.edge_attribute((v, u), 'index') for u, v in mesh.edges())
def test_edgedata_io(mesh):
mesh.update_default_edge_attributes({'index': 0})
for index, (u, v) in enumerate(mesh.edges()):
mesh.edge_attribute((u, v), 'index', index)
other = HalfEdge.from_data(mesh.data)
assert all(other.edge_attribute(edge, 'index') == index for index, edge in enumerate(other.edges()))
def test_data_schema(mesh):
if not compas.IPY:
mesh.validate_data()
def test_json_schema(mesh):
if not compas.IPY:
mesh.validate_json()
# ==============================================================================
# Tests - Samples
# ==============================================================================
def test_vertex_sample(mesh):
for vertex in mesh.vertex_sample():
assert mesh.has_vertex(vertex)
for vertex in mesh.vertex_sample(size=mesh.number_of_vertices()):
assert mesh.has_vertex(vertex)
def test_edge_sample(mesh):
for edge in mesh.edge_sample():
assert mesh.has_edge(edge)
for edge in mesh.edge_sample(size=mesh.number_of_edges()):
assert mesh.has_edge(edge)
def test_face_sample(mesh):
for face in mesh.face_sample():
assert mesh.has_face(face)
for face in mesh.face_sample(size=mesh.number_of_faces()):
assert mesh.has_face(face)
# ==============================================================================
# Tests - Vertex Attributes
# ==============================================================================
def test_default_vertex_attributes():
he = HalfEdge(name='test', default_vertex_attributes={'a': 1, 'b': 2})
for vertex in he.vertices():
assert he.vertex_attribute(vertex, name='a') == 1
assert he.vertex_attribute(vertex, name='b') == 2
he.vertex_attribute(vertex, name='a', value=3)
assert he.vertex_attribute(vertex, name='a') == 3
def test_vertex_attributes_key_not_found(mesh):
with pytest.raises(KeyError):
mesh.vertex_attributes(mesh.number_of_vertices() + 1)
def test_vertex_attributes_from_defaults(mesh):
mesh.update_default_vertex_attributes({"foo": "bar"})
assert mesh.vertex_attributes(mesh.get_any_vertex())["foo"] == "bar"
def test_vertex_attributes_not_in_defaults(mesh):
mesh.update_default_vertex_attributes({"foo": "bar"})
attrs = mesh.vertex_attributes(mesh.get_any_vertex())
with pytest.raises(KeyError):
attrs["baz"]
def test_get_vertex_attribute_from_view(mesh, vertex_key):
mesh.vertex_attribute(vertex_key, name="foo", value="bar")
attrs = mesh.vertex_attributes(vertex_key)
assert attrs["foo"] == "bar"
def test_set_vertex_attribute_in_view(mesh, vertex_key):
attrs = mesh.vertex_attributes(vertex_key)
attrs["foo"] = "bar"
assert mesh.vertex_attribute(vertex_key, name="foo") == "bar"
def test_del_vertex_attribute_in_view(mesh, vertex_key):
mesh.vertex_attribute(vertex_key, name="foo", value="bar")
attrs = mesh.vertex_attributes(vertex_key)
del attrs["foo"]
with pytest.raises(KeyError):
attrs["foo"]
# ==============================================================================
# Tests - Face Attributes
# ==============================================================================
def test_default_face_attributes():
he = HalfEdge(name='test', default_face_attributes={'a': 1, 'b': 2})
for face in he.vertices():
assert he.face_attribute(face, name='a') == 1
assert he.face_attribute(face, name='b') == 2
he.face_attribute(face, name='a', value=3)
assert he.face_attribute(face, name='a') == 3
def test_face_attributes_is_empty(mesh):
assert mesh.face_attributes(mesh.get_any_face()) == {}
def test_face_attributes_from_defaults(mesh):
mesh.update_default_face_attributes({"foo": "bar"})
assert mesh.face_attributes(mesh.get_any_face())["foo"] == "bar"
def test_face_attributes_not_in_defaults(mesh):
mesh.update_default_face_attributes({"foo": "bar"})
attrs = mesh.face_attributes(mesh.get_any_face())
with pytest.raises(KeyError):
attrs["baz"]
def test_get_face_attribute_from_view(mesh, face_key):
mesh.face_attribute(face_key, name="foo", value="bar")
attrs = mesh.face_attributes(face_key)
assert attrs["foo"] == "bar"
def test_set_face_attribute_in_view(mesh, face_key):
attrs = mesh.face_attributes(face_key)
attrs["foo"] = "bar"
assert mesh.face_attribute(face_key, name="foo") == "bar"
def test_del_face_attribute_in_view(mesh, face_key):
mesh.face_attribute(face_key, name="foo", value="bar")
attrs = mesh.face_attributes(face_key)
del attrs["foo"]
with pytest.raises(KeyError):
attrs["foo"]
# ==============================================================================
# Tests - Edge Attributes
# ==============================================================================
def test_default_edge_attributes():
he = HalfEdge(name='test', default_edge_attributes={'a': 1, 'b': 2})
for edge in he.vertices():
assert he.edge_attribute(edge, name='a') == 1
assert he.edge_attribute(edge, name='b') == 2
he.edge_attribute(edge, name='a', value=3)
assert he.edge_attribute(edge, name='a') == 3
def test_edge_attributes_is_empty(mesh, edge_key):
assert mesh.edge_attributes(edge_key) == {}
def test_edge_attributes_from_defaults(mesh, edge_key):
mesh.update_default_edge_attributes({"foo": "bar"})
assert mesh.edge_attributes(edge_key)["foo"] == "bar"
def test_edge_attributes_not_in_defaults(mesh, edge_key):
mesh.update_default_edge_attributes({"foo": "bar"})
attrs = mesh.edge_attributes(edge_key)
with pytest.raises(KeyError):
attrs["baz"]
def test_get_edge_attribute_from_view(mesh, edge_key):
mesh.edge_attribute(edge_key, name="foo", value="bar")
attrs = mesh.edge_attributes(edge_key)
assert attrs["foo"] == "bar"
def test_set_edge_attribute_in_view(mesh, edge_key):
attrs = mesh.edge_attributes(edge_key)
attrs["foo"] = "bar"
assert mesh.edge_attribute(edge_key, name="foo") == "bar"
def test_del_edge_attribute_in_view(mesh, edge_key):
mesh.edge_attribute(edge_key, name="foo", value="bar")
attrs = mesh.edge_attributes(edge_key)
del attrs["foo"]
with pytest.raises(KeyError):
attrs["foo"]
| 2.1875 | 2 |
ipydatawidgets/ndarray/widgets.py | consideRatio/ipydatawidgets | 29 | 12769474 | <reponame>consideRatio/ipydatawidgets
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""
Data widgets for numpy arrays.
"""
from contextlib import contextmanager
from ipywidgets import register
from traitlets import Unicode, Set, Undefined, Int, validate
import numpy as np
import six
from ..widgets import DataWidget
from .traits import NDArray
from .serializers import compressed_array_serialization
class NDArrayBase(DataWidget):
"""A common base class for NDArray-based widgets
"""
@property
def shape(self):
return self._get_shape()
@property
def dtype(self):
return self._get_dtype()
def _get_shape(self):
raise NotImplementedError()
def _get_dtype(self):
raise NotImplementedError()
@register
class NDArraySource(NDArrayBase):
"""Base class for widgets that supplies an ndarray in the front-end only.
"""
pass
@register
class NDArrayWidget(NDArrayBase):
"""A widget representing an arbitrary array.
    This is useful when several widgets share the same array,
    since it keeps their state synchronized.
    More specific subtypes can be used instead to allow for implied
    types and dimensionality checks.
"""
_model_name = Unicode('NDArrayModel').tag(sync=True)
_segments_to_send = Set()
array = NDArray().tag(sync=True, **compressed_array_serialization)
compression_level = Int(0,
help='If above 0, compress the data with zlib during serialization. '
'Note: It is often more efficient to turn on compression on the '
'notebook application level than to use this option.').tag(sync=True)
def __init__(self, array=Undefined, **kwargs):
self._instance_validators = set()
super(NDArrayWidget, self).__init__(array=array, **kwargs)
def _get_shape(self):
return self.array.shape
def _get_dtype(self):
return self.array.dtype
@validate('array')
def _validate_array(self, proposal):
"""Validate array against external validators (instance only)
This allows others to add constraints on the array of this
widget dynamically. Internal use is so that a constrained
DataUnion can validate the array of a widget set to itself.
"""
value = proposal['value']
for validator in self._instance_validators:
value = validator(value)
return value
def notify_changed(self):
"""Use this to mark that the array is changed.
This will cause the array to be synced as it normally would
after a change, and is useful when the array has been modified
in-place.
This respects hold_trait_notifications and hold_sync.
"""
self._notify_trait('array', self.array, self.array)
def sync_segment(self, segments):
"""Sync a segments of contiguous memory.
By only syncing a segment of the array, a full transmission of the
updated array is avoided. However, this does put the responsibility
of ensuring the correct sync state on the caller.
This respects hold_sync, so several segments can be stacked with
multiple calls when holding the sync.
Parameters
----------
segments : iterable of two-tuples
An iterable collection of segments represented by (start, stop) tuples.
"""
if self._holding_sync:
self._segments_to_send.add(*segments)
else:
self.send_segment(segments)
def send_segment(self, segments):
"""Send segments to the front-end.
Note: This does not respect hold_sync. If that is wanted, use
sync_segment instead.
Parameters
----------
segments : iterable of two-tuples
An iterable collection of segments represented by (start, stop) tuples.
"""
starts = []
buffers = []
raveled = np.ravel(self.array, order='C')
length = len(raveled)
for s in segments:
            starts.append(s[0] if s[0] >= 0 else length + s[0])
buffers.append(np.ascontiguousarray(raveled[s[0]:s[1]]))
msg = {'method': 'update_array_segment', 'name': 'array', 'starts': starts}
self._send(msg, buffers)
@contextmanager
def hold_sync(self):
with super(NDArrayWidget, self).hold_sync():
try:
yield
finally:
if self._segments_to_send:
self.send_segment(self._segments_to_send)
self._segments_to_send.clear()
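# Usage sketch for NDArrayWidget (shape and values are illustrative):
#
#     w = NDArrayWidget(np.zeros((4, 4)))
#     w.array[0, :] = 1.0        # in-place edit
#     w.sync_segment([(0, 4)])   # push only elements 0..4 of the raveled array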
# Signature SHOULD be create_constrained_arraywidget(*validators, dtype=None),
# but this is not supported by Python 2.7. For Python 3, we try to be
# helpful by overriding the signature below.
def create_constrained_arraywidget(*validators, **kwargs):
"""Returns a subclass of NDArrayWidget with a constrained array.
    Accepts the keyword argument 'dtype' in addition to any validators.
"""
dtype = kwargs.pop('dtype', None)
return type('ConstrainedNDArrayWidget', (NDArrayWidget,), {
'array': NDArray(dtype=dtype).tag(
sync=True,
**compressed_array_serialization
).valid(*validators)
})
if six.PY3:
from inspect import Signature, Parameter
create_constrained_arraywidget.__signature__ = Signature(parameters=(
Parameter('validators', Parameter.VAR_POSITIONAL),
Parameter('dtype', Parameter.KEYWORD_ONLY, default=None),
))
def ConstrainedNDArrayWidget(*validators, **kwargs):
import warnings
warnings.warn('ConstrainedNDArrayWidget is deprecated, '
'use create_constrained_arraywidget instead')
return create_constrained_arraywidget(*validators, **kwargs)
| 2.421875 | 2 |
src/python/ag1k/phase1_ar3.py | wtsi-team112/agam-report-base | 0 | 12769475 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os
import pyfasta
import allel
import seaborn as sns
import petl as etl
import h5py
import pandas
title = 'Phase 1 AR3 release'
pop_ids = 'AOM', 'BFM', 'GWA', 'GNS', 'BFS', 'CMS', 'GAS', 'UGS', 'KES'
pop_labels = {
'AOM': 'AO $coluzzii$',
'BFM': 'BF $coluzzii$',
'GWA': 'GW',
'GNS': 'GN $gambiae$',
'BFS': 'BF $gambiae$',
'CMS': 'CM $gambiae$',
'UGS': 'UG $gambiae$',
'GAS': 'GA $gambiae$',
'KES': 'KE',
'colony': 'colony',
}
pop_colors = {
'AOM': sns.color_palette('YlOrBr', 5)[4],
'BFM': sns.color_palette('Reds', 3)[1],
'GWA': sns.color_palette('YlOrBr', 5)[1],
'GNS': sns.color_palette('Blues', 3)[0],
'BFS': sns.color_palette('Blues', 3)[1],
'CMS': sns.color_palette('Blues', 3)[2],
'UGS': sns.color_palette('Greens', 2)[0],
'GAS': sns.color_palette('Greens', 2)[1],
'KES': sns.color_palette('Greys', 5)[2],
'colony': sns.color_palette('Greys', 5)[-1]
}
# convert to hex notation for ease of use elsewhere
for p in pop_colors:
    pop_colors[p] = '#%02x%02x%02x' % tuple(int(255 * c) for c in pop_colors[p])
# chromatin
_data_chromatin = b"""CHX chro X 20009764 24393108
CH2R chro 2R 58984778 61545105
CH2L chro 2L 1 2431617
PEU2L chro 2L 2487770 5042389
IH2L chro 2L 5078962 5788875
IH3R chro 3R 38988757 41860198
CH3R chro 3R 52161877 53200684
CH3L chro 3L 1 1815119
PEU3L chro 3L 1896830 4235209
IH3L chro 3L 4264713 5031692
"""
tbl_chromatin = (
etl
.fromtext(etl.MemorySource(_data_chromatin))
    .split('lines', r'\s+', ['name', 'type', 'chrom', 'start', 'stop'])
.convert(('start', 'stop'), int)
.cutout('type')
)
# genome regions
region_X_speciation = 'X-speciation', 'X', 15000000, 24000000
region_X_free = 'X-free', 'X', 1, 14000000
region_3L_free = '3L-free', '3L', 15000000, 41000000
region_3R_free = '3R-free', '3R', 1, 37000000
# noinspection PyGlobalUndefined
def init(release_dir, load_geneset=False):
"""Initialise data resources.
Parameters
----------
release_dir : string
Local filesystem path where data from the release are stored.
    load_geneset : bool
        If True, load the geneset into memory.
"""
# reference sequence
####################
global genome_fn, genome
genome_dir = os.path.join(release_dir, 'genome')
genome_fn = os.path.join(genome_dir, 'Anopheles-gambiae-PEST_CHROMOSOMES_AgamP3.fa')
if os.path.exists(genome_fn):
genome = pyfasta.Fasta(genome_fn)
# genome annotations
####################
global geneset_agamp42_fn, geneset_agamp42
geneset_dir = os.path.join(release_dir, 'geneset')
geneset_agamp42_fn = os.path.join(
geneset_dir,
'Anopheles-gambiae-PEST_BASEFEATURES_AgamP4.2.sorted.gff3.gz')
if os.path.exists(geneset_agamp42_fn) and load_geneset:
geneset_agamp42 = allel.FeatureTable.from_gff3(geneset_agamp42_fn)
# variant callsets
##################
global callset, callset_pass
variation_dir = os.path.join(release_dir, 'variation')
# main callset
callset_h5_fn = os.path.join(variation_dir, 'main', 'hdf5', 'ag1000g.phase1.ar3.h5')
if os.path.exists(callset_h5_fn):
callset = h5py.File(callset_h5_fn, mode='r')
# main callset, PASS variants only
callset_pass_h5_fn = os.path.join(variation_dir, 'main', 'hdf5', 'ag1000g.phase1.ar3.pass.h5')
if os.path.exists(callset_pass_h5_fn):
callset_pass = h5py.File(callset_pass_h5_fn, mode='r')
# accessibility
###############
global accessibility
accessibility_dir = os.path.join(release_dir, 'accessibility')
accessibility_fn = os.path.join(accessibility_dir, 'accessibility.h5')
if os.path.exists(accessibility_fn):
accessibility = h5py.File(accessibility_fn, mode='r')
# sample metadata
#################
global samples_fn, tbl_samples, lkp_samples, sample_ids, df_samples
samples_dir = os.path.join(release_dir, 'samples')
samples_fn = os.path.join(samples_dir, 'samples.all.txt')
if os.path.exists(samples_fn):
tbl_samples = (
etl
.fromtsv(samples_fn)
.convert(('index', 'year', 'n_sequences', 'kt_2la', 'kt_2rb'), int)
.convert(('mean_coverage', 'latitude', 'longitude') + tuple(range(20, 36)), float)
)
lkp_samples = tbl_samples.recordlookupone('ox_code')
sample_ids = tbl_samples.values('ox_code').list()
df_samples = pandas.read_csv(samples_fn, sep='\t', index_col='index')
# extras
########
global allele_counts, allele_counts_gq10, outgroup_alleles, outgroup_allele_counts, \
outgroup_species
extras_dir = os.path.join(release_dir, 'extras')
# allele counts
allele_counts_fn = os.path.join(extras_dir, 'allele_counts.h5')
if os.path.exists(allele_counts_fn):
allele_counts = h5py.File(allele_counts_fn, mode='r')
allele_counts_gq10_fn = os.path.join(extras_dir, 'allele_counts.gq10.h5')
if os.path.exists(allele_counts_gq10_fn):
allele_counts_gq10 = h5py.File(allele_counts_gq10_fn, mode='r')
# outgroup data
outgroup_species = 'arab', 'meru', 'mela', 'quad', 'epir', 'chri'
outgroup_alleles_fn = os.path.join(extras_dir, 'outgroup_alleles.h5')
if os.path.exists(outgroup_alleles_fn):
outgroup_alleles = h5py.File(outgroup_alleles_fn, mode='r')
outgroup_allele_counts_fn = os.path.join(extras_dir, 'outgroup_allele_counts.h5')
if os.path.exists(outgroup_allele_counts_fn):
outgroup_allele_counts = h5py.File(outgroup_allele_counts_fn, mode='r')
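# Usage sketch (the release path is illustrative):
#
#     init('/data/ag1000g/phase1/AR3', load_geneset=False)
#     # module-level globals (genome, callset, tbl_samples, ...) are then
#     # populated for whichever resources exist under that path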
| 1.890625 | 2 |
np/reference/ch6code/determinant.py | focusunsink/study_python | 0 | 12769476 | import numpy as np
A = np.mat("3 4;5 6")
print "A\n", A
print "Determinant", np.linalg.det(A)
| 3.015625 | 3 |
core/views.py | techacademypython/hackaton_amada | 0 | 12769477 | <gh_stars>0
from django.shortcuts import render
# Create your views here.
from .models import *
from django.http import JsonResponse
def cordinates(request):
my_model = Device.objects.filter(id=1).last()
temp = my_model.get_cordinates()
temp = temp.split(",")
newtemp = []
for i in temp :
newtemp.append(float(i))
print(newtemp)
return JsonResponse({
"coordinates": newtemp})
def notfication(request):
my_notf = Notfication.objects.filter(device__id=1).last()
return JsonResponse({
"status": True,
"notication": my_notf.notfication()
})
def index(request):
context = {}
# context["marker"] = Fuel_of_Device.objects.all()
data = Device.objects.all()
notfication = Notfication.objects.all().order_by("-id")[:5]
    # device location
result = []
col = []
notf = []
order = 0
for item in data:
order = order + 1
try:
if item.humudity < 25:
color = "bg-success"
elif item.humudity < 51:
color = "yellow"
elif item.humudity < 75:
color = "orange"
else:
color = "bg-danger"
print(data)
# new_data = Fuel_of_Device.objects.get(
# device_imei__device_imei=item.device_imei)
result.append({
"device_id": item.imei,
"coords": {"lat": item.device_lat, "lng": item.device_long},
"device_icon": item.new_info(),
})
col.append({
"text": "{} {} {} {} {} {}".format(order, item.imei or int(0), item.tempureture or int(0),
item.humudity or int(0), float(item.pressure)/100 or int(0), item.time_s or int(0)).split(),
"battery": color,
})
except Exception as err:
print(err)
for i in notfication:
order = order + 1
notf.append({
"notfication": "{} {} {}".format(i.id, i.device.imei or int(0), i.time_s or int(0)).split(),
"not" : i.notification_type,
"user": i.by_who
})
print(col)
# print(result)
print(notf)
context["object_list"] = result
context["info"] = col
context["notf"] = notf
# context["user"] =
return render(request, "index.html", context) | 2.359375 | 2 |
python/22.py | kylekanos/project-euler-1 | 0 | 12769478 | <gh_stars>0
#!/usr/bin/env python
# ad hoc, just do what it says to do
names = sorted([name.strip('"') for name in raw_input().split(',')])
print sum((i+1)*sum(ord(c)-ord('A')+1 for c in names[i]) for i in xrange(len(names)))
| 3.21875 | 3 |
idm_lp/database/chat_enter.py | lper1/dgm-. | 47 | 12769479 | from typing import Optional
from pydantic import BaseModel
class ChatEnterModel(BaseModel):
peer_id: int
hello_text: Optional[str]
| 2.4375 | 2 |
tests/tabnet/test_utils.py | clemens33/thesis | 0 | 12769480 | <filename>tests/tabnet/test_utils.py
import pytest
import torch
class TestGhostBatchNorm1d():
@pytest.mark.parametrize("batch_size, input_size, momentum, virtual_batch_size",
[
(128, 32, 0.1, 4),
(1024, 512, 0.01, 128),
])
def test_statistics_2d(self, batch_size, input_size, momentum, virtual_batch_size):
"""tests ghost batch norm statistics"""
from tabnet.utils import GhostBatchNorm1d
input = torch.randn(size=(batch_size, input_size))
gbn = GhostBatchNorm1d(input_size=input_size, momentum=momentum, virtual_batch_size=virtual_batch_size)
output = gbn(input)
mean = torch.mean(output)
std = torch.std(output)
assert torch.allclose(mean, torch.tensor(0.0))
assert torch.allclose(std, torch.tensor(1.0), atol=1e-4)
@pytest.mark.parametrize("batch_size, sequence_length, input_size, momentum, virtual_batch_size",
[
(128, 10, 32, 0.1, 128),
(1024, 5, 512, 0.01, 128),
(1024, 100, 512, 0.01, 10),
(128, 512, 512, 0.1, 2),
])
def test_statistics_3d(self, batch_size, sequence_length, input_size, momentum, virtual_batch_size):
from tabnet.utils import GhostBatchNorm1d
input = torch.randn(size=(batch_size, sequence_length, input_size))
gbn = GhostBatchNorm1d(input_size=input_size, momentum=momentum, virtual_batch_size=virtual_batch_size)
output = gbn(input)
mean = torch.mean(output, dim=(0, 1))
std = torch.std(output, dim=(0, 1))
# TODO verify proper numerical differences
assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-7)
assert torch.allclose(std, torch.ones_like(std), atol=1e-3)
| 2.328125 | 2 |
tests/post/steps/test_salt_api.py | CPT-Jack-A-Castle/metalk8s | 1 | 12769481 | import ast
import base64
import json
import requests
import pytest
from pytest_bdd import parsers, scenario, then, when
def _negation(value):
"""Parse an optional negation after a verb (in a Gherkin feature spec)."""
if value == "":
return False
elif value in [" not", "not"]:
return True
else:
raise ValueError("Cannot parse '{}' as an optional negation".format(value))
# Scenario {{{
@scenario("../features/salt_api.feature", "Login to SaltAPI using Basic auth")
def test_login_basic_auth_to_salt_api(host):
pass
@scenario(
"../features/salt_api.feature", "Login to SaltAPI using an admin ServiceAccount"
)
def test_login_salt_api_admin_sa(host):
pass
@scenario(
"../features/salt_api.feature",
"Login to SaltAPI using the storage-operator ServiceAccount",
)
def test_login_salt_api_storage_operator(host):
pass
@scenario("../features/salt_api.feature", "Login to SaltAPI using any ServiceAccount")
def test_login_salt_api_service_account(host):
pass
@scenario(
"../features/salt_api.feature", "SaltAPI impersonation using a ServiceAccount"
)
def test_salt_api_impersonation_with_bearer_auth(host):
pass
@pytest.fixture
def salt_api_address(control_plane_ip):
return "{}:{}".format(control_plane_ip, 4507)
@pytest.fixture(scope="function")
def context():
return {}
# }}}
# When {{{
@when(parsers.parse("we login to SaltAPI as '{username}' using password '{password}'"))
def login_salt_api_basic(host, username, password, salt_api_address, context):
context["salt-api"] = _salt_api_login(
salt_api_address, username=username, password=password
)
@when("we login to SaltAPI with an admin ServiceAccount")
def login_salt_api_admin_sa(host, k8s_client, admin_sa, salt_api_address, context):
sa_name, sa_namespace = admin_sa
context["salt-api"] = _login_salt_api_sa(
salt_api_address, k8s_client, sa_name, sa_namespace
)
@when(
parsers.parse(
"we login to SaltAPI with the ServiceAccount '{namespace}/{account_name}'"
)
)
def login_salt_api_system_sa(
host, k8s_client, namespace, account_name, salt_api_address, context
):
context["salt-api"] = _login_salt_api_sa(
salt_api_address,
k8s_client,
account_name,
namespace,
)
@when(
parsers.parse(
"we impersonate user '{username}' against SaltAPI "
"using the ServiceAccount '{namespace}/{account_name}'"
)
)
def login_salt_api_token_override_username(
host, k8s_client, namespace, account_name, username, salt_api_address, context
):
context["salt-api"] = _login_salt_api_sa(
salt_api_address,
k8s_client,
account_name,
namespace,
username=username,
)
# }}}
# Then {{{
@then(
parsers.cfparse(
"we can{negated:Negation?} ping all minions",
extra_types={"Negation": _negation},
)
)
def ping_all_minions(host, context, negated):
result = _salt_call(context, "test.ping", tgt="*")
if negated:
assert result.status_code == 401
assert "No permission" in result.text
else:
result_data = result.json()
assert result_data["return"][0] != []
@then(
parsers.cfparse(
"we can{negated:Negation?} run state '{module}' on '{targets}'",
extra_types={"Negation": _negation},
)
)
def run_state_on_targets(host, context, negated, module, targets):
result = _salt_call(context, "state.sls", tgt=targets, kwarg={"mods": module})
if negated:
assert result.status_code == 401
assert "No permission" in result.text
else:
assert result.status_code == 200
@then("authentication fails")
def authentication_fails(host, context):
assert context["salt-api"]["login-status-code"] == 401
@then("authentication succeeds")
def authentication_succeeds(host, context):
assert context["salt-api"]["login-status-code"] == 200
@then(parsers.parse("we can invoke '{modules}' on '{targets}'"))
def invoke_module_on_target(host, context, modules, targets):
assert {targets: ast.literal_eval(modules)} in context["salt-api"]["perms"]
@then(parsers.parse("we have '{perms}' perms"))
def have_perms(host, context, perms):
assert perms in context["salt-api"]["perms"]
@then(parsers.parse("we have no permissions"))
def have_no_perms(host, context):
assert context["salt-api"]["perms"] == {}
# }}}
# Helpers {{{
def _login_salt_api_sa(address, k8s_client, name, namespace, username=None):
service_account = k8s_client.read_namespaced_service_account(
name=name, namespace=namespace
)
secret = k8s_client.read_namespaced_secret(
name=service_account.secrets[0].name, namespace=namespace
)
token = base64.decodebytes(secret.data["token"].encode("utf-8"))
if username is None:
username = "system:serviceaccount:{}:{}".format(namespace, name)
return _salt_api_login(address, username=username, token=token)
def _salt_api_login(address, username=None, password=None, token=None):
data = {"eauth": "kubernetes_rbac"}
if username:
data["username"] = username
if password:
data["password"] = password
if token:
data["token"] = token
response = requests.post(
"https://{}/login".format(address),
data=data,
verify=False,
)
result = {
"url": "https://{}".format(address),
"token": None,
"perms": [],
"login-status-code": response.status_code,
}
if response.status_code == 200:
json_data = response.json()
result["token"] = json_data["return"][0]["token"]
result["perms"] = json_data["return"][0]["perms"]
return result
def _salt_call(context, fun, tgt="*", arg=None, kwarg=None):
action = {
"client": "local",
"tgt": tgt,
"fun": fun,
}
if arg is not None:
action["arg"] = arg
if kwarg is not None:
action["kwarg"] = kwarg
return requests.post(
context["salt-api"]["url"],
json=[action],
headers={
"X-Auth-Token": context["salt-api"]["token"],
},
verify=False,
)
# }}}
| 2.546875 | 3 |
roles/systemd_monitor/templates/scripts/monitor.py | ekaparulin/systemd-monitor | 1 | 12769482 | <gh_stars>1-10
#!/usr/bin/python3
from pydbus import SystemBus
from gi.repository import GLib
import yaml
import sys
import os
import urllib.request
import urllib.error
import json
import time
import platform
class OpsGenieApi:
def __init__(self):
self.genie_key = os.environ.get('OPSGENIE_APIKEY')
self.base_url = 'https://api.%s/v2/alerts' % os.environ.get('OPSGENIE_SERVER', 'opsgenie.com')
self.alerts = []
assert self.genie_key != None
def create_alert(self, message='', description='', priority='P5', tags=[]):
# Suppress duplicates
if len(self.alerts) > 0:
return
print('Create alert: "%s"' % (message))
payload = {}
payload['message'] = message
payload['description'] = description
payload['priority'] = priority
payload['tags'] = tags
headers = {}
headers['Content-Type'] = 'application/json'
req_data = json.loads(self.__request(url=self.base_url, data=payload, headers=headers))
tries = 0
while tries < 3:
try:
alert_data = json.loads(self.__request(self.base_url + '/requests/' + req_data['requestId'] ))
self.alerts.append(alert_data['data']['alertId'])
break
except urllib.error.HTTPError:
tries += 1
time.sleep(1)
def close_alerts(self, note=None):
while len(self.alerts) != 0:
self.close_alert(alert_id=self.alerts.pop(), note=note)
def close_alert(self, alert_id, note=None):
print('Close alert: %s, reason: "%s"' % (alert_id, note))
payload = {}
if note != None:
payload['note'] = note
headers = {}
headers['Content-Type'] = 'application/json'
self.__request(url=self.base_url + '/%s/close?identifierType=id' % alert_id, data=payload, headers=headers)
def __request(self, url, data=None, headers={}):
if data != None:
data = json.dumps(data).encode('utf8')
req = urllib.request.Request(url, data)
for k,v in headers.items():
req.add_header(k, v)
req.add_header('Authorization', 'GenieKey %s' % self.genie_key)
resp = urllib.request.urlopen(req)
data = resp.read()
return(data.decode(resp.info().get_content_charset('utf-8')))
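# Usage sketch for OpsGenieApi (requires OPSGENIE_APIKEY in the environment;
# message and tags are illustrative):
#
#     og = OpsGenieApi()
#     og.create_alert(message='unit inactive', priority='P5', tags=['systemd'])
#     og.close_alerts(note='unit recovered')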
class Unit:
def __init__(self, name):
self.bus = SystemBus()
self.systemd = SystemBus().get('.systemd1')
self.name = name
self.unit = self.bus.get('.systemd1', self.systemd.GetUnit(self.name))
self.connection = self.unit['org.freedesktop.DBus.Properties'].PropertiesChanged.connect(self.callback)
self.og = OpsGenieApi()
self.notify(True)
def active_state(self):
return self.unit.ActiveState
def callback(self, interface_name, changed_properties, invalidated_properties):
if interface_name != 'org.freedesktop.systemd1.Unit':
return
self.notify()
def name(self):
return self.name
def disconnect(self):
self.connection.disconnect()
def notify(self, suppress_active=False):
if self.unit.ActiveState == 'active':
if suppress_active:
return
self.og.close_alerts(note='%s: systemd %s state is %s (sub state=%s)' % (platform.node(), self.name, self.unit.ActiveState, self.unit.SubState))
return
if self.unit.ActiveState == 'inactive':
self.og.create_alert(
message='%s: systemd %s is %s' % ( platform.node(), self.name, self.unit.ActiveState),
description='%s: systemd %s (state=%s, sub state=%s)' % ( platform.node(), self.name, self.unit.ActiveState, self.unit.SubState),
priority='P5', tags=['Moi'])
if __name__ == '__main__':
# Parse config
if len(sys.argv) < 2:
sys.exit('Usage: %s /path/to/config.yaml' % sys.argv[0])
if not os.path.exists(sys.argv[1]):
sys.exit('ERROR: Config file %s was not found!' % sys.argv[1])
config_file = open(sys.argv[1])
config = yaml.load(config_file, Loader=yaml.FullLoader)
# Set env vars
if 'opsgenie' in config.keys():
if 'server' in config['opsgenie'].keys():
os.environ['OPSGENIE_SERVER'] = os.environ.get('OPSGENIE_SERVER', config['opsgenie']['server'])
if 'apikey' in config['opsgenie'].keys():
os.environ['OPSGENIE_APIKEY'] = os.environ.get('OPSGENIE_APIKEY', config['opsgenie']['apikey'])
# Build watchers
units = []
for unit in config['watch']:
units.append(Unit(unit))
# Enter the main loop
loop = GLib.MainLoop()
try:
loop.run()
except KeyboardInterrupt:
for unit in units:
unit.disconnect()
| 2.34375 | 2 |
ClassificationAndRegression/EnsembleMethods/BaggingClassifier.py | kopok2/machine-learning-algorithms | 0 | 12769483 | # coding=utf-8
"""Comparison of various classifiers acting alone and inside an bagging ensemble."""
from sklearn import datasets, model_selection, metrics, tree, ensemble
if __name__ == "__main__":
print("Loading data...")
X, y = datasets.load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y)
print("Fitting classifiers...")
t = tree.DecisionTreeClassifier()
t.fit(X_train, y_train)
e = ensemble.BaggingClassifier(tree.DecisionTreeClassifier(), n_estimators=35, max_features=0.5, max_samples=0.5)
e.fit(X_train, y_train)
print("Evaluating classifiers...")
print("#" * 128)
print("Decision tree:")
print("Test:")
print(metrics.classification_report(y_test, t.predict(X_test)))
print(metrics.confusion_matrix(y_test, t.predict(X_test)))
print("Training:")
print(metrics.classification_report(y_train, t.predict(X_train)))
print(metrics.confusion_matrix(y_train, t.predict(X_train)))
print("#" * 128)
print("Decision tree ensemble:")
print("Decision tree:")
print("Test:")
print(metrics.classification_report(y_test, e.predict(X_test)))
print(metrics.confusion_matrix(y_test, e.predict(X_test)))
print("Training:")
print(metrics.classification_report(y_train, e.predict(X_train)))
print(metrics.confusion_matrix(y_train, e.predict(X_train)))
| 2.75 | 3 |
lv3/2xn.py | 5joon2/algorithm_programmers | 0 | 12769484 | import sys
sys.setrecursionlimit(60000)
def solution(n):
    # memoized (top-down) DP; an equivalent bottom-up version is kept below:
# dp = [-1 for i in range(60001)]
# dp[1] = 1
# dp[2] = 2
# for i in range(3, (n+1)):
# dp[i] = dp[i-1] + dp[i-2]
# return dp[n] % 1000000007
dp = [-1 for i in range(60001)]
dp[1] = 1
dp[2] = 2
def call_dp(n):
if dp[n] != -1:
return dp[n]
elif n == 0:
return 1
elif n == 1:
return 1
elif n == 2:
return 2
dp[n] = (call_dp(n-1) + call_dp(n-2)) % 1000000007
return dp[n]
return call_dp(n)
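# e.g. solution(4) == 5 (Fibonacci-style tiling count, modulo 1000000007)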
| 2.625 | 3 |
utils/label_collection.py | SteveXWu/face_attribute_label_tool | 0 | 12769485 | from PySide2 import QtWidgets, QtCore
class LabelCollector(object):
def __init__(self, ui):
self.ui = ui
self.label_buttons = {}
self.occluded_buttons = {}
self.names = {}
for name, value in vars(self.ui).items():
if isinstance(value, QtWidgets.QButtonGroup):
self.label_buttons[name] = value
if isinstance(value, QtWidgets.QCheckBox):
self.occluded_buttons[name] = value
self.label_buttons = sorted(self.label_buttons.items(), key=lambda x: int(x[0].split("_")[-1]))
self.labels = [-1] * 40
for index, (_, value) in enumerate(self.label_buttons):
value.buttonClicked.connect(lambda value, index=index: self.collect(value, index, "label"))
self.occluded = [0] * 40
for index, (_, value) in enumerate(self.occluded_buttons.items()):
value.clicked.connect(lambda value=value, index=index: self.collect(value, index, "occluded"))
def collect(self, button, index, which_list):
if which_list == "label":
self.labels[index] = int(button.text())
elif which_list == "occluded":
if button.checkState() == QtCore.Qt.CheckState.Unchecked:
self.occluded[index] = 0
else:
self.occluded[index] = 1
# print(self.labels)
def button_init(self, labels, occludeds):
for index, ((_, label_button), (_, occluded_button)) in enumerate(
zip(self.label_buttons, self.occluded_buttons.items())):
label_button.setExclusive(False)
for j, btn in enumerate(label_button.buttons()):
if labels[index] != -1 and labels[index] == j:
btn.setChecked(True)
else:
btn.setChecked(False)
occluded_button.setChecked(occludeds[index] == 1)
label_button.setExclusive(True)
def label_list(self):
return self.labels
def occluded_list(self):
return self.occluded
def set_lists(self, label_list, occluded_list):
self.labels = label_list
self.occluded = occluded_list
self.button_init(label_list, occluded_list)
def isfinished(self):
return bool(-1 not in self.labels)
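# Usage sketch (ui is the generated Qt UI object; save_labels is hypothetical):
#
#     collector = LabelCollector(ui)
#     collector.set_lists([-1] * 40, [0] * 40)   # 40 attribute slots, as above
#     if collector.isfinished():
#         save_labels(collector.label_list(), collector.occluded_list())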
| 2.75 | 3 |
source/main.py | wanghongsilver/34465A_dfh5_parsing | 1 | 12769486 | <reponame>wanghongsilver/34465A_dfh5_parsing
#-*- coding: utf-8 -*-
import h5py
def getDataFromH5py(fileName, target, start, length):
    h5f = h5py.File(fileName, 'r')
    keys = list(h5f.keys())
    print(keys)
    if target not in h5f:
        res = []
    elif start + length >= h5f[target].shape[0]:
        res = h5f[target][start:h5f[target].shape[0]]
    else:
        res = h5f[target][start:start + length]
    h5f.close()
    return res
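# Usage sketch (file and dataset names are illustrative):
#
#     values = getDataFromH5py('Data_Log.hdf5', 'measurements', 0, 100)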
def main():
    """Project entry point: list the datasets in the HDF5 log file."""
    file_name = './34465A Data Log 2020-06-12 12-59-16 72.hdf5'  # sample capture path (not opened below)
    h5 = h5py.File('../input_hdf5_data/Data_Log.hdf5', 'r')
    print(list(h5.keys()))
    h5.close()
main()
| 2.828125 | 3 |
tests/ingestion/transformers/monosi/test_monitors.py | monosidev/monosi | 156 | 12769487 | <gh_stars>100-1000
import pytest
import ingestion.transformers.monosi.monitors as monitors
@pytest.fixture
def schema():
return {
'columns': ['NAME', 'COL_NAME', 'COL_TYPE', 'COL_DESCRIPTION', 'COL_SORT_ORDER', 'DATABASE', 'SCHEMA', 'DESCRIPTION', 'IS_VIEW'],
'rows': [
{
'NAME': 'name_of_table',
'COL_NAME': 'name_of_col',
'COL_TYPE': 'timestamp_tz',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
{
'NAME': 'name_of_table',
'COL_NAME': 'name_of_col_2',
'COL_TYPE': 'text',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
{
'NAME': 'name_of_table_2',
'COL_NAME': 'name_of_col_3',
'COL_TYPE': 'int',
'COL_DESCRIPTION': None,
'COL_SORT_ORDER': '3',
'DATABASE': 'database',
'SCHEMA': 'schema',
'DESCRIPTION': None,
'IS_VIEW': 'false'
},
]
}
def test__transform_empty():
input_arr = {'rows': []}
output_arr = monitors.MonitorTransformer._transform(input_arr)
assert len(output_arr) == 0
def test__transform(schema):
output_arr = monitors.MonitorTransformer._transform(schema)
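    # The schema fixture spans two distinct tables, hence two monitors.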
expected_num_monitors = 2
assert len(output_arr) == expected_num_monitors
@pytest.fixture
def monitor():
return {}
@pytest.fixture
def normalized_schema():
return monitors.MonitorTransformer._normalized_schema()
def test__normalized_schema_correct(normalized_schema, monitor):
input_arr = [monitor]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == True
def test__normalized_schema_correct_multiple(normalized_schema, monitor):
input_arr = [monitor, monitor]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == True
def test__normalized_schema_incorrect_to_have_none(normalized_schema):
input_arr = []
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
def test__normalized_schema_incorrect(normalized_schema):
input_arr = [{"anything": "goeshere"}]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
def test__normalized_schema_incorrect_multiple(normalized_schema):
input_arr = [{}, {"anything": "goeshere"}]
is_correct = monitors.MonitorTransformer.match(input_arr, normalized_schema)
assert is_correct == False
@pytest.fixture
def original_schema():
return monitors.MonitorTransformer._original_schema()
def test__original_schema_correct(original_schema, schema):
is_correct = monitors.MonitorTransformer.match(schema, original_schema)
assert is_correct == True
def test__original_schema_incorrect_to_have_none(original_schema):
is_correct = monitors.MonitorTransformer.match({}, original_schema)
assert is_correct == False
def test__original_schema_incorrect(original_schema):
input_arr = {'anything': 'goeshere'}
is_correct = monitors.MonitorTransformer.match(input_arr, original_schema)
assert is_correct == False
| 2.125 | 2 |
TensorFlow/_BasicsReference/ref_tensors.py | superjamesmccafferty/DeepLearning-PersonalExamples | 0 | 12769488 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def write_event_file():
writer = tf.summary.FileWriter('.')
writer.add_graph(tf.get_default_graph())
print("Event Written")
def execute():
"""
    TensorFlow programs are graphs made of operations and tensors.
    Tensors: N-dimensional arrays.
    Operations: some kind of transformation that normally outputs a tensor.
    Below are examples of constant operations, each producing a scalar tensor.
    If you try to print these lines, you won't get a value, because they have not yet been run in a session.
"""
a = tf.constant(3.0, dtype=tf.float32)
b = tf.constant(4.0) # also tf.float32 implicitly
"""
We can create a graph by doing operations.
"""
total = a + b
"""
    You can easily visualize this graph using TensorBoard.
1) Output event file using write_event_file() [ABOVE]
2) Use command tensorboard --logdir . in your terminal
It'll start a webapp at localhost:6006
"""
| 3.640625 | 4 |
ocimatic/solutions.py | OCIoficial/ocimatic | 0 | 12769489 | <gh_stars>0
from pathlib import Path
from typing import Iterable, List, Optional, Tuple
from ocimatic import ui
from ocimatic.checkers import Checker
from ocimatic.dataset import Dataset
from ocimatic.source_code import CppSource, JavaSource, SourceCode
class Solution:
"""Abstract class to represent a solution
"""
def __init__(self, source: SourceCode):
self._source = source
@staticmethod
def load_solutions_in_dir(codename: str, dir: Path, managers_dir: Path) -> List['Solution']:
"""Search for solutions in a directory."""
assert dir.is_dir()
return [
solution for file_path in dir.iterdir()
for solution in [Solution.load(codename, file_path, managers_dir)] if solution
]
@staticmethod
def load(codename: str, source_path: Path, managers_dir: Path) -> Optional['Solution']:
if not source_path.is_file():
return None
source: SourceCode
if source_path.suffix == '.cpp':
grader = Path(managers_dir, 'grader.cpp')
source = CppSource(source_path,
extra_sources=[grader] if grader.exists() else [],
include=managers_dir)
elif source_path.suffix == '.java':
source = JavaSource(codename, source_path)
else:
return None
return Solution(source)
@ui.solution_group()
def run(self,
dataset: Dataset,
checker: Checker,
check: bool = False,
sample: bool = False) -> Iterable[ui.WorkResult]:
"""Run this solution for all test cases in the given dataset."""
runnable = self._source.build()
success = runnable is not None
yield ui.WorkResult(success=success, short_msg="OK" if success else "Failed")
if runnable:
dataset.run(runnable, checker, sample=sample, check=check)
@ui.solution_group()
def gen_expected(self, dataset: Dataset, sample: bool = False) -> Iterable[ui.WorkResult]:
"""Generate expected output files for all test cases in the given dataset
running this solution."""
runnable = self._source.build()
success = runnable is not None
yield ui.WorkResult(success=success, short_msg="OK" if success else "Failed")
if runnable is not None:
dataset.gen_expected(runnable, sample=sample)
@ui.work('Build')
def build(self) -> ui.WorkResult:
"""Build solution."""
success = self._source.build(True) is not None
msg = 'OK' if success else 'FAILED'
return ui.WorkResult(success=success, short_msg=msg)
@property
def name(self) -> str:
return self._source.name
def __str__(self) -> str:
return str(self.name)
@property
def source(self) -> SourceCode:
return self._source
| 2.3125 | 2 |
openvpp-agents/tests/test_mosaik.py | mtroeschel/isaac | 8 | 12769490 | <filename>openvpp-agents/tests/test_mosaik.py
from asyncio import coroutine
import json
import lzma
import arrow
import dateutil.tz
import pytest
from openvpp_agents import mosaik
from openvpp_agents import util
demand_meta = {
'start_time': '2009-12-31T23:00:00+00:00',
'interval_minutes': 60,
'cols': ['p_el', 'p_th_heat', 'p_th_water'],
}
demand_data = """{meta}
0,0,0
0,15,10
0,30,30
""".format(meta=json.dumps(demand_meta))
target_meta = {
'start_time': '2009-12-31T23:00:00+00:00',
'interval_minutes': 60,
'cols': ['p_el', 'weight'],
}
target_data = """{meta}
0,1
0,1
0,1
""".format(meta=json.dumps(target_meta))
@pytest.fixture(params=['csv', 'csv.xz'])
def demand_file(request, tmpdir):
"""Return the path (as string) to either a "demand.csv" or a
"demand.csv.xz"."""
ext = request.param
df = tmpdir.join('test_data.%s' % ext)
data = {
'csv': lambda d: d.encode(),
'csv.xz': lambda d: lzma.compress(d.encode()),
}
df.write_binary(data[ext](demand_data))
return df.strpath
@pytest.fixture
def target_dir(tmpdir):
tfd = tmpdir.mkdir('targets')
tf = tfd.join('electricaltarget1.csv')
tf.write(target_data)
return tfd.strpath
@pytest.fixture
def user(containers, demand_file, target_dir, monkeypatch):
# Mock the UserAgent's run() method to avoid nasty side effects:
monkeypatch.setattr(mosaik.UserAgent, 'run', coroutine(lambda self: None))
user = mosaik.UserAgent(containers[0], dca_addr=None,
demand_file=demand_file,
target_dir=target_dir)
yield user
user.stop()
def test_user_init_demand_gen(user):
"""Test that the "thermal demand" generator is correctly initialized."""
ret = user._demand_gen
assert ret[:2] == (
arrow.get(demand_meta['start_time']),
demand_meta['interval_minutes'] * 60,
)
assert list(ret[2]) == ['0,0,0\n', '0,15,10\n', '0,30,30\n']
def test_user_get_thermal_demand_forecast(user):
start = arrow.get(demand_meta['start_time'])
fc = user._get_thermal_demand_forecast(start, 2)
assert fc == util.TimeSeries(start, demand_meta['interval_minutes'] * 60,
[0, 25])
    start = start.shift(hours=1)  # shift() replaces arrow's removed plural replace()
fc = user._get_thermal_demand_forecast(start, 2)
assert fc == util.TimeSeries(start, demand_meta['interval_minutes'] * 60,
[25, 60])
@pytest.mark.parametrize('now, hour, minute, expected', [
('2015-01-01T00:00:00', 1, 30, '2015-01-01T00:30:00'), # today
('2015-01-01T12:00:00', 1, 30, '2015-01-02T00:30:00'), # tomorrow
('2015-01-01T00:30:00', 1, 30, '2015-01-02T00:30:00'), # now -> tomorrow
('2015-03-29T00:00:00', 4, 30, '2015-03-29T02:30:00'), # switch to DST
('2015-10-25T00:00:00', 4, 30, '2015-10-25T03:30:00'), # switch from DST
])
def test_user_get_next_date(user, now, hour, minute, expected):
now = arrow.get(now)
expected = arrow.get(expected)
user.container.clock.utcnow = lambda: now
date = user._get_next_date(hour, minute)
assert date.tzinfo == dateutil.tz.tzutc()
assert date == expected
@pytest.mark.parametrize('now, exp_start, exp_end', [
('2015-01-01T12:00:00', '2015-01-01T23:00:00', '2015-01-02T23:00:00'),
('2015-03-28T12:00:00', '2015-03-28T23:00:00', '2015-03-29T22:00:00'),
('2015-10-24T12:00:00', '2015-10-24T22:00:00', '2015-10-25T23:00:00'),
])
def test_user_get_dap_dates(user, now, exp_start, exp_end):
now, exp_start, exp_end = [arrow.get(d) for d in [now, exp_start, exp_end]]
ts = util.TimeSeries(exp_start, 900, [0 for i in range(96)])
user._ts = ts
s, e = user._get_dap_dates(user.container.clock)
assert s.tzinfo == dateutil.tz.tzutc()
assert s == exp_start
assert e.tzinfo == dateutil.tz.tzutc()
assert e == exp_end
| 2.203125 | 2 |
Old/Python-Advanced-Preliminary-Homeworks/Multidimensional Lists/07E. Bombs.py | MNikov/Python-Advanced-September-2020 | 4 | 12769491 | def create_matrix(size):
matrix = []
for _ in range(size):
matrix.append([int(x) for x in input().split()])
return matrix
def is_valid_cell(position, size):
row = position[0]
col = position[1]
return 0 <= row < size and 0 <= col < size
def print_matrix(matrix):
for row in matrix:
print(' '.join([str(x) for x in row]))
def count_alive_cells_and_sum_them(matrix, size):
alive_cells_count = 0
alive_cells_sum = 0
for r in range(size):
for c in range(size):
if matrix[r][c] > 0:
alive_cells_count += 1
alive_cells_sum += matrix[r][c]
return alive_cells_count, alive_cells_sum
size = int(input())
matrix = create_matrix(size)
base_matrix = matrix  # NOTE: this is an alias, not a copy; both names refer to the same grid
bomb_coordinates = input().split()
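# Row/column offsets of the eight neighbouring cells, paired index-wise.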
target_rows = [-1, -1, -1, 0, 1, 1, 1, 0]
target_cols = [-1, 0, 1, 1, 1, 0, -1, -1]
for row_col in bomb_coordinates:
tokens = row_col.split(',')
row, col = int(tokens[0]), int(tokens[1])
current_bomb = base_matrix[row][col]
if base_matrix[row][col] > 0:
for i in range(8):
bombed_cell_position = [row + target_rows[i], col + target_cols[i]]
if is_valid_cell(bombed_cell_position, size):
cell_row = bombed_cell_position[0]
cell_col = bombed_cell_position[1]
if matrix[cell_row][cell_col] > 0:
matrix[cell_row][cell_col] -= current_bomb
        matrix[row][col] = 0  # base_matrix aliases matrix, so one assignment suffices
alive_cells_count, alive_cells_sum = count_alive_cells_and_sum_them(matrix, size)
print(f"Alive cells: {alive_cells_count}")
print(f"Sum: {alive_cells_sum}")
print_matrix(matrix)
| 3.46875 | 3 |
decoy/pytest_plugin.py | mcous/decoy | 8 | 12769492 | """Pytest plugin to setup and teardown a Decoy instance.
The plugin will be registered with pytest when you install Decoy. It adds a
fixture without modifying any other pytest behavior. Its usage is optional
but highly recommended.
"""
import pytest
from typing import Iterable
from decoy import Decoy
@pytest.fixture
def decoy() -> Iterable[Decoy]:
"""Get a [decoy.Decoy][] container and tear it down after the test.
    This function is a function-scoped [pytest fixture][] that will be
automatically inserted by the plugin.
[pytest fixture]: https://docs.pytest.org/en/latest/how-to/fixtures.html
Example:
```python
def test_my_thing(decoy: Decoy) -> None:
my_fake_func = decoy.mock()
# ...
```
"""
decoy = Decoy()
yield decoy
decoy.reset()
| 2.546875 | 3 |
algorithm/k-community_detection/graph_kClusterAlgorithm_functions.py | lanl/Quantum_Graph_Algorithms | 1 | 12769493 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import re, os, sys
from dwave_qbsolv import QBSolv
from dwave.system.samplers import DWaveSampler, DWaveCliqueSampler
from dwave.system.composites import EmbeddingComposite, FixedEmbeddingComposite
import dimod
import hybrid
import minorminer
import networkx as nx
from numpy import linalg as la
from networkx.generators.atlas import *
import numpy as np
import random, copy
import math
from scipy.sparse import csr_matrix
import argparse
import logging
import datetime as dt
from qpu_sampler_time import QPUTimeSubproblemAutoEmbeddingSampler
#
# The Quantum Graph Community Detection Algorithm has been described
# in the following publications. Please cite in your publication.
#
# <NAME>, <NAME>, <NAME>,
# 2017, Graph Partitioning using Quantum Annealing on the
# D-Wave System, Proceedings of the 2nd International
# Workshop on Post Moore’s Era Supercomputing (PMES), 22-29.
#
# <NAME>, <NAME>, <NAME> 2020, Detecting
# Multiple Communities using Quantum Annealing on the D-Wave System,
# PLOS ONE 15(2): e0227538. https://doi.org/10.1371/journal.pone.0227538
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, 2021, Reduction of the Molecular Hamiltonian Matrix using
# Quantum Community Detection, Sci Rep 11, 4099 (2021).
# https://doi.org/10.1038/s41598-021-83561-x#
#
def build_mod(Adj, thresh, num_edges):
    """Build the modularity matrix from the adjacency matrix.
    Given an adjacency matrix, construct the modularity matrix used for clustering.
    """
Dim = Adj.shape[1]
print ("\n Dim = ", Dim)
print ("\n Computing modularity matrix ...")
Deg = np.zeros([Dim])
M = 0.0
# Calc Adj degrees
Deg = Adj.sum(1)
M = Deg.sum()
mtotal = M/2.0
Mod = np.zeros([Dim,Dim])
# Calc modularity matrix
Mod = Mod + Adj
Mod = Mod - (Deg * Deg.T)/M
np.set_printoptions(precision=3)
return mtotal, Mod
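# QUBO variables below are laid out block-major:
# big_indx = block_number * num_nodes + index_within_block.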
def get_block_number(big_indx, num_blocks, num_nodes):
#indx = math.ceil(big_indx/num_nodes) # node indx starts from 0
indx = math.floor(big_indx/num_nodes) # node indx starts from 0
#print("big_indx=", big_indx," Indx=", indx, " num_blocks=", num_blocks)
if indx > num_blocks-1:
raise ValueError("block indx cannot be larger than num_blocks-1")
return int(indx)
def get_indx_within_block(big_indx, num_nodes):
return big_indx%num_nodes
def get_entry_beta_B(i_indx, j_indx, beta, graph, modularity, num_nodes, num_blocks):
i_block_indx = get_block_number(i_indx, num_blocks, num_nodes)
j_block_indx = get_block_number(j_indx, num_blocks, num_nodes)
i_indx_within_block = get_indx_within_block(i_indx, num_nodes)
j_indx_within_block = get_indx_within_block(j_indx, num_nodes)
if i_block_indx == j_block_indx:
return beta*modularity[i_indx_within_block, j_indx_within_block]
else:
return 0
def get_entry_B_Gamma(i_indx, j_indx, modularity, beta,gamma, GAMMA, num_nodes, num_parts, num_blocks):
i_indx_within_block = get_indx_within_block(i_indx, num_nodes)
j_indx_within_block = get_indx_within_block(j_indx, num_nodes)
if i_indx_within_block == j_indx_within_block:
return gamma[i_indx_within_block]
else:
return 0
def get_entry_add_diag(i_indx,gamma, GAMMA, num_nodes, num_parts, num_blocks):
gamma_entry = GAMMA[i_indx]
return -2*gamma_entry
def get_i_j_entry(i_indx, j_indx, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks):
#print("i_indx=", i_indx," j_indx=", j_indx)
if i_indx == j_indx:
bB = get_entry_beta_B(i_indx, j_indx, beta, graph, modularity, num_nodes, num_blocks)
BG = get_entry_B_Gamma(i_indx, j_indx, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks)
diag = get_entry_add_diag(i_indx,gamma, GAMMA, num_nodes, num_parts, num_blocks)
return bB + BG + diag
else:
bB = get_entry_beta_B(i_indx, j_indx, beta, graph, modularity, num_nodes, num_blocks)
BG = get_entry_B_Gamma(i_indx, j_indx, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks)
return bB + BG
def threshold_mmatrix(graph, mmatrix, threshold):
msize = mmatrix.shape[0]
for i in range(0, msize):
mmatrix[i,i] = mmatrix[i,i] + graph.degree(i)
for i in range(0, msize):
for j in range(0, msize):
if i!=j and abs(mmatrix[i,j]) < threshold:
mmatrix[i,j] = 0.0
return mmatrix
def makeQubo(graph, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks, threshold):
# Create QUBO matrix
qsize = num_blocks*num_nodes
Q = np.zeros([qsize,qsize])
# Note: weights are set to the negative due to maximization
# Set node weights
for i in range(qsize):
entry = get_i_j_entry(i, i, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks)
Q[i,i] = -entry
# Set off-diagonal weights
for i in range(qsize):
for j in range(i, qsize):
if i != j:
entry = get_i_j_entry(i, j, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks)
if abs(entry) > threshold:
Q[i,j] = -entry
Q[j,i] = -entry
return Q
def write_qubo_file(graph, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks, threshold):
###qubo format
# p qubo target maxDiagonals nDiagonals nElements
#target = 0 implies unconstrained problem
nElements = 0 #to be counted
maxDiagonals = num_nodes*num_blocks # number of diagonal in topology
nDiagonals = num_nodes*num_blocks #number of diagonals the problem
qubo_file = open("body.qubo", "w")
# Write node header
qubo_string_diag = "".join(["\nc nodes first \n"])
qubo_file.write(qubo_string_diag)
# Write nodes
for i in range(num_blocks*num_nodes):
entry = get_i_j_entry(i, i, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks)
qubo_string_diag = "".join([str(i)+" "+str(i)+" "+str(entry)+"\n"])
qubo_file.write(qubo_string_diag)
# Write coupler header
qubo_string_couplers = "".join(["\nc couplers \n"])
qubo_file.write(qubo_string_couplers)
# Write couplers
for i in range(num_blocks*num_nodes):
for j in range(i, num_blocks*num_nodes):
if i != j:
entry = get_i_j_entry(i, j, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks)
if abs(entry) > threshold:
qubo_string_couplers = "".join([str(i)+" "+str(j)+" "+str(2*entry)+"\n"]) #x2 because of what qbsolv minimizes
qubo_file.write(qubo_string_couplers)
nElements += 1
qubo_file.close()
# Write header to separate file now that we know the nElements
# p qubo target maxDiagonals nDiagonals nElements
qubo_file = open("graph.qubo", "w")
qubo_string_initialize = "".join(["p qubo 0 " + str(maxDiagonals)+" "+str(nDiagonals)+" "+str(nElements)+"\n"])
qubo_file.write(qubo_string_initialize)
qubo_file.close()
# Put qubo file together - header and body
os.system("cat body.qubo >> graph.qubo")
os.system("rm body.qubo")
def get_qubo_solution():
myFile = open("dwave_output.out", 'r')
line_count = 0
for lines in myFile:
line_count += 1
if line_count == 2:
bit_string = lines
break
return bit_string.strip()
def violating_contraints(graph, x_indx, num_blocks, num_nodes, num_parts, result):
#each node in exactly one part
for node in range(num_nodes):
value = 0
for j in range(num_blocks):
value += x_indx[(node, j)]
if value >1:
print ("constraint violated: node %d in %d parts. Degree: %d" %(node, value, graph.degree(node)))
value = 0
    # balancing constraints
sum_v_i = 0
for node in range(num_nodes):
sum_x_ik = 0
for j in range(num_blocks):
sum_x_ik += x_indx[(node, j)]
node_i = (1 - sum_x_ik)
sum_v_i += node_i
print ("\nlast part size",sum_v_i , - num_nodes/float(num_parts))
num_clusters_found = 0
for j in range(num_blocks):
value = 0
for node in range(num_nodes):
value += x_indx[(node, j)]
print ("part %d has %d nodes" %(j, value))
if value > 0:
num_clusters_found += 1
result['num_clusters_found'] = num_clusters_found
#######################################################
######## penalty weight function #####################
####### ###################
def set_penalty_constant(num_nodes, num_blocks, beta0, gamma0):
beta = beta0
gamma = [gamma0 for i in range(num_nodes)]
GAMMA = [gamma[i] for j in range(num_blocks) for i in range(num_nodes) ]
return beta, gamma, GAMMA
#########
def calcModularityMetric(mtotal, modularity, part_number):
Dim = modularity.shape[1]
print ("\n Dim = ", Dim)
msum = 0.0
for ii in range(0, Dim):
for jj in range(0, Dim):
if part_number[ii] == part_number[jj]:
msum = msum + modularity[ii,jj]
mmetric = msum / (2.0 * mtotal)
return mmetric
def run_qbsolv():
rval = random.randint(1,1000)
estring = "qbsolv -r " + str(rval) + " -i graph.qubo -m -o dwave_output.out"
print('\n', estring)
os.system(estring)
def process_solution_qbsolv(graph, num_blocks, num_nodes, num_parts, result):
bit_string = get_qubo_solution()
print (bit_string)
print ("num non-zeros: ", sum([int(i) for i in bit_string]))
x_indx = {}
qubo_soln = [int(i) for i in bit_string]
for i in range(num_blocks*num_nodes):
i_block_indx = get_block_number(i, num_blocks, num_nodes)
i_indx_within_block = get_indx_within_block(i, num_nodes)
x_indx[(i_indx_within_block, i_block_indx)] = qubo_soln[i]
violating_contraints(graph, x_indx, num_blocks, num_nodes, num_parts, result)
part_number = {}
for key in x_indx:
node, part = key
if x_indx[key] == 1:
part_number[node] = part
return part_number
def process_solution(ss, graph, num_blocks, num_nodes, num_parts, result):
qsol = {}
for i in range(num_blocks*num_nodes):
qsol[i] = int(ss[0,i])
qtotal = 0
for i in range(num_blocks*num_nodes):
qtotal += qsol[i]
print('\nnum non-zeros = ', qtotal)
x_indx = {}
qubo_soln = qsol
for i in range(num_blocks*num_nodes):
i_block_indx = get_block_number(i, num_blocks, num_nodes)
i_indx_within_block = get_indx_within_block(i, num_nodes)
x_indx[(i_indx_within_block, i_block_indx)] = qubo_soln[i]
violating_contraints(graph, x_indx, num_blocks, num_nodes, num_parts, result)
part_number = {}
for key in x_indx:
node, part = key
if x_indx[key] == 1:
part_number[node] = part
return part_number
def getEmbedding(qsize):
#dsystem = DWaveCliqueSampler()
#embedding = dsystem.largest_clique()
#print('embedding found, len = ', len(embedding))
#print('embedding = ', embedding)
#exit(0)
ksize = qsize
qsystem = DWaveSampler()
ksub = nx.complete_graph(ksize).edges()
embedding = minorminer.find_embedding(ksub, qsystem.edgelist)
print('\nembedding done')
return embedding
def runDwave(Q, num_nodes, k, embedding, qsize, run_label, result):
# Using D-Wave/qbsolv
# Needed when greater than number of nodes/variables that can fit on the D-Wave
sampler = FixedEmbeddingComposite(DWaveSampler(), embedding)
#sampler = DWaveCliqueSampler()
rval = random.randint(1,10000)
t0 = dt.datetime.now()
solution = QBSolv().sample_qubo(Q, solver=sampler, seed=rval,
label=run_label)
wtime = dt.datetime.now() - t0
result['wall_clock_time'] = wtime
# Collect first energy and num_occ, num diff solutions, and total solutions
first = True
ndiff = 0
total_solns = 0
for sample, energy, num_occurrences in solution.data():
#print(sample, "Energy: ", energy, "Occurrences: ", num_occurrences)
if first == True:
result['energy'] = energy
result['num_occ'] = num_occurrences
first = False
ndiff += 1
total_solns += num_occurrences
result['num_diff_solns'] = ndiff
result['total_solns'] = total_solns
print('\n qbsolv response:')
print(solution)
ss = solution.samples()
#print("\n qbsolv samples=" + str(list(solution.samples())))
#print('\nss = ', ss)
print(flush=True)
return ss
def runDwaveHybrid(Q, num_nodes, k, sub_qsize, run_label, result):
bqm = dimod.BQM.from_qubo(Q)
rparams = {}
rparams['label'] = run_label
# QPU sampler with timing
QPUSubSamTime = QPUTimeSubproblemAutoEmbeddingSampler(num_reads=100, sampling_params=rparams)
# define the workflow
iteration = hybrid.Race(
hybrid.InterruptableTabuSampler(),
hybrid.EnergyImpactDecomposer(size=sub_qsize, rolling=True, rolling_history=0.15)
#| hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=100, sampling_params=rparams)
#| QTS.QPUTimeSubproblemAutoEmbeddingSampler(num_reads=100, sampling_params=rparams)
| QPUSubSamTime
| hybrid.SplatComposer()
) | hybrid.MergeSamples(aggregate=True)
workflow = hybrid.LoopUntilNoImprovement(iteration, convergence=3)
# Run the workflow
init_state = hybrid.State.from_problem(bqm)
t0 = dt.datetime.now()
solution = workflow.run(init_state).result()
wtime = dt.datetime.now() - t0
#hybrid.profiling.print_counters(workflow)
#print('\nQ timers = ', QPUSubSamTime.timers)
#print('\nQ counters = ', QPUSubSamTime.counters)
result['wall_clock_time'] = wtime
# Collect number of QPU accesses and QPU time used
result['num_qpu_accesses'] = QPUSubSamTime.num_accesses
result['total_qpu_time'] = QPUSubSamTime.total_qpu_time
# Collect from lowest energy result
result['energy'] = solution.samples.first.energy
result['num_occ'] = solution.samples.first.num_occurrences
# Collect number of different solutions w different energies
result['num_diff_solns'] = len(solution.samples)
total_solns = 0
for energy, num_occ in solution.samples.data(['energy', 'num_occurrences']):
total_solns += num_occ
result['total_solns'] = total_solns
# Show list of results in energy order
print(solution.samples)
# Collect the first solution
ss = np.zeros([1,num_nodes])
for i in range(num_nodes):
ss[0,i] = solution.samples.first.sample[i]
return ss
def cluster(Q, k, embedding, qsize, run_label, result):
# Start with Q
qsize = Q.shape[1]
print('\n Q size = ', qsize)
# Cluster into k parts using DWave
ss = runDwave(Q, qsize, k, embedding, qsize, run_label, result)
return ss
def clusterHybrid(Q, k, sub_qsize, run_label, result):
# Start with Q
qsize = Q.shape[1]
print('\n Q size = ', qsize)
# Cluster into k parts using Hybrid/DWave ocean
ss = runDwaveHybrid(Q, qsize, k, sub_qsize, run_label, result)
return ss
| 2.15625 | 2 |
sep/loaders/__init__.py | Fafa87/SEP | 0 | 12769494 | <reponame>Fafa87/SEP<gh_stars>0
from .files import FilesLoader
from .images import ImagesLoader
from .loader import Loader
from .movies import MoviesLoader, FrameByGroupSelector, FrameByIntervalSelector
from .youtube import YoutubeLoader
| 1.265625 | 1 |
mesen-s/labels-readme.py | adam-larson-lee/demons-crest | 0 | 12769495 | import os
LABELS_FILE_NAME = 'labels.msl'
README_FILE_NAME = 'README.md'
if os.path.exists(README_FILE_NAME):
os.remove(README_FILE_NAME)
labelsFile = open(LABELS_FILE_NAME, 'r')
readmeFile = open(README_FILE_NAME, 'a+')
readmeFile.write('# WORK\n')
readmeFile.write('\n')
readmeFile.write('|Address|Label|Comment|\n')
readmeFile.write('|-------|-----|-------|\n')
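# Each labels.msl line is assumed to be "REGION:ADDRESS:LABEL[:COMMENT]";
# only rows whose REGION is 'WORK' are exported below.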
for labelsFileLine in labelsFile:
labelsFileLineComponents = labelsFileLine.replace('\n', '').split(':')
if labelsFileLineComponents[0] == 'WORK' :
readmeFile.write('|0x')
readmeFile.write(labelsFileLineComponents[1].rjust(6, '0'))
readmeFile.write('|')
readmeFile.write(labelsFileLineComponents[2])
readmeFile.write('|')
if len(labelsFileLineComponents) > 3:
readmeFile.write(labelsFileLineComponents[3])
readmeFile.write('|')
readmeFile.write('\n')
labelsFile.close()
readmeFile.close() | 2.515625 | 3 |
merlin/config/results_backend.py | ben-bay/merlin | 0 | 12769496 | <gh_stars>0
###############################################################################
# Copyright (c) 2019, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by the Merlin dev team, listed in the CONTRIBUTORS file.
# <<EMAIL>>
#
# LLNL-CODE-797170
# All rights reserved.
# This file is part of Merlin, Version: 1.2.3.
#
# For details, see https://github.com/LLNL/merlin.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""
This module contains the logic for configuring the Celery results backend.
"""
from __future__ import print_function
import logging
import os
from merlin.config.configfile import CONFIG
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
LOG = logging.getLogger(__name__)
BACKENDS = ["sqlite", "mysql", "redis", "none"]
# Default files needed for the package to connect to the Rabbit instance.
MYSQL_CONFIG_FILENAMES = {
"ssl_cert": "rabbit-client-cert.pem",
"ssl_ca": "mysql-ca-cert.pem",
"ssl_key": "rabbit-client-key.pem",
"password": "<PASSWORD>",
}
MYSQL_CONNECTION_STRING = (
"db+mysql+mysqldb://{user}:{password}@{server}/mlsi"
"?ssl_ca={ssl_ca}"
"&ssl_cert={ssl_cert}"
"&ssl_key={ssl_key}"
)
SQLITE_CONNECTION_STRING = "db+sqlite:///results.db"
def get_backend_password(password_file, certs_path=None):
if certs_path is None:
certs_path = CONFIG.celery.certs
password = None
password_file = os.path.expanduser(password_file)
if os.path.exists(password_file):
password_filepath = password_file
else:
password_filepath = os.path.join(certs_path, password_file)
if not os.path.exists(password_filepath):
# The password was given instead of the filepath.
        password = password_file
else:
with open(password_filepath, "r") as f:
line = f.readline().strip()
password = quote(line, safe="")
LOG.debug(f"Results backend certs_path = {certs_path}")
LOG.debug(f"Results backend password_filepath = {password_filepath}")
return password
def get_redis(certs_path=None, include_password=True):
server = CONFIG.results_backend.server
password_file = ""
try:
port = CONFIG.results_backend.port
except (KeyError, AttributeError):
port = 6379
LOG.warning(f"Results backend redis using default port = {port}")
try:
db_num = CONFIG.results_backend.db_num
except (KeyError, AttributeError):
db_num = 0
LOG.warning(f"Results backend redis using default db_num = {db_num}")
try:
username = CONFIG.results_backend.username
password_file = CONFIG.results_backend.password
try:
password = get_backend_password(password_file, certs_path=certs_path)
except IOError:
password = CONFIG.results_backend.password
if include_password:
spass = "%s:%s@" % (username, password)
else:
spass = "%s:%s@" % (username, "******")
except (KeyError, AttributeError):
spass = ""
LOG.warning(f"Results backend redis using default password = {spass}")
LOG.debug(f"Results backend password_file = {password_file}")
LOG.debug(f"Results backend server = {server}")
LOG.debug(f"Results backend certs_path = {certs_path}")
return "redis://%s%s:%d/%d" % (spass, server, port, db_num)
def get_mysql_config(certs_path, mysql_certs):
"""
Determine if all the information for connecting MySQL as the Celery
results backend exists.
"""
if not os.path.exists(certs_path):
return False
files = os.listdir(certs_path)
certs = {}
for key, filename in mysql_certs.items():
for f in files:
if not f == filename:
continue
certs[key] = os.path.join(certs_path, f)
return certs
def get_mysql(certs_path=None, mysql_certs=None, include_password=True):
"""Returns the formatted MySQL connection string."""
if certs_path is None:
certs_path = CONFIG.celery.certs
dbname = CONFIG.results_backend.dbname
password_file = CONFIG.results_backend.password
server = CONFIG.results_backend.server
# Adding an initial start for printing configurations. This should
# eventually be configured to use a logger. This logic should also
# eventually be decoupled so we can print debug messages similar to our
# Python debugging messages.
LOG.debug(f"Results backend dbname = {dbname}")
LOG.debug(f"Results backend password_file = {password_file}")
LOG.debug(f"Results backend server = {server}")
LOG.debug(f"Results backend certs_path = {certs_path}")
if not server:
msg = f"Results backend server {server} does not have a configuration"
raise Exception(msg)
password = get_backend_password(password_file, certs_path=certs_path)
if mysql_certs is None:
mysql_certs = MYSQL_CONFIG_FILENAMES
mysql_config = get_mysql_config(certs_path, mysql_certs)
if not mysql_config:
msg = "The connection information for MySQL could not be set."
raise Exception(msg)
mysql_config["user"] = CONFIG.results_backend.username
if include_password:
mysql_config["password"] = password
else:
mysql_config["password"] = "******"
mysql_config["server"] = server
return MYSQL_CONNECTION_STRING.format(**mysql_config)
def get_connection_string(include_password=True):
"""
Given the package configuration determine what results backend to use and
return the connection string.
"""
backend = CONFIG.results_backend.name.lower()
if backend not in BACKENDS:
msg = f"'{backend}' is not a supported results backend"
raise ValueError(msg)
if backend == "mysql":
return get_mysql(include_password=include_password)
if backend == "sqlite":
return SQLITE_CONNECTION_STRING
if backend == "redis":
return get_redis(include_password=include_password)
return None
def verify_config():
"""Verifies the backend configurations."""
try:
        # Building the connection string validates the configured backend,
        # raising ValueError for an unsupported name.
        get_connection_string()
has_backend = True
except ValueError:
has_backend = False
LOG.debug(f"Has Results Backend: {has_backend}")
| 1.328125 | 1 |
app/__init__.py | landportal/landbook-data-access-api | 0 | 12769497 | # -*- coding: utf-8 -*-
# landportal-data-access-api
# Copyright (c)2014, WESO, Web Semantics Oviedo.
# Written by <NAME>.
# This file is part of landportal-data-access-api.
#
# landportal-data-access-api is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License.
#
# landportal-data-access-api is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with landportal-data-access-api. If not, see <http://www.gnu.org/licenses/>.
# landportal-data-access-api is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>
"""
Created on 03/02/2014
This file make the setup configuration for the Flask-Server
:author: <NAME>
"""
from flask.app import Flask
from flask.ext.cache import Cache
from flask.ext.track_usage import TrackUsage
from flask.ext.track_usage.storage.sql import SQLStorage
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://'
app.config['TRACK_USAGE_USE_FREEGEOIP'] = False
app.config['TRACK_USAGE_INCLUDE_OR_EXCLUDE_VIEWS'] = 'exclude'
cache = Cache(app, config={'CACHE_TYPE': 'memcached', 'CACHE_MEMCACHED_SERVERS': ['localhost:11211']})
app.config['DEBUG'] = True
db = SQLAlchemy(app)
sql_database_storage = SQLStorage(app.config['SQLALCHEMY_DATABASE_URI'], table_name='api_usage')
t = TrackUsage(app, sql_database_storage)
from app import views
| 1.242188 | 1 |
dotnet/private/rules/libraryset.bzl | wbiker/rules_dotnet | 0 | 12769498 | <gh_stars>0
load(
"@io_bazel_rules_dotnet//dotnet/private:context.bzl",
"dotnet_context",
)
load(
"@io_bazel_rules_dotnet//dotnet/private:providers.bzl",
"DotnetLibrary",
)
load("@io_bazel_rules_dotnet//dotnet/private:rules/common.bzl", "collect_transitive_info")
def _libraryset_impl(ctx):
"""_libraryset_impl implements the set of libraries."""
dotnet = dotnet_context(ctx)
name = ctx.label.name
# Handle case of empty toolchain on linux and darwin
if dotnet.assembly == None:
library = dotnet.new_library(dotnet = dotnet)
return [library]
(transitive_refs, transitive_runfiles, transitive_deps) = collect_transitive_info(ctx.attr.deps)
library = dotnet.new_library(
dotnet = dotnet,
name = name,
deps = ctx.attr.deps,
transitive = transitive_deps,
runfiles = transitive_runfiles,
transitive_refs = transitive_refs,
)
return [
library,
DefaultInfo(
runfiles = ctx.runfiles(files = [], transitive_files = library.runfiles),
),
]
dotnet_libraryset = rule(
_libraryset_impl,
attrs = {
"deps": attr.label_list(providers = [DotnetLibrary]),
"dotnet_context_data": attr.label(default = Label("@io_bazel_rules_dotnet//:dotnet_context_data")),
},
toolchains = ["@io_bazel_rules_dotnet//dotnet:toolchain"],
executable = False,
)
core_libraryset = rule(
_libraryset_impl,
attrs = {
"deps": attr.label_list(providers = [DotnetLibrary]),
"dotnet_context_data": attr.label(default = Label("@io_bazel_rules_dotnet//:core_context_data")),
},
toolchains = ["@io_bazel_rules_dotnet//dotnet:toolchain_core"],
executable = False,
)
net_libraryset = rule(
_libraryset_impl,
attrs = {
"deps": attr.label_list(providers = [DotnetLibrary]),
"dotnet_context_data": attr.label(default = Label("@io_bazel_rules_dotnet//:net_context_data")),
},
toolchains = ["@io_bazel_rules_dotnet//dotnet:toolchain_net"],
executable = False,
)
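# Usage sketch (in a BUILD file; target names are illustrative):
# core_libraryset(
#     name = "shared_libs",
#     deps = [":lib_a", ":lib_b"],
# )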
| 1.5625 | 2 |
label_smooth.py | chizhu/pytorch-loss | 1 | 12769499 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
class LabelSmoothSoftmaxCEV1(nn.Module):
'''
This is the autograd version, you can also try the LabelSmoothSoftmaxCEV2 that uses derived gradients
'''
def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothSoftmaxCEV1, self).__init__()
self.lb_smooth = lb_smooth
self.reduction = reduction
self.lb_ignore = ignore_index
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, logits, label):
'''
args: logits: tensor of shape (N, C, H, W)
args: label: tensor of shape(N, H, W)
'''
# overcome ignored label
logits = logits.float() # use fp32 to avoid nan
with torch.no_grad():
num_classes = logits.size(1)
label = label.clone().detach()
ignore = label == self.lb_ignore
n_valid = (ignore == 0).sum()
label[ignore] = 0
lb_pos, lb_neg = 1. - self.lb_smooth, self.lb_smooth / num_classes
label = torch.empty_like(logits).fill_(
lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()
logs = self.log_softmax(logits)
loss = -torch.sum(logs * label, dim=1)
loss[ignore] = 0
if self.reduction == 'mean':
loss = loss.sum() / n_valid
if self.reduction == 'sum':
loss = loss.sum()
return loss
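# Usage sketch (shapes follow the forward() docstring; numbers are illustrative):
# criteria = LabelSmoothSoftmaxCEV1(lb_smooth=0.1, ignore_index=255)
# logits = torch.randn(8, 19, 64, 64)          # (N, C, H, W)
# label = torch.randint(0, 19, (8, 64, 64))    # (N, H, W)
# loss = criteria(logits, label)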
class LSRCrossEntropyFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, label, lb_smooth, reduction, lb_ignore):
# prepare label
num_classes = logits.size(1)
label = label.clone().detach()
ignore = label == lb_ignore
n_valid = (ignore == 0).sum()
label[ignore] = 0
lb_pos, lb_neg = 1. - lb_smooth, lb_smooth / num_classes
label = torch.empty_like(logits).fill_(
lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()
ignore = ignore.nonzero()
_, M = ignore.size()
a, *b = ignore.chunk(M, dim=1)
mask = [a, torch.arange(label.size(1)), *b]
label[mask] = 0
coeff = (num_classes - 1) * lb_neg + lb_pos
ctx.coeff = coeff
ctx.mask = mask
ctx.logits = logits
ctx.label = label
ctx.reduction = reduction
ctx.n_valid = n_valid
loss = torch.log_softmax(logits, dim=1).neg_().mul_(label).sum(dim=1)
if reduction == 'mean':
loss = loss.sum().div_(n_valid)
if reduction == 'sum':
loss = loss.sum()
return loss
@staticmethod
def backward(ctx, grad_output):
coeff = ctx.coeff
mask = ctx.mask
logits = ctx.logits
label = ctx.label
reduction = ctx.reduction
n_valid = ctx.n_valid
scores = torch.softmax(logits, dim=1).mul_(coeff)
scores[mask] = 0
if reduction == 'none':
grad = scores.sub_(label).mul_(grad_output.unsqueeze(1))
elif reduction == 'sum':
grad = scores.sub_(label).mul_(grad_output)
elif reduction == 'mean':
grad = scores.sub_(label).mul_(grad_output.div_(n_valid))
return grad, None, None, None, None, None
class LabelSmoothSoftmaxCEV2(nn.Module):
def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothSoftmaxCEV2, self).__init__()
self.lb_smooth = lb_smooth
self.reduction = reduction
self.lb_ignore = ignore_index
def forward(self, logits, label):
return LSRCrossEntropyFunction.apply(
logits, label, self.lb_smooth, self.reduction, self.lb_ignore)
if __name__ == '__main__':
import torchvision
import torch
import numpy as np
import random
torch.manual_seed(15)
random.seed(15)
np.random.seed(15)
torch.backends.cudnn.deterministic = True
net1 = torchvision.models.resnet18(pretrained=True)
net2 = torchvision.models.resnet18(pretrained=True)
criteria1 = LabelSmoothSoftmaxCEV1(lb_smooth=0.1, ignore_index=255)
criteria2 = LabelSmoothSoftmaxCEV2(lb_smooth=0.1, ignore_index=255)
net1.cuda()
net2.cuda()
net1.train()
net2.train()
criteria1.cuda()
criteria2.cuda()
optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)
bs = 128
for it in range(300000):
        inten = torch.randn(bs, 3, 224, 224).cuda()
inten[0, 1, 0, 0] = 255
inten[0, 0, 1, 2] = 255
inten[0, 2, 5, 28] = 255
lbs = torch.randint(0, 1000, (bs, )).cuda()
logits = net1(inten)
loss1 = criteria1(logits, lbs)
optim1.zero_grad()
loss1.backward()
optim1.step()
# print(net1.fc.weight[:, :5])
logits = net2(inten)
loss2 = criteria2(logits, lbs)
optim2.zero_grad()
loss2.backward()
optim2.step()
# print(net2.fc.weight[:, :5])
with torch.no_grad():
if (it+1) % 50 == 0:
print('iter: {}, ================='.format(it+1))
# print(net1.fc.weight.numel())
print(torch.mean(torch.abs(net1.fc.weight - net2.fc.weight)).item())
print(torch.mean(torch.abs(net1.conv1.weight - net2.conv1.weight)).item())
# print(loss1.item())
# print(loss2.item())
print(loss1.item() - loss2.item())
| 2.71875 | 3 |
src/python/pants/bsp/goal.py | danxmoran/pants | 0 | 12769500 | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
import os
import shlex
import sys
import textwrap
from typing import Mapping
from pants.base.build_root import BuildRoot
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, ExitCode
from pants.base.specs import Specs
from pants.bsp.context import BSPContext
from pants.bsp.protocol import BSPConnection
from pants.bsp.util_rules.lifecycle import BSP_VERSION, BSPLanguageSupport
from pants.build_graph.build_configuration import BuildConfiguration
from pants.engine.environment import CompleteEnvironment
from pants.engine.internals.session import SessionValues
from pants.engine.unions import UnionMembership
from pants.goal.builtin_goal import BuiltinGoal
from pants.init.engine_initializer import GraphSession
from pants.option.option_types import BoolOption, FileListOption, StrListOption
from pants.option.option_value_container import OptionValueContainer
from pants.option.options import Options
from pants.util.docutil import bin_name
from pants.util.strutil import softwrap
from pants.version import VERSION
_logger = logging.getLogger(__name__)
class BSPGoal(BuiltinGoal):
name = "experimental-bsp"
help = "Setup repository for Build Server Protocol (https://build-server-protocol.github.io/)."
server = BoolOption(
"--server",
default=False,
advanced=True,
help=softwrap(
"""
Run the Build Server Protocol server. Pants will receive BSP RPC requests via the console.
This should only ever be invoked via the IDE.
"""
),
)
runner_env_vars = StrListOption(
"--runner-env-vars",
default=["PATH"],
help=softwrap(
f"""
Environment variables to set in the BSP runner script when setting up BSP in a repository.
Entries are either strings in the form `ENV_VAR=value` to set an explicit value;
or just `ENV_VAR` to copy the value from Pants' own environment when the {name} goal was run.
This option only takes effect when the BSP runner script is written. If the option changes, you
must run `{bin_name()} {name}` again to write a new copy of the BSP runner script.
Note: The environment variables passed to the Pants BSP server will be those set for your IDE
and not your shell. For example, on macOS, the IDE is generally launched by `launchd` after
clicking on a Dock icon, and not from the shell. Thus, any environment variables set for your
shell will likely not be seen by the Pants BSP server. At the very least, on macOS consider
writing an explicit PATH into the BSP runner script via this option.
"""
),
advanced=True,
)
groups_config_files = FileListOption(
"--groups-config-files",
help=softwrap(
"""
A list of config files that define groups of Pants targets to expose to IDEs via Build Server Protocol.
Pants generally uses fine-grained targets to define the components of a build (in many cases on a file-by-file
basis). Many IDEs, however, favor coarse-grained targets that contain large numbers of source files.
To accommodate this distinction, the Pants BSP server will compute a set of BSP build targets to use
from the groups specified in the config files set for this option. Each group will become one or more
BSP build targets.
Each config file is a TOML file with a `groups` dictionary with the following format for an entry:
# The dictionary key is used to identify the group. It must be unique.
[groups.ID1]
# One or more Pants address specs defining what targets to include in the group.
addresses = [
"src/jvm::",
"tests/jvm::",
]
# Filter targets to a specific resolve. Targets in a group must be from a single resolve.
# Format of filter is `TYPE:RESOLVE_NAME`. The only supported TYPE is `jvm`. RESOLVE_NAME must be
# a valid resolve name.
resolve = "jvm:jvm-default"
display_name = "Display Name" # (Optional) Name shown to the user in the IDE.
base_directory = "path/from/build/root" # (Optional) Hint to the IDE for where the build target should "live."
Pants will merge the contents of the config files together. If the same ID is used for a group definition,
in multiple config files, the definition in the latter config file will take effect.
"""
),
)
def run(
self,
*,
build_config: BuildConfiguration,
graph_session: GraphSession,
options: Options,
specs: Specs,
union_membership: UnionMembership,
) -> ExitCode:
goal_options = options.for_scope(self.name)
if goal_options.server:
return self._run_server(
graph_session=graph_session,
union_membership=union_membership,
)
current_session_values = graph_session.scheduler_session.py_session.session_values
env = current_session_values[CompleteEnvironment]
return self._setup_bsp_connection(
union_membership=union_membership, env=env, options=goal_options
)
def _setup_bsp_connection(
self,
union_membership: UnionMembership,
env: Mapping[str, str],
options: OptionValueContainer,
) -> ExitCode:
"""Setup the BSP connection file."""
build_root = BuildRoot()
bsp_conn_path = build_root.pathlib_path / ".bsp" / "pants.json"
if bsp_conn_path.exists():
print(
f"ERROR: A BSP connection file already exists at path `{bsp_conn_path}`. "
"Please delete that file if you intend to re-setup BSP in this repository.",
file=sys.stderr,
)
return PANTS_FAILED_EXIT_CODE
bsp_dir = build_root.pathlib_path / ".pants.d" / "bsp"
bsp_scripts_dir = bsp_dir / "scripts"
bsp_scripts_dir.mkdir(exist_ok=True, parents=True)
bsp_logs_dir = bsp_dir / "logs"
bsp_logs_dir.mkdir(exist_ok=True, parents=True)
# Determine which environment variables to set in the BSP runner script.
# TODO: Consider whether some of this logic could be shared with
# `pants.engine.environment.CompleteEnvironment.get_subset`.
run_script_env_lines: list[str] = []
for env_var in options.runner_env_vars:
if "=" in env_var:
run_script_env_lines.append(env_var)
else:
if env_var not in env:
print(
f"ERROR: The `[{self.name}].runner_env_vars` option is configured to add the `{env_var}` "
"environment variable to the BSP runner script using its value in the current environment. "
"That environment variable, however, is not present in the current environment. "
"Please either set it in the current environment first or else configure a specific value "
"in `pants.toml`.",
file=sys.stderr,
)
return PANTS_FAILED_EXIT_CODE
run_script_env_lines.append(f"{env_var}={env[env_var]}")
run_script_env_lines_str = "\n".join(
[f"export {shlex.quote(line)}" for line in run_script_env_lines]
)
run_script_path = bsp_scripts_dir / "run-bsp.sh"
run_script_path.write_text(
textwrap.dedent(
f"""\
#!/bin/sh
{run_script_env_lines_str}
exec 2>>{shlex.quote(str(bsp_logs_dir / 'stderr.log'))}
env 1>&2
exec {shlex.quote(bin_name())} --no-pantsd {self.name} --server
"""
)
)
run_script_path.chmod(0o755)
_logger.info(f"Wrote BSP runner script to `{run_script_path}`.")
bsp_conn_data = {
"name": "Pants",
"version": VERSION,
"bspVersion": BSP_VERSION,
"languages": sorted(
[lang.language_id for lang in union_membership.get(BSPLanguageSupport)]
),
"argv": ["./.pants.d/bsp/scripts/run-bsp.sh"],
}
bsp_conn_path.parent.mkdir(exist_ok=True, parents=True)
bsp_conn_path.write_text(json.dumps(bsp_conn_data))
_logger.info(f"Wrote BSP connection file to `{bsp_conn_path}`.")
return PANTS_SUCCEEDED_EXIT_CODE
def _run_server(
self,
*,
graph_session: GraphSession,
union_membership: UnionMembership,
) -> ExitCode:
"""Run the BSP server."""
current_session_values = graph_session.scheduler_session.py_session.session_values
context = BSPContext()
session_values = SessionValues(
{
**current_session_values,
BSPContext: context,
}
)
scheduler_session = graph_session.scheduler_session.scheduler.new_session(
build_id="bsp", dynamic_ui=False, session_values=session_values
)
saved_stdout = sys.stdout
saved_stdin = sys.stdin
try:
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", buffering=0) # type: ignore[assignment]
sys.stdin = os.fdopen(sys.stdin.fileno(), "rb", buffering=0) # type: ignore[assignment]
conn = BSPConnection(
scheduler_session,
union_membership,
context,
sys.stdin, # type: ignore[arg-type]
sys.stdout, # type: ignore[arg-type]
)
conn.run()
finally:
sys.stdout = saved_stdout
sys.stdin = saved_stdin
return ExitCode(0)
| 2.015625 | 2 |
datasets/youtube_caption_corrections/youtube_caption_corrections.py | BobbyManion/datasets | 1 | 12769501 | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset built from <auto-generated, manually corrected> caption pairs of
YouTube videos with labels capturing the differences between the two."""
import json
import datasets
_CITATION = ""
_DESCRIPTION = """\
Dataset built from pairs of YouTube captions where both 'auto-generated' and
'manually-corrected' captions are available for a single specified language.
This dataset labels two-way (e.g. ignoring single-sided insertions) same-length
token differences in the `diff_type` column. The `default_seq` is composed of
tokens from the 'auto-generated' captions. When a difference occurs between
the 'auto-generated' vs 'manually-corrected' captions types, the `correction_seq`
contains tokens from the 'manually-corrected' captions.
"""
_LICENSE = "MIT License"
_RELEASE_TAG = "v1.0"
_NUM_FILES = 4
_URLS = [
f"https://raw.githubusercontent.com/2dot71mily/youtube_captions_corrections/{_RELEASE_TAG}/data/transcripts/en/split/youtube_caption_corrections_{i}.json"
for i in range(_NUM_FILES)
]
class YoutubeCaptionCorrections(datasets.GeneratorBasedBuilder):
"""YouTube captions corrections."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"video_ids": datasets.Value("string"),
"default_seq": datasets.Sequence(datasets.Value("string")),
"correction_seq": datasets.Sequence(datasets.Value("string")),
"diff_type": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"NO_DIFF",
"CASE_DIFF",
"PUNCUATION_DIFF",
"CASE_AND_PUNCUATION_DIFF",
"STEM_BASED_DIFF",
"DIGIT_DIFF",
"INTRAWORD_PUNC_DIFF",
"UNKNOWN_TYPE_DIFF",
"RESERVED_DIFF",
]
)
),
}
),
supervised_keys=("correction_seq", "diff_type"),
homepage="https://github.com/2dot71mily/youtube_captions_corrections",
license=_LICENSE,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
downloaded_filepaths = dl_manager.download_and_extract(_URLS)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepaths": downloaded_filepaths},
),
]
def _generate_examples(self, filepaths):
"""Yields examples."""
for fp in filepaths:
with open(fp, "r", encoding="utf-8") as json_file:
json_lists = list(json_file)
for json_list_str in json_lists:
json_list = json.loads(json_list_str)
for ctr, result in enumerate(json_list):
response = {
"video_ids": result["video_ids"],
"diff_type": result["diff_type"],
"default_seq": result["default_seq"],
"correction_seq": result["correction_seq"],
}
yield ctr, response
| 2.390625 | 2 |
mx.wasm/mx_wasm_benchmark.py | Gwandalff/SelfAdaptableWASM | 0 | 12769502 | <filename>mx.wasm/mx_wasm_benchmark.py
#
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import mx
import mx_benchmark
import os
import re
import shutil
import stat
import tempfile
import zipfile
from mx_benchmark import JMHDistBenchmarkSuite
from mx_benchmark import add_bm_suite
from mx_benchmark import add_java_vm
_suite = mx.suite("wasm")
BENCHMARK_NAME_PREFIX = "-Dwasmbench.benchmarkName="
SUITE_NAME_SUFFIX = "BenchmarkSuite"
BENCHMARK_JAR_SUFFIX = "benchmarkcases.jar"
node_dir = mx.get_env("NODE_DIR", None)
def _toKebabCase(name, skewer="-"):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1" + skewer + r"\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1" + skewer + r"\2", s1).lower()
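# e.g. _toKebabCase("CMicroBenchmarkSuite") -> "c-micro-benchmark-suite"
# and _toKebabCase("CMicroBenchmarkSuite", "/") -> "c/micro/benchmark/suite"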
class WasmBenchmarkVm(mx_benchmark.OutputCapturingVm):
"""
This is a special kind of Wasm VM that expects the benchmark suite to provide
a JAR file that has each benchmark compiled to a native binary,
a JS program that runs the Wasm benchmark (generated e.g. with Emscripten),
and the set of files that are required by the GraalWasm test suite.
These files must be organized in a predefined structure,
so that the different VM implementations know where to look for them.
If a Wasm benchmark suite consists of benchmarks in the category `c`,
    then the binaries of that benchmark must be structured as follows:
    - For GraalWasm: bench/c/{*.wasm, *.init, *.result, *.wat}
    - For Node: bench/c/node/{*.wasm, *.js}
    - For native binaries: bench/c/native/*<platform-specific-binary-extension>
Furthermore, these VMs expect that the benchmark suites that use them
will provide a `-Dwasmbench.benchmarkName=<benchmark-name>` command-line flag,
and the `CBenchmarkSuite` argument, where `<benchmark-name>` specifies a benchmark
in the category `c`.
"""
def name(self):
return "wasm-benchmark"
def post_process_command_line_args(self, args):
return args
def parse_suite_benchmark(self, args):
suite = next(iter([arg for arg in args if arg.endswith(SUITE_NAME_SUFFIX)]), None)
if suite is None:
mx.abort("Suite must specify a flag that ends with " + SUITE_NAME_SUFFIX)
suite = suite[:-len(SUITE_NAME_SUFFIX)]
suite = _toKebabCase(suite, "/")
benchmark = next(iter([arg for arg in args if arg.startswith(BENCHMARK_NAME_PREFIX)]), None)
if benchmark is None:
mx.abort("Suite must specify a flag that starts with " + BENCHMARK_NAME_PREFIX)
else:
benchmark = benchmark[len(BENCHMARK_NAME_PREFIX):]
return suite, benchmark
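    # e.g. (illustrative): args containing "CBenchmarkSuite" and
    # "-Dwasmbench.benchmarkName=loop" parse to ("c", "loop")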
def parse_jar_suite_benchmark(self, args):
if "-cp" not in args:
mx.abort("Suite must specify -cp.")
classpath = args[args.index("-cp") + 1]
delimiter = ";" if mx.is_windows() else ":"
jars = classpath.split(delimiter)
jar = next(iter([jar for jar in jars if jar.endswith(BENCHMARK_JAR_SUFFIX)]), None)
if jar is None:
mx.abort("No benchmark jar file is specified in the classpath.")
suite, benchmark = self.parse_suite_benchmark(args)
return jar, suite, benchmark
def extract_jar_to_tempdir(self, jar, mode, suite, benchmark):
tmp_dir = tempfile.mkdtemp()
with zipfile.ZipFile(jar, "r") as z:
for name in z.namelist():
if name.startswith(os.path.join("bench", suite, mode, benchmark)):
z.extract(name, tmp_dir)
return tmp_dir
def rules(self, output, benchmarks, bmSuiteArgs):
suite, benchmark = self.parse_suite_benchmark(bmSuiteArgs)
return [
mx_benchmark.StdOutRule(
r"ops/sec = (?P<throughput>[0-9]+.[0-9]+)",
{
"benchmark": suite + "/" + benchmark,
"vm": self.config_name(),
"metric.name": "throughput",
"metric.value": ("<throughput>", float),
"metric.unit": "ops/s",
"metric.type": "numeric",
"metric.score-function": "id",
"metric.better": "higher",
"metric.iteration": 0,
}
)
]
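    # Example of a stdout line matched by the rule above (illustrative):
    #   ops/sec = 1234.56
    # which is reported as a single "throughput" datapoint with value 1234.56.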
class NodeWasmBenchmarkVm(WasmBenchmarkVm):
def config_name(self):
return "node"
def run_vm(self, args, out=None, err=None, cwd=None, nonZeroIsFatal=False):
if node_dir is None:
mx.abort("Must set the NODE_DIR environment variable to point to Node's bin dir.")
jar, suite, benchmark = self.parse_jar_suite_benchmark(args)
tmp_dir = None
try:
mode = self.config_name()
tmp_dir = self.extract_jar_to_tempdir(jar, mode, suite, benchmark)
node_cmd = os.path.join(node_dir, mode)
node_cmd_line = [node_cmd, os.path.join(tmp_dir, "bench", suite, mode, benchmark + ".js")]
mx.log("Running benchmark " + benchmark + " with node.")
mx.run(node_cmd_line, cwd=tmp_dir, out=out, err=err, nonZeroIsFatal=nonZeroIsFatal)
finally:
if tmp_dir:
shutil.rmtree(tmp_dir)
return 0
class NativeWasmBenchmarkVm(WasmBenchmarkVm):
def config_name(self):
return "native"
def run_vm(self, args, out=None, err=None, cwd=None, nonZeroIsFatal=False):
jar, suite, benchmark = self.parse_jar_suite_benchmark(args)
tmp_dir = None
try:
mode = self.config_name()
tmp_dir = self.extract_jar_to_tempdir(jar, mode, suite, benchmark)
binary_path = os.path.join(tmp_dir, "bench", suite, mode, mx.exe_suffix(benchmark))
os.chmod(binary_path, stat.S_IRUSR | stat.S_IXUSR)
cmd_line = [binary_path]
mx.log("Running benchmark " + benchmark + " natively.")
mx.run(cmd_line, cwd=tmp_dir, out=out, err=err, nonZeroIsFatal=nonZeroIsFatal)
finally:
if tmp_dir:
shutil.rmtree(tmp_dir)
return 0
add_java_vm(NodeWasmBenchmarkVm(), suite=_suite, priority=1)
add_java_vm(NativeWasmBenchmarkVm(), suite=_suite, priority=1)
class WasmJMHJsonRule(mx_benchmark.JMHJsonRule):
def getBenchmarkNameFromResult(self, result):
        name_flag = BENCHMARK_NAME_PREFIX
name_arg = next(arg for arg in result["jvmArgs"] if arg.startswith(name_flag))
return name_arg[len(name_flag):]
class WasmBenchmarkSuite(JMHDistBenchmarkSuite):
def name(self):
return "wasm"
def group(self):
return "Graal"
def benchSuiteName(self, bmSuiteArgs):
return next(arg for arg in bmSuiteArgs if arg.endswith("BenchmarkSuite"))
def subgroup(self):
return "wasm"
def successPatterns(self):
return []
def isWasmBenchmarkVm(self, bmSuiteArgs):
jvm_config = bmSuiteArgs[bmSuiteArgs.index("--jvm-config") + 1]
return jvm_config == "node" or jvm_config == "native"
def rules(self, out, benchmarks, bmSuiteArgs):
if self.isWasmBenchmarkVm(bmSuiteArgs):
return []
return [WasmJMHJsonRule(mx_benchmark.JMHBenchmarkSuiteBase.jmh_result_file, self.benchSuiteName(bmSuiteArgs))]
add_bm_suite(WasmBenchmarkSuite())
| 1.445313 | 1 |
web/api/auth/views.py | FrankyTerra/broadcast | 0 | 12769503 | <reponame>FrankyTerra/broadcast
from api.auth.mixins import GetRequestUserAsObjectMixin
from api.auth.serializers import ProfileRetrieveSerializer
from core.api.views import CustomRetrieveAPIView
from user.models import User
class UserProfileRetrieveView(GetRequestUserAsObjectMixin, CustomRetrieveAPIView):
"""
    Retrieve the basic profile information of the authenticated user
"""
queryset = User.objects.all()
serializer_class = ProfileRetrieveSerializer
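# A minimal sketch of what GetRequestUserAsObjectMixin presumably contributes
# (an assumption -- the real mixin lives in api.auth.mixins, not shown here):
#
#   class GetRequestUserAsObjectMixin:
#       def get_object(self):
#           # serve the request's authenticated user instead of a pk lookup
#           return self.request.user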
| 1.835938 | 2 |
global_custom/custom/python/delivery_note.py | VPS-Consultancy/global_custom | 0 | 12769504 | from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
import frappe
from frappe import _
@frappe.whitelist()
def uom_list(item):
    # use a distinct local name so the function name is not shadowed
    rows = frappe.db.get_list('UOM Conversion Detail', {"parent": item}, 'uom')
    return [row['uom'] for row in rows]
def update_dn(doc, action):
    for row in doc.items:
        if row.item_code and row.uom:
            # reuse the whitelisted helper above instead of repeating the query
            valid_uoms = uom_list(row.item_code)
            if row.uom not in valid_uoms:
                frappe.throw(_("UOM {0} is invalid for the item {1} in the row {2}").format(
                    row.uom, row.item_code, row.idx))
def restrict_role(doc, action):
if doc.is_return:
roles = frappe.get_roles()
restricted_role = ['Muntaqeem']
for role in restricted_role:
if role in roles:
                frappe.throw(_('Not Permitted'))
| 2.03125 | 2 |
src/schemas/WordSchema.py | jodiefostersarmy/T4A2 | 0 | 12769505 | from main import ma
from models.Word import Word
from marshmallow.validate import Length
class WordSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Word
word = ma.String(required=True, validate=Length(min=3))
definition = ma.String(required=True, validate=Length(min=5))
pronunciation = ma.String(required=True, validate=Length(min=1))
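# Validation sketch (hypothetical payload): values shorter than their Length
# minimums are rejected, e.g.
#   word_schema.validate({"word": "ab", "definition": "tiny", "pronunciation": ""})
# reports errors for word (min 3), definition (min 5) and pronunciation (min 1).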
word_schema = WordSchema()
words_schema = WordSchema(many=True)
| 2.578125 | 3 |
src/config.py | gustavodsf/dokcer-py-gmail | 1 | 12769506 | import json
import logging
class Config(object):
def __init__(self):
self.logger = logging.getLogger('CONFIG')
    def read(self):
        with open("config.json") as file:
            data = json.load(file)  # the context manager closes the file; no explicit close() needed
        self.logger.info("Success on reading configuration file.")
        return data
| 3.109375 | 3 |
src/sql_alchemy/01_start_vc5_data_auto_service/restful_auto_service/data/car.py | turing4ever/restful-services-in-pyramid | 58 | 12769507 | from dateutil.parser import parse
class Car:
def __init__(self, brand, name, price, year, damage, last_seen, image, id=None):
self.image = image
self.last_seen = parse(last_seen)
self.damage = damage
self.year = year
self.price = price
self.name = name
self.brand = brand
self.id = id
def to_dict(self):
return {
'brand': self.brand,
'name': self.name,
'price': self.price,
'year': self.year,
'damage': self.damage,
'last_seen': self.last_seen.isoformat(),
'id': self.id,
'image': self.image
}
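# Usage sketch (hypothetical values; last_seen accepts any string that
# dateutil.parser.parse understands):
#   car = Car("Opel", "Astra", 4500, 2009, "hail damage", "2017-01-05", "astra.jpg", id=1)
#   car.to_dict()["last_seen"]  # -> "2017-01-05T00:00:00"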
| 3.0625 | 3 |
Computer_science/B05_Python/01_Basics/S01_Basic_syntax.py | Polirecyliente/SGConocimiento | 0 | 12769508 |
# Basic syntax
#T# Table of contents
#C# Blocks of code
#C# Variables, constants, literals
#C# - Type hints
#C# Escape sequences and escaped chars
#C# Expressions
#C# Function calls
#C# Statements
#C# Multiline statements
#C# Multistatement lines
#C# Metadata
#T# Beginning of content
# |-------------------------------------------------------------
#T# run a Python file /path/to/file1.py in the operating system shell with
# SYNTAX python3 /path/to/file1.py
#T# output variables to stdout with the print function
# SYNTAX print(var1, var2, var3)
#T# the same can be done by simply typing the variable's name in the Python prompt
# SYNTAX var1
#T# get help about a symbol or name (function, attribute, etc.) with the help function, used in a script or in the Python prompt
# SYNTAX help(name1)
help(round)
# |-------------------------------------------------------------
#C# Blocks of code
# |-------------------------------------------------------------
#T# indentation acts as the delimiter (like braces)
if True:
int1 = 70
# |-------------------------------------------------------------
#C# Variables, constants, literals
# |-------------------------------------------------------------
#T# variables are dynamically typed. Literals are values written directly in the code, such as numbers and quoted strings
str1 = 'str'
int1 = 5
#T# variable identifiers can only contain letters, numbers, and the underscore
var_aux1 = 0
#T# strings are created within quotation symbols, each quotation symbol type can be used inside the others as a normal character
str1 = 'sen1word1 sen1""""word2 sen1word3 sen1word4'
str1 = "sen2word1 sen2''''word2 sen2word3 sen2word4"
str1 = """sen3word1 sen3"'"'word2
sen3word3 sen3word4"""
#C# - Type hints
# |-----
#T# type hints are used when an object lacks the autocompletion it should have; this happens because the type of a dynamically typed name can't be known before runtime
# SYNTAX obj1 = Constructor1() # type: Object_type1
#T# the type hint is written inside a comment after the assignment; the autocompletion of obj1 then follows the type Object_type1
var1 = 8 # type: str
# var1. now autocompletes as a string, because the editor trusts the hint over the assigned int value
# |-----
# |-------------------------------------------------------------
#C# Escape sequences and escaped chars
# |-------------------------------------------------------------
#T# a special kind of literal combination is the escape sequence and the escaped char; escape sequences mean something particular, e.g. \n means newline, while escaped chars turn operators into literals, e.g. {{ makes a single curly brace be taken literally
# SYNTAX \char1
#T# an escape starts with a backslash \ and is followed by char1, which is another character or characters, most commonly a single character
# SYNTAX {{
# SYNTAX }}
#T# in interpolated strings, duplicating the curly braces escapes them
str1 = f"Line1\n{{Line2}}"
#T# printing str1 gives the following
# Line1
# {Line2}
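#T# other common escape sequences are \t for a tab, \\ for a literal backslash, and \' or \" for a quote inside a same-quoted string
str1 = 'tab\tseparated, a backslash \\, and a quote \''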
# |-------------------------------------------------------------
#C# Expressions
# |-------------------------------------------------------------
#T# expressions are evaluated and return a value
int1 = 5 + 3 - 7 # 1
# |-------------------------------------------------------------
#C# Function calls
# |-------------------------------------------------------------
#T# a function call is made with the function name followed by a pair of parentheses that contain the arguments separated by commas
#T# the function name can be separated by whitespace (but not a newline) from the arguments in parentheses
#T# basic output to stdout is done with the print function
print("Hello, Python!")
#T# basic input from stdin is done with the input function
var1 = input ("\nPlease enter var1 as input\n")
#T# the arguments to a function contain positional arguments and kwarg pairs
# SYNTAX func1(pos1, pos2, kwarg1 = value1, kwarg2 = value2)
# SYNTAX func1(pos1, pos2, kwargs1)
#T# pos1 and pos2 represent positional arguments so they must be written in that order of positions at the start of the parentheses
#T# kwarg1 = value1, and kwarg2 = value2 are kwarg value pairs that allow naming arguments, these can be written in any order
#T# kwargs1 are the kwarg value pairs, this is a shorthand notation to indicate the kwarg value pairs without writing them one by one
str1 = 'string to print'
print(str1, end = '\n') #| str1 is a positional argument, and the end kwarg is used to set the character at the end of the string
#T# a function signature is a short way to write how to use a function, commonly it contains the return value, the function name, and the arguments, which is the minimum needed
# SYNTAX value1 = func1(arg1, arg2, arg3)
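#T# for example, the signature value1 = round(number1, ndigits1) is used as follows
var1 = round(3.14159, 2) # 3.14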
# |-------------------------------------------------------------
#C# Statements
# |-------------------------------------------------------------
#T# a statement is a complete instruction
int1 = 5
#T# int1 = 5 is a statement, the variable int1 is made to point to the value 5
#T# the dot notation is used to access the namespace of an object
# SYNTAX obj1.name1.name2
#T# here the object obj1 has name1 in its namespace, and name1 has name2 in its namespace
int1 = 5
int1 = int1.bit_length() # 3
#T# the bit_length function is accessed using dot notation on an integer
#T# the pass keyword is used to create a no-op statement that does nothing, as a filler
pass
# |-------------------------------------------------------------
#C# Multiline statements
# |-------------------------------------------------------------
#T# a multiline statement is created with a backslash at the end of each line; inside brackets a statement continues onto the next line without a backslash, but the comma separating items is still required (adjacent string literals would otherwise be concatenated)
int1 = 5 + 6 + \
    3 + 7 # 21
list1 = ['Mon', 'Wed',
    'Fri', 'Sun']
# |-------------------------------------------------------------
#C# Multistatement lines
# |-------------------------------------------------------------
#T# multiple statements in the same line are separated with a semicolon
int1 = 9; int2 = 7; int3 = 0
# |-------------------------------------------------------------
#C# Metadata
# |-------------------------------------------------------------
#T# the character encoding of a python file can be declared with the `coding` metadata placed on the first or second line of the file, for example:
# coding=utf-8
# |-------------------------------------------------------------
| 3.390625 | 3 |
TweakApi/apis/data_source_rest_api.py | tweak-com-public/tweak-api-client-python | 0 | 12769509 | <reponame>tweak-com-public/tweak-api-client-python
# coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DataSourceRestApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
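    # Usage sketch (illustrative, not part of the generated code): host and
    # access_token come from the shared Configuration; the filter values are
    # made-up examples.
    #
    #   api = DataSourceRestApi()
    #   total = api.data_source_rests_count_get(where='{"name": "demo"}')
    #   found = api.data_source_rests_find_one_get(filter='{"where": {"name": "demo"}}')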
def data_source_rests_change_stream_get(self, **kwargs):
"""
Create a change stream.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_change_stream_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str options:
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_change_stream_get_with_http_info(**kwargs)
else:
(data) = self.data_source_rests_change_stream_get_with_http_info(**kwargs)
return data
def data_source_rests_change_stream_get_with_http_info(self, **kwargs):
"""
Create a change stream.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_change_stream_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str options:
:return: file
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['options']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_change_stream_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/DataSourceRests/change-stream'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'options' in params:
query_params['options'] = params['options']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_change_stream_post(self, **kwargs):
"""
Create a change stream.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_change_stream_post(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str options:
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_change_stream_post_with_http_info(**kwargs)
else:
(data) = self.data_source_rests_change_stream_post_with_http_info(**kwargs)
return data
def data_source_rests_change_stream_post_with_http_info(self, **kwargs):
"""
Create a change stream.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_change_stream_post_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str options:
:return: file
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['options']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_change_stream_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/DataSourceRests/change-stream'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'options' in params:
form_params.append(('options', params['options']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_count_get(self, **kwargs):
"""
Count instances of the model matched by where from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_count_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_count_get_with_http_info(**kwargs)
else:
(data) = self.data_source_rests_count_get_with_http_info(**kwargs)
return data
def data_source_rests_count_get_with_http_info(self, **kwargs):
"""
Count instances of the model matched by where from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_count_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['where']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_count_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/DataSourceRests/count'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'where' in params:
query_params['where'] = params['where']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2001',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_find_one_get(self, **kwargs):
"""
Find first instance of the model matched by filter from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_find_one_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_find_one_get_with_http_info(**kwargs)
else:
(data) = self.data_source_rests_find_one_get_with_http_info(**kwargs)
return data
def data_source_rests_find_one_get_with_http_info(self, **kwargs):
"""
Find first instance of the model matched by filter from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_find_one_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_find_one_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/DataSourceRests/findOne'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'filter' in params:
query_params['filter'] = params['filter']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DataSourceRest',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_get(self, **kwargs):
"""
Find all instances of the model matched by filter from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
:return: list[DataSourceRest]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_get_with_http_info(**kwargs)
else:
(data) = self.data_source_rests_get_with_http_info(**kwargs)
return data
def data_source_rests_get_with_http_info(self, **kwargs):
"""
Find all instances of the model matched by filter from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
:return: list[DataSourceRest]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/DataSourceRests'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'filter' in params:
query_params['filter'] = params['filter']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DataSourceRest]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_delete(self, id, **kwargs):
"""
Delete a model instance by {{id}} from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_delete(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_delete_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_delete_with_http_info(id, **kwargs)
return data
def data_source_rests_id_delete_with_http_info(self, id, **kwargs):
"""
Delete a model instance by {{id}} from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_delete_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_delete`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_dynamic_datas_count_get(self, id, **kwargs):
"""
Counts dynamicDatas of DataSourceRest.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_count_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_dynamic_datas_count_get_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_dynamic_datas_count_get_with_http_info(id, **kwargs)
return data
def data_source_rests_id_dynamic_datas_count_get_with_http_info(self, id, **kwargs):
"""
Counts dynamicDatas of DataSourceRest.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_count_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'where']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_dynamic_datas_count_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_dynamic_datas_count_get`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/dynamicDatas/count'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'where' in params:
query_params['where'] = params['where']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2001',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_dynamic_datas_delete(self, id, **kwargs):
"""
Deletes all dynamicDatas of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_delete(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_dynamic_datas_delete_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_dynamic_datas_delete_with_http_info(id, **kwargs)
return data
def data_source_rests_id_dynamic_datas_delete_with_http_info(self, id, **kwargs):
"""
Deletes all dynamicDatas of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_delete_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_dynamic_datas_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_dynamic_datas_delete`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/dynamicDatas'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_dynamic_datas_fk_delete(self, id, fk, **kwargs):
"""
Delete a related item by id for dynamicDatas.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_fk_delete(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str fk: Foreign key for dynamicDatas (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_dynamic_datas_fk_delete_with_http_info(id, fk, **kwargs)
else:
(data) = self.data_source_rests_id_dynamic_datas_fk_delete_with_http_info(id, fk, **kwargs)
return data
def data_source_rests_id_dynamic_datas_fk_delete_with_http_info(self, id, fk, **kwargs):
"""
Delete a related item by id for dynamicDatas.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_fk_delete_with_http_info(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str fk: Foreign key for dynamicDatas (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_dynamic_datas_fk_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_dynamic_datas_fk_delete`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `data_source_rests_id_dynamic_datas_fk_delete`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/dynamicDatas/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_dynamic_datas_fk_get(self, id, fk, **kwargs):
"""
Find a related item by id for dynamicDatas.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_fk_get(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str fk: Foreign key for dynamicDatas (required)
:return: DynamicData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_dynamic_datas_fk_get_with_http_info(id, fk, **kwargs)
else:
(data) = self.data_source_rests_id_dynamic_datas_fk_get_with_http_info(id, fk, **kwargs)
return data
def data_source_rests_id_dynamic_datas_fk_get_with_http_info(self, id, fk, **kwargs):
"""
Find a related item by id for dynamicDatas.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_fk_get_with_http_info(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str fk: Foreign key for dynamicDatas (required)
:return: DynamicData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_dynamic_datas_fk_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_dynamic_datas_fk_get`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `data_source_rests_id_dynamic_datas_fk_get`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/dynamicDatas/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DynamicData',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_dynamic_datas_fk_put(self, id, fk, **kwargs):
"""
Update a related item by id for dynamicDatas.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_fk_put(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str fk: Foreign key for dynamicDatas (required)
:param DynamicData data:
:return: DynamicData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_dynamic_datas_fk_put_with_http_info(id, fk, **kwargs)
else:
(data) = self.data_source_rests_id_dynamic_datas_fk_put_with_http_info(id, fk, **kwargs)
return data
def data_source_rests_id_dynamic_datas_fk_put_with_http_info(self, id, fk, **kwargs):
"""
Update a related item by id for dynamicDatas.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_fk_put_with_http_info(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str fk: Foreign key for dynamicDatas (required)
:param DynamicData data:
:return: DynamicData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fk', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_dynamic_datas_fk_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_dynamic_datas_fk_put`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `data_source_rests_id_dynamic_datas_fk_put`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/dynamicDatas/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DynamicData',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_dynamic_datas_get(self, id, **kwargs):
"""
Queries dynamicDatas of DataSourceRest.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str filter:
:return: list[DynamicData]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_dynamic_datas_get_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_dynamic_datas_get_with_http_info(id, **kwargs)
return data
def data_source_rests_id_dynamic_datas_get_with_http_info(self, id, **kwargs):
"""
Queries dynamicDatas of DataSourceRest.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param str filter:
:return: list[DynamicData]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_dynamic_datas_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_dynamic_datas_get`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/dynamicDatas'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'filter' in params:
query_params['filter'] = params['filter']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DynamicData]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_dynamic_datas_post(self, id, **kwargs):
"""
Creates a new instance in dynamicDatas of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param DynamicData data:
:return: DynamicData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_dynamic_datas_post_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_dynamic_datas_post_with_http_info(id, **kwargs)
return data
def data_source_rests_id_dynamic_datas_post_with_http_info(self, id, **kwargs):
"""
Creates a new instance in dynamicDatas of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_dynamic_datas_post_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param DynamicData data:
:return: DynamicData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_dynamic_datas_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_dynamic_datas_post`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/dynamicDatas'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DynamicData',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_exists_get(self, id, **kwargs):
"""
Check whether a model instance exists in the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_exists_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_exists_get_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_exists_get_with_http_info(id, **kwargs)
return data
def data_source_rests_id_exists_get_with_http_info(self, id, **kwargs):
"""
Check whether a model instance exists in the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_exists_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_exists_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_exists_get`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/exists'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2002',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_get(self, id, **kwargs):
"""
        Find a model instance by id from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param str filter: Filter defining fields and include - must be a JSON-encoded string ({\"something\":\"value\"})
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_get_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_get_with_http_info(id, **kwargs)
return data
def data_source_rests_id_get_with_http_info(self, id, **kwargs):
"""
        Find a model instance by id from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param str filter: Filter defining fields and include - must be a JSON-encoded string ({\"something\":\"value\"})
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_get`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'filter' in params:
query_params['filter'] = params['filter']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DataSourceRest',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_head(self, id, **kwargs):
"""
Check whether a model instance exists in the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_head(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_head_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_head_with_http_info(id, **kwargs)
return data
def data_source_rests_id_head_with_http_info(self, id, **kwargs):
"""
Check whether a model instance exists in the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_head_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_head" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_head`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'HEAD',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2002',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_patch(self, id, **kwargs):
"""
Patch attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_patch(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param DataSourceRest data: An object of model property name/value pairs
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_patch_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_patch_with_http_info(id, **kwargs)
return data
def data_source_rests_id_patch_with_http_info(self, id, **kwargs):
"""
Patch attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_patch_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param DataSourceRest data: An object of model property name/value pairs
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_patch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_patch`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DataSourceRest',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_put(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_put(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param DataSourceRest data: Model instance data
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_put_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_put_with_http_info(id, **kwargs)
return data
def data_source_rests_id_put_with_http_info(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_put_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param DataSourceRest data: Model instance data
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_put`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DataSourceRest',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_replace_post(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_replace_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param DataSourceRest data: Model instance data
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_replace_post_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_replace_post_with_http_info(id, **kwargs)
return data
def data_source_rests_id_replace_post_with_http_info(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_replace_post_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param DataSourceRest data: Model instance data
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_replace_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_replace_post`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/replace'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DataSourceRest',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_id_team_get(self, id, **kwargs):
"""
Fetches belongsTo relation team.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_team_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param bool refresh:
:return: Team
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_id_team_get_with_http_info(id, **kwargs)
else:
(data) = self.data_source_rests_id_team_get_with_http_info(id, **kwargs)
return data
def data_source_rests_id_team_get_with_http_info(self, id, **kwargs):
"""
Fetches belongsTo relation team.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_id_team_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: DataSourceRest id (required)
:param bool refresh:
:return: Team
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_id_team_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `data_source_rests_id_team_get`")
collection_formats = {}
resource_path = '/DataSourceRests/{id}/team'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Team',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def data_source_rests_post(self, **kwargs):
"""
Create a new instance of the model and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_post(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param DataSourceRest data: Model instance data
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.data_source_rests_post_with_http_info(**kwargs)
else:
(data) = self.data_source_rests_post_with_http_info(**kwargs)
return data
def data_source_rests_post_with_http_info(self, **kwargs):
"""
Create a new instance of the model and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.data_source_rests_post_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param DataSourceRest data: Model instance data
:return: DataSourceRest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_source_rests_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/DataSourceRests'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DataSourceRest',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
| 1.429688 | 1 |
machete/machete/spiders/versioninspector.py | jsvanilla/machete | 0 | 12769510 | import scrapy
import re
from scrapy.loader import ItemLoader
from machete.items import VersionItem
class versionSpider(scrapy.Spider):
name = 'versioninspector'
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36',
'FEED_URI': 'acordeones.json',
'FEED_FORMAT': 'json',
'FEED_EXPORT_ENCODING': 'utf-8'
}
start_urls = ['https://reactjs.org']
def parse(self, response):
        item = ItemLoader(item=VersionItem(), response=response)
        item.add_xpath('version', '//header//div/a[@href="/versions"]/text()[2]')
        # ItemLoader's public API is add_value; response.url records the page
        # the version string came from and stays correct for every crawled
        # page, unlike a local counter that resets on each parse() call.
        item.add_value('source', response.url)
        yield item.load_item()
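        # A quick, illustrative check of the version-matching regex noted
        # below (not wired into the spider; shown only to document the
        # intended pattern):
        #
        #   >>> import re
        #   >>> re.search(r'\d{1,2}\.\d{1,2}\.?\d{0,2}', 'React v17.0.2').group()
        #   '17.0.2'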
# //header//div/a[@href="/versions"]/text()[2] react
# //h1/text() scrapy
# REGEX DE VERSION \d+\.\d+\.*\d*
# REGEX MEJOR \d{1,2}\.\d{1,2}\.?\d{0,2}
# [\d]{1,2}\.[\d]{1,2}\.{0,1}\d{0,2} | 2.5 | 2 |
thermosteam/_stream.py | yoelcortes/thermodynamics | 0 | 12769511 | # -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, <NAME> <<EMAIL>>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import numpy as np
import thermosteam as tmo
import flexsolve as flx
from warnings import warn
from thermosteam import functional as fn
from . import indexer
from . import equilibrium as eq
from . import units_of_measure as thermo_units
from collections.abc import Iterable
from .exceptions import DimensionError, InfeasibleRegion
from chemicals.elements import array_to_atoms, symbol_to_index
from . import utils
from .constants import g
__all__ = ('Stream', )
# %% Utilities
mol_units = indexer.ChemicalMolarFlowIndexer.units
mass_units = indexer.ChemicalMassFlowIndexer.units
vol_units = indexer.ChemicalVolumetricFlowIndexer.units
class StreamData:
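    """Lightweight snapshot of a stream's material flow data, thermal
    condition (T and P), and phase(s); produced by Stream.get_data and
    restored with Stream.set_data."""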
__slots__ = ('_imol', '_T', '_P', '_phases')
def __init__(self, imol, thermal_condition, phases):
self._imol = imol.copy()
self._T = thermal_condition._T
self._P = thermal_condition._P
self._phases = phases
# %%
@utils.units_of_measure(thermo_units.stream_units_of_measure)
@utils.thermo_user
@utils.registered(ticket_name='s')
class Stream:
"""
Create a Stream object that defines material flow rates
along with its thermodynamic state. Thermodynamic and transport
properties of a stream are available as properties, while
thermodynamic equilbrium (e.g. VLE, and bubble and dew points)
are available as methods.
Parameters
----------
ID : str, optional
A unique identification. If ID is None, stream will not be registered.
If no ID is given, stream will be registered with a unique ID.
flow : Iterable[float], optional
All flow rates corresponding to chemical `IDs`.
phase : 'l', 'g', or 's'
Either gas (g), liquid (l), or solid (s). Defaults to 'l'.
T : float
Temperature [K]. Defaults to 298.15.
P : float
Pressure [Pa]. Defaults to 101325.
units : str, optional
Flow rate units of measure (only mass, molar, and
volumetric flow rates are valid). Defaults to 'kmol/hr'.
price : float, optional
Price per unit mass [USD/kg]. Defaults to 0.
total_flow : float, optional
Total flow rate.
thermo : :class:`~thermosteam.Thermo`, optional
Thermo object to initialize input and output streams. Defaults to
`biosteam.settings.get_thermo()`.
characterization_factors : dict, optional
Characterization factors for life cycle assessment.
**chemical_flows : float
ID - flow pairs.
Examples
--------
Before creating a stream, first set the chemicals:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
Create a stream, defining the thermodynamic condition and flow rates:
>>> s1 = tmo.Stream(ID='s1',
... Water=20, Ethanol=10, units='kg/hr',
... T=298.15, P=101325, phase='l')
>>> s1.show(flow='kg/hr') # Use the show method to select units of display
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
    >>> s1.show(composition=True, flow='kg/hr') # It's also possible to show by composition
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
composition: Water 0.667
Ethanol 0.333
------- 30 kg/hr
All flow rates are stored as an array in the `mol` attribute:
>>> s1.mol # Molar flow rates [kmol/hr]
array([1.11 , 0.217])
Mass and volumetric flow rates are available as property arrays:
>>> s1.mass
property_array([20.0, 10.0])
>>> s1.vol
property_array([0.02006, 0.012724])
These arrays work just like ordinary arrays, but the data is linked to the molar flows:
>>> # Mass flows are always up to date with molar flows
>>> s1.mol[0] = 1
>>> s1.mass[0]
18.015
>>> # Changing mass flows changes molar flows
>>> s1.mass[0] *= 2
>>> s1.mol[0]
2.0
>>> # Property arrays act just like normal arrays
>>> s1.mass + 2
array([38.031, 12. ])
The temperature, pressure and phase are attributes as well:
>>> (s1.T, s1.P, s1.phase)
(298.15, 101325.0, 'l')
    The most convenient way to get and set flow rates is through
the `get_flow` and `set_flow` methods:
>>> # Set flow
>>> s1.set_flow(1, 'gpm', 'Water')
>>> s1.get_flow('gpm', 'Water')
1.0
>>> # Set multiple flows
>>> s1.set_flow([10, 20], 'kg/hr', ('Ethanol', 'Water'))
>>> s1.get_flow('kg/hr', ('Ethanol', 'Water'))
array([10., 20.])
It is also possible to index using IDs through the
`imol`, `imass`, and `ivol` indexers:
>>> s1.imol.show()
ChemicalMolarFlowIndexer (kmol/hr):
(l) Water 1.11
Ethanol 0.2171
>>> s1.imol['Water']
1.1101687012358397
>>> s1.imol['Ethanol', 'Water']
array([0.217, 1.11 ])
Thermodynamic properties are available as stream properties:
>>> s1.H # Enthalpy (kJ/hr)
0.0
Note that the reference enthalpy is 0.0 at the reference
temperature of 298.15 K, and pressure of 101325 Pa.
    Retrieve the enthalpy at 10 degC above the reference:
>>> s1.T += 10
>>> s1.H
1083.467954...
Other thermodynamic properties are temperature and pressure dependent as well:
>>> s1.rho # Density [kg/m3]
908.648
    It may be more convenient to get properties with different units:
>>> s1.get_property('rho', 'g/cm3')
0.90864
It is also possible to set some of the properties in different units:
>>> s1.set_property('T', 40, 'degC')
>>> s1.T
313.15
Bubble point and dew point computations can be performed through stream methods:
>>> bp = s1.bubble_point_at_P() # Bubble point at constant pressure
>>> bp
BubblePointValues(T=357.09, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.49 0.51])
The bubble point results contain all results as attributes:
>>> bp.T # Temperature [K]
357.088...
>>> bp.y # Vapor composition
array([0.49, 0.51])
Vapor-liquid equilibrium can be performed by setting 2 degrees of freedom from the following list: `T` [Temperature; in K], `P` [Pressure; in Pa], `V` [Vapor fraction], `H` [Enthalpy; in kJ/hr].
Set vapor fraction and pressure of the stream:
>>> s1.vle(P=101325, V=0.5)
>>> s1.show()
MultiStream: s1
phases: ('g', 'l'), T: 364.8 K, P: 101325 Pa
flow (kmol/hr): (g) Water 0.472
Ethanol 0.192
(l) Water 0.638
Ethanol 0.0255
Note that the stream is a now a MultiStream object to manage multiple phases.
Each phase can be accessed separately too:
>>> s1['l'].show()
Stream:
phase: 'l', T: 364.8 K, P: 101325 Pa
flow (kmol/hr): Water 0.638
Ethanol 0.0255
>>> s1['g'].show()
Stream:
phase: 'g', T: 364.8 K, P: 101325 Pa
flow (kmol/hr): Water 0.472
Ethanol 0.192
We can convert a MultiStream object back to a Stream object by setting the phase:
>>> s1.phase = 'l'
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 364.8 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
"""
__slots__ = (
'_ID', '_imol', '_thermal_condition', '_thermo', '_streams',
'_bubble_point_cache', '_dew_point_cache',
'_vle_cache', '_lle_cache', '_sle_cache',
'_sink', '_source', '_price', '_islinked', '_property_cache_key',
'_property_cache', 'characterization_factors', '_user_equilibrium',
# '_velocity', '_height'
)
line = 'Stream'
#: [DisplayUnits] Units of measure for IPython display (class attribute)
display_units = thermo_units.DisplayUnits(T='K', P='Pa',
flow=('kmol/hr', 'kg/hr', 'm3/hr'),
composition=False,
N=7)
_units_of_measure = thermo_units.stream_units_of_measure
_flow_cache = {}
def __init__(self, ID= '', flow=(), phase='l', T=298.15, P=101325.,
units=None, price=0., total_flow=None, thermo=None,
characterization_factors=None,
# velocity=0., height=0.,
**chemical_flows):
#: dict[obj, float] Characterization factors for life cycle assessment in impact / kg.
        self.characterization_factors = {} if characterization_factors is None else characterization_factors
self._thermal_condition = tmo.ThermalCondition(T, P)
thermo = self._load_thermo(thermo)
chemicals = thermo.chemicals
self.price = price
# self.velocity = velocity
# self.height = height
if units:
name, factor = self._get_flow_name_and_factor(units)
if name == 'mass':
group_wt_compositions = chemicals._group_wt_compositions
for cID in tuple(chemical_flows):
if cID in group_wt_compositions:
compositions = group_wt_compositions[cID]
group_flow = chemical_flows.pop(cID)
chemical_group = chemicals[cID]
for i in range(len(chemical_group)):
chemical_flows[chemical_group[i]._ID] = group_flow * compositions[i]
elif name == 'vol':
group_wt_compositions = chemicals._group_wt_compositions
for cID in chemical_flows:
if cID in group_wt_compositions:
raise ValueError(f"cannot set volumetric flow by chemical group '{i}'")
self._init_indexer(flow, phase, chemicals, chemical_flows)
mol = self.mol
flow = getattr(self, name)
if total_flow is not None: mol *= total_flow / mol.sum()
material_data = mol / factor
flow[:] = material_data
else:
self._init_indexer(flow, phase, chemicals, chemical_flows)
if total_flow:
mol = self.mol
mol *= total_flow / mol.sum()
self._sink = self._source = None # For BioSTEAM
self.reset_cache()
self._register(ID)
self._islinked = False
self._user_equilibrium = None
def reset_flow(self, phase=None, units=None, total_flow=None, **chemical_flows):
"""
        Convenience method for resetting flow rate data.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=1)
>>> s1.reset_flow(Ethanol=1, phase='g', units='kg/hr', total_flow=2)
>>> s1.show('cwt')
Stream: s1
phase: 'g', T: 298.15 K, P: 101325 Pa
composition: Ethanol 1
------- 2 kg/hr
"""
imol = self._imol
imol.empty()
if phase: imol.phase = phase
if chemical_flows:
keys, values = zip(*chemical_flows.items())
if units is None:
self.imol[keys] = values
else:
self.set_flow(values, units, keys)
if total_flow:
if units is None:
self.F_mol = total_flow
else:
self.set_total_flow(total_flow, units)
def _reset_thermo(self, thermo):
if thermo is self._thermo: return
self._thermo = thermo
self._imol.reset_chemicals(thermo.chemicals)
self._islinked = False
self.reset_cache()
if hasattr(self, '_streams'):
for phase, stream in self._streams.items():
stream._imol = self._imol.get_phase(phase)
stream._thermo = thermo
def user_equilibrium(self, *args, **kwargs):
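        """Evaluate the user-defined equilibrium function registered via
        set_user_equilibrium, passing through any arguments."""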
return self._user_equilibrium(self, *args, **kwargs)
def set_user_equilibrium(self, f):
self._user_equilibrium = f
@property
def has_user_equilibrium(self):
return self._user_equilibrium is not None
def get_CF(self, key, units=None):
"""
Returns the life-cycle characterization factor on a kg basis given the
impact indicator key.
Parameters
----------
key : str
Key of impact indicator.
units : str, optional
Units of impact indicator. Before using this argument, the default units
of the impact indicator should be defined with
thermosteam.settings.define_impact_indicator.
Units must also be dimensionally consistent with the default units.
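        Examples
        --------
        An unset indicator defaults to zero; the 'GWP' key here is purely
        illustrative, not an indicator predefined by thermosteam:
        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water'], cache=True)
        >>> s = tmo.Stream('s', Water=1)
        >>> s.get_CF('GWP')
        0.0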
"""
try:
value = self.characterization_factors[key]
except:
return 0.
if units is not None:
original_units = tmo.settings.get_impact_indicator_units(key)
value = original_units.convert(value, units)
return value
def set_CF(self, key, value, units=None):
"""
Set the life-cycle characterization factor on a kg basis given the
impact indicator key and the units of measure.
Parameters
----------
key : str
Key of impact indicator.
value : float
Characterization factor value.
units : str, optional
Units of impact indicator. Before using this argument, the default units
of the impact indicator should be defined with
thermosteam.settings.define_impact_indicator.
Units must also be dimensionally consistent with the default units.
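        Examples
        --------
        Set and read back a factor; the 'GWP' key is purely illustrative,
        not an indicator predefined by thermosteam:
        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water'], cache=True)
        >>> s = tmo.Stream('s', Water=1)
        >>> s.set_CF('GWP', 2.)
        >>> s.get_CF('GWP')
        2.0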
"""
if units is not None:
original_units = tmo.settings.get_impact_indicator_units(key)
value = original_units.unconvert(value, units)
self.characterization_factors[key] = value
def get_impact(self, key):
"""Return hourly rate of the impact indicator given the key."""
cfs = self.characterization_factors
return cfs[key] * self.F_mass if key in cfs else 0.
def empty_negative_flows(self):
"""
Replace flows of all components with negative values with 0.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=1, Ethanol=-1)
>>> s1.empty_negative_flows()
>>> s1.show()
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 1
"""
data = self._imol._data
data[data < 0.] = 0.
def shares_flow_rate_with(self, other):
"""
Return whether other stream shares data with this one.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1')
>>> other = s1.flow_proxy()
>>> s1.shares_flow_rate_with(other)
True
>>> s1 = tmo.MultiStream('s1', phases=('l', 'g'))
>>> s1['g'].shares_flow_rate_with(s1)
True
>>> s2 = tmo.MultiStream('s2', phases=('l', 'g'))
>>> s1['g'].shares_flow_rate_with(s2)
False
>>> s1['g'].shares_flow_rate_with(s2['g'])
False
"""
imol = self._imol
other_imol = other._imol
if imol.__class__ is other_imol.__class__ and imol._data is other_imol._data:
shares_data = True
elif isinstance(other, tmo.MultiStream):
phase = self.phase
substreams = other._streams
if phase in substreams:
substream = substreams[phase]
shares_data = self.shares_flow_rate_with(substream)
else:
shares_data = False
else:
shares_data = False
return shares_data
def as_stream(self):
"""Does nothing."""
def get_data(self):
"""
Return a StreamData object containing data on material flow rates,
temperature, pressure, and phase(s).
See Also
--------
Stream.set_data
Examples
--------
Get and set data from stream at different conditions
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream('stream', Water=10)
>>> data = stream.get_data()
>>> stream.vle(V=0.5, P=101325)
>>> data_vle = stream.get_data()
>>> stream.set_data(data)
>>> stream.show()
Stream: stream
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 10
>>> stream.set_data(data_vle)
>>> stream.show()
MultiStream: stream
phases: ('g', 'l'), T: 373.12 K, P: 101325 Pa
flow (kmol/hr): (g) Water 5
(l) Water 5
Note that only StreamData objects are valid for this method:
>>> stream.set_data({'T': 298.15})
Traceback (most recent call last):
ValueError: stream_data must be a StreamData object; not dict
"""
return StreamData(self._imol, self._thermal_condition, self.phases)
def set_data(self, stream_data):
"""
Set material flow rates, temperature, pressure, and phase(s) through a
StreamData object
See Also
--------
Stream.get_data
"""
if isinstance(stream_data, StreamData):
self.phases = stream_data._phases
self._imol.copy_like(stream_data._imol)
self._thermal_condition.copy_like(stream_data)
else:
raise ValueError(f'stream_data must be a StreamData object; not {type(stream_data).__name__}')
@property
def price(self):
"""[float] Price of stream per unit mass [USD/kg]."""
return self._price
@price.setter
def price(self, price):
if np.isfinite(price):
self._price = float(price)
else:
raise AttributeError(f'price must be finite, not {price}')
# @property
# def velocity(self):
# """[float] Velocity of stream [m/s]."""
# return self._velocity
# @velocity.setter
# def velocity(self, velocity):
# if np.isfinite(velocity):
# self._velocity = float(velocity)
# else:
# raise AttributeError(f'velocity must be finite, not {velocity}')
# @property
# def height(self):
# """[float] Relative height of stream [m]."""
# return self._height
# @height.setter
# def height(self, height):
# if np.isfinite(height):
# self._height = float(height)
# else:
# raise AttributeError(f'height must be finite, not {height}')
# @property
# def potential_energy(self):
# """[float] Potential energy flow rate [kW]"""
# return (g * self.height * self.F_mass) / 3.6e6
# @property
# def kinetic_energy(self):
# """[float] Kinetic energy flow rate [kW]"""
# return 0.5 * self.F_mass / 3.6e6 * self._velocity * self._velocity
def isempty(self):
"""
Return whether or not stream is empty.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream()
>>> stream.isempty()
True
"""
return self._imol.isempty()
def sanity_check(self):
"""
Raise an InfeasibleRegion error if flow rates are infeasible.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1')
>>> s1.sanity_check()
>>> s1.mol[0] = -1.
>>> s1.sanity_check()
Traceback (most recent call last):
InfeasibleRegion: negative material flow rate is infeasible
"""
material = self._imol._data
if material[material < 0.].any(): raise InfeasibleRegion('negative material flow rate')
@property
def vapor_fraction(self):
"""Molar vapor fraction."""
return 1.0 if self.phase in 'gG' else 0.0
@property
def liquid_fraction(self):
"""Molar liquid fraction."""
return 1.0 if self.phase in 'lL' else 0.0
@property
def solid_fraction(self):
"""Molar solid fraction."""
return 1.0 if self.phase in 'sS' else 0.0
def isfeed(self):
"""Return whether stream has a sink but no source."""
return bool(self._sink and not self._source)
def isproduct(self):
"""Return whether stream has a source but no sink."""
return bool(self._source and not self._sink)
@property
def main_chemical(self):
"""[str] ID of chemical with the largest mol fraction in stream."""
return self.chemicals.tuple[self.mol.argmax()].ID
def disconnect_source(self):
"""Disconnect stream from source."""
source = self._source
if source:
outs = source.outs
index = outs.index(self)
outs[index] = None
def disconnect_sink(self):
"""Disconnect stream from sink."""
sink = self._sink
if sink:
ins = sink.ins
index = ins.index(self)
ins[index] = None
def disconnect(self):
"""Disconnect stream from unit operations."""
self.disconnect_source()
self.disconnect_sink()
def _init_indexer(self, flow, phase, chemicals, chemical_flows):
"""Initialize molar flow rates."""
if len(flow) == 0:
if chemical_flows:
imol = indexer.ChemicalMolarFlowIndexer(phase, chemicals=chemicals, **chemical_flows)
else:
imol = indexer.ChemicalMolarFlowIndexer.blank(phase, chemicals)
else:
assert not chemical_flows, ("may specify either 'flow' or "
"'chemical_flows', but not both")
if isinstance(flow, indexer.ChemicalMolarFlowIndexer):
imol = flow
imol.phase = phase
else:
imol = indexer.ChemicalMolarFlowIndexer.from_data(
np.asarray(flow, dtype=float), phase, chemicals)
self._imol = imol
def reset_cache(self):
"""Reset cache regarding equilibrium methods."""
self._bubble_point_cache = eq.BubblePointCache()
self._dew_point_cache = eq.DewPointCache()
self._property_cache_key = None, None, None
self._property_cache = {}
@classmethod
def _get_flow_name_and_factor(cls, units):
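        # Resolve a flow-rate unit string to the matching indexer name
        # ('mol', 'mass', or 'vol') plus its conversion factor, memoizing
        # the pair per unit string in the class-level _flow_cache.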
cache = cls._flow_cache
if units in cache:
name, factor = cache[units]
else:
dimensionality = thermo_units.get_dimensionality(units)
if dimensionality == mol_units.dimensionality:
name = 'mol'
factor = mol_units.conversion_factor(units)
elif dimensionality == mass_units.dimensionality:
name = 'mass'
factor = mass_units.conversion_factor(units)
elif dimensionality == vol_units.dimensionality:
name = 'vol'
factor = vol_units.conversion_factor(units)
else:
raise DimensionError("dimensions for flow units must be in molar, "
"mass or volumetric flow rates, not "
f"'{dimensionality}'")
cache[units] = name, factor
return name, factor
### Property getters ###
def get_atomic_flow(self, symbol):
"""
Return flow rate of atom in kmol / hr given the atomic symbol.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream(Water=1)
>>> stream.get_atomic_flow('H') # kmol/hr of H
2.0
>>> stream.get_atomic_flow('O') # kmol/hr of O
1.0
"""
return (self.chemicals.formula_array[symbol_to_index[symbol], :] * self.mol).sum()
def get_atomic_flows(self):
"""
Return dictionary of atomic flow rates in kmol / hr.
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream(Water=1)
>>> stream.get_atomic_flows()
{'H': 2.0, 'O': 1.0}
"""
return array_to_atoms(self.chemicals.formula_array @ self.mol)
def get_flow(self, units, key=...):
"""
        Return flow rates in the requested units.
Parameters
----------
units : str
Units of measure.
key : tuple[str] or str, optional
Chemical identifiers.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.get_flow('kg/hr', 'Water')
20.0
"""
name, factor = self._get_flow_name_and_factor(units)
indexer = getattr(self, 'i' + name)
return factor * indexer[key]
def set_flow(self, data, units, key=...):
"""
Set flow rates in given units.
Parameters
----------
data : 1d ndarray or float
Flow rate data.
units : str
Units of measure.
key : Iterable[str] or str, optional
Chemical identifiers.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream(ID='s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.set_flow(10, 'kg/hr', 'Water')
>>> s1.get_flow('kg/hr', 'Water')
10.0
"""
name, factor = self._get_flow_name_and_factor(units)
indexer = getattr(self, 'i' + name)
indexer[key] = np.asarray(data, dtype=float) / factor
def get_total_flow(self, units):
"""
Get total flow rate in given units.
Parameters
----------
units : str
Units of measure.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.get_total_flow('kg/hr')
30.0
"""
name, factor = self._get_flow_name_and_factor(units)
flow = getattr(self, 'F_' + name)
return factor * flow
def set_total_flow(self, value, units):
"""
Set total flow rate in given units keeping the composition constant.
Parameters
----------
value : float
New total flow rate.
units : str
Units of measure.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.set_total_flow(1.0,'kg/hr')
>>> s1.get_total_flow('kg/hr')
0.9999999999999999
"""
name, factor = self._get_flow_name_and_factor(units)
setattr(self, 'F_' + name, value / factor)
### Stream data ###
@property
def source(self):
"""[Unit] Outlet location."""
return self._source
@property
def sink(self):
"""[Unit] Inlet location."""
return self._sink
@property
def thermal_condition(self):
"""
[ThermalCondition] Contains the temperature and pressure conditions
of the stream.
"""
return self._thermal_condition
@property
def T(self):
"""[float] Temperature in Kelvin."""
return self._thermal_condition._T
@T.setter
def T(self, T):
self._thermal_condition._T = float(T)
@property
def P(self):
"""[float] Pressure in Pascal."""
return self._thermal_condition._P
@P.setter
def P(self, P):
self._thermal_condition._P = float(P)
@property
def phase(self):
"""Phase of stream."""
return self._imol._phase._phase
@phase.setter
def phase(self, phase):
self._imol._phase.phase = phase
@property
def mol(self):
"""[array] Molar flow rates in kmol/hr."""
return self._imol._data
@mol.setter
def mol(self, value):
mol = self.mol
if mol is not value: mol[:] = value
@property
def mass(self):
"""[property_array] Mass flow rates in kg/hr."""
return self.imass._data
@mass.setter
def mass(self, value):
mass = self.mass
if mass is not value: mass[:] = value
@property
def vol(self):
"""[property_array] Volumetric flow rates in m3/hr."""
return self.ivol._data
@vol.setter
def vol(self, value):
vol = self.vol
if vol is not value:
vol[:] = value
@property
def imol(self):
"""[Indexer] Flow rate indexer with data in kmol/hr."""
return self._imol
@property
def imass(self):
"""[Indexer] Flow rate indexer with data in kg/hr."""
return self._imol.by_mass()
@property
def ivol(self):
"""[Indexer] Flow rate indexer with data in m3/hr."""
return self._imol.by_volume(self._thermal_condition)
### Net flow properties ###
@property
def cost(self):
"""[float] Total cost of stream in USD/hr."""
return self.price * self.F_mass
@property
def F_mol(self):
"""[float] Total molar flow rate in kmol/hr."""
return self._imol._data.sum()
@F_mol.setter
def F_mol(self, value):
F_mol = self.F_mol
if not F_mol: raise AttributeError("undefined composition; cannot set flow rate")
self._imol._data[:] *= value/F_mol
@property
def F_mass(self):
"""[float] Total mass flow rate in kg/hr."""
return np.dot(self.chemicals.MW, self.mol)
@F_mass.setter
def F_mass(self, value):
F_mass = self.F_mass
if not F_mass: raise AttributeError("undefined composition; cannot set flow rate")
self.imol._data[:] *= value/F_mass
@property
def F_vol(self):
"""[float] Total volumetric flow rate in m3/hr."""
F_mol = self.F_mol
return 1000. * self.V * F_mol if F_mol else 0.
@F_vol.setter
def F_vol(self, value):
F_vol = self.F_vol
if not F_vol: raise AttributeError("undefined composition; cannot set flow rate")
self.imol._data[:] *= value / F_vol
@property
def H(self):
"""[float] Enthalpy flow rate in kJ/hr."""
H = self._get_property_cache('H', True)
if H is None:
self._property_cache['H'] = H = self.mixture.H(
self.phase, self.mol, *self._thermal_condition
)
return H
@H.setter
def H(self, H: float):
if not H and self.isempty(): return
try: self.T = self.mixture.solve_T(self.phase, self.mol, H,
*self._thermal_condition)
except Exception as error: # pragma: no cover
phase = self.phase.lower()
if phase == 'g':
# Maybe too little heat, liquid must be present
self.phase = 'l'
elif phase == 'l':
# Maybe too much heat, gas must be present
self.phase = 'g'
else:
raise error
self.T = self.mixture.solve_T(self.phase, self.mol, H,
*self._thermal_condition)
@property
def S(self):
"""[float] Absolute entropy flow rate in kJ/hr."""
S = self._get_property_cache('S', True)
if S is None:
self._property_cache['S'] = S = self.mixture.S(
self.phase, self.mol, *self._thermal_condition
)
return S
@property
def Hnet(self):
"""[float] Total enthalpy flow rate (including heats of formation) in kJ/hr."""
return self.H + self.Hf
@property
def Hf(self):
"""[float] Enthalpy of formation flow rate in kJ/hr."""
return (self.chemicals.Hf * self.mol).sum()
@property
def LHV(self):
"""[float] Lower heating value flow rate in kJ/hr."""
return (self.chemicals.LHV * self.mol).sum()
@property
def HHV(self):
"""[float] Higher heating value flow rate in kJ/hr."""
return (self.chemicals.HHV * self.mol).sum()
@property
def Hvap(self):
"""[float] Enthalpy of vaporization flow rate in kJ/hr."""
mol = self.mol
T = self._thermal_condition._T
Hvap = self._get_property_cache('Hvap', True)
if Hvap is None:
self._property_cache['Hvap'] = Hvap = sum([
i*j.Hvap(T) for i,j in zip(mol, self.chemicals)
if i and not j.locked_state
])
return Hvap
def _get_property_cache(self, name, flow=False):
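        # Memoize mixture properties keyed on (phase, T, P) and composition.
        # On a hit, extensive (flow=True) properties are rescaled by the ratio
        # of the current to the cached total flow; on a miss, the cache is
        # cleared and None is returned so the caller recomputes the value.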
property_cache = self._property_cache
thermal_condition = self._thermal_condition
imol = self._imol
data = imol._data
total = data.sum()
if total == 0.: return 0.
composition = data / total
literal = (imol._phase._phase, thermal_condition._T, thermal_condition._P)
last_literal, last_composition, last_total = self._property_cache_key
if literal == last_literal and (composition == last_composition).all():
prop = property_cache.get(name)
if not prop: return prop
if flow:
return prop * total / last_total
else:
return prop
else:
self._property_cache_key = (literal, composition, total)
property_cache.clear()
return None
@property
def C(self):
"""[float] Heat capacity flow rate in kJ/hr."""
C = self._get_property_cache('C', True)
if C is None:
self._property_cache['C'] = C = self.mixture.Cn(self.phase, self.mol, self.T)
return C
### Composition properties ###
@property
def z_mol(self):
"""[1d array] Molar composition."""
mol = self.mol
z = mol / mol.sum()
z.setflags(0)
return z
@property
def z_mass(self):
"""[1d array] Mass composition."""
mass = self.chemicals.MW * self.mol
F_mass = mass.sum()
if F_mass == 0:
z = mass
else:
z = mass / mass.sum()
z.setflags(0)
return z
@property
def z_vol(self):
"""[1d array] Volumetric composition."""
vol = 1. * self.vol
z = vol / vol.sum()
z.setflags(0)
return z
@property
def MW(self):
"""[float] Overall molecular weight."""
return self.mixture.MW(self.mol)
@property
def V(self):
"""[float] Molar volume [m^3/mol]."""
V = self._get_property_cache('V')
if V is None:
self._property_cache['V'] = V = self.mixture.V(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return V
@property
def kappa(self):
"""[float] Thermal conductivity [W/m/k]."""
kappa = self._get_property_cache('kappa')
if kappa is None:
self._property_cache['kappa'] = kappa = self.mixture.kappa(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return kappa
@property
def Cn(self):
"""[float] Molar heat capacity [J/mol/K]."""
Cn = self._get_property_cache('Cn')
if Cn is None:
self._property_cache['Cn'] = Cn = self.mixture.Cn(
*self._imol.get_phase_and_composition(),
self.T
)
return Cn
@property
def mu(self):
"""[float] Hydrolic viscosity [Pa*s]."""
mu = self._get_property_cache('mu')
if mu is None:
self._property_cache['mu'] = mu = self.mixture.mu(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return mu
@property
def sigma(self):
"""[float] Surface tension [N/m]."""
mol = self.mol
sigma = self._get_property_cache('sigma')
if sigma is None:
self._property_cache['sigma'] = sigma = self.mixture.sigma(
mol / mol.sum(), *self._thermal_condition
)
return sigma
@property
def epsilon(self):
"""[float] Relative permittivity [-]."""
mol = self.mol
epsilon = self._get_property_cache('epsilon')
if epsilon is None:
self._property_cache['epsilon'] = epsilon = self.mixture.epsilon(
mol / mol.sum(), *self._thermal_condition
)
return epsilon
@property
def Cp(self):
"""[float] Heat capacity [J/g/K]."""
return self.Cn / self.MW
@property
def alpha(self):
"""[float] Thermal diffusivity [m^2/s]."""
return fn.alpha(self.kappa,
self.rho,
self.Cp * 1000.)
@property
def rho(self):
"""[float] Density [kg/m^3]."""
return fn.V_to_rho(self.V, self.MW)
@property
def nu(self):
"""[float] Kinematic viscosity [m^2/s]."""
return fn.mu_to_nu(self.mu, self.rho)
@property
def Pr(self):
"""[float] Prandtl number [-]."""
return fn.Pr(self.Cp * 1000,
self.kappa,
self.mu)
### Stream methods ###
@property
def available_chemicals(self):
"""list[Chemical] All chemicals with nonzero flow."""
return [i for i, j in zip(self.chemicals, self.mol) if j]
def in_thermal_equilibrium(self, other):
"""
Return whether or not stream is in thermal equilibrium with
another stream.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> stream = Stream(Water=1, T=300)
>>> other = Stream(Water=1, T=300)
>>> stream.in_thermal_equilibrium(other)
True
"""
return self._thermal_condition.in_equilibrium(other._thermal_condition)
@classmethod
def sum(cls, streams, ID=None, thermo=None, energy_balance=True):
"""
Return a new Stream object that represents the sum of all given streams.
Examples
--------
Sum two streams:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s_sum = tmo.Stream.sum([s1, s1], 's_sum')
>>> s_sum.show(flow='kg/hr')
Stream: s_sum
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
Sum two streams with new property package:
>>> thermo = tmo.Thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s_sum = tmo.Stream.sum([s1, s1], 's_sum', thermo)
>>> s_sum.show(flow='kg/hr')
Stream: s_sum
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
"""
new = cls(ID, thermo=thermo)
if streams: new.copy_thermal_condition(streams[0])
new.mix_from(streams, energy_balance)
return new
def separate_out(self, other, energy_balance=True):
"""
Separate out given stream from this one.
Examples
--------
Separate out another stream with the same thermodynamic property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=30, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2', Water=10, Ethanol=5, units='kg/hr')
>>> s1.separate_out(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 5
It's also possible to separate out streams with different property packages
so long as all chemicals are defined in the mixed stream's property
package:
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1', Water=40, units='kg/hr')
>>> tmo.settings.set_thermo(['Ethanol'], cache=True)
>>> s2 = tmo.Stream('s2', Ethanol=20, units='kg/hr')
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s_mix = tmo.Stream.sum([s1, s2], 's_mix')
>>> s_mix.separate_out(s2)
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Removing empty streams is fine too:
>>> s1.empty(); s_mix.separate_out(s1)
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
"""
if other:
if self is other: self.empty()
if energy_balance: H_new = self.H - other.H
self._imol.separate_out(other._imol)
if energy_balance: self.H = H_new
def mix_from(self, others, energy_balance=True, vle=False):
"""
Mix all other streams into this one, ignoring its initial contents.
Examples
--------
Mix two streams with the same thermodynamic property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = s1.copy('s2')
>>> s1.mix_from([s1, s2])
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
It's also possible to mix streams with different property packages
so long as all chemicals are defined in the mixed stream's property
package:
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1', Water=40, units='kg/hr')
>>> tmo.settings.set_thermo(['Ethanol'], cache=True)
>>> s2 = tmo.Stream('s2', Ethanol=20, units='kg/hr')
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s_mix = tmo.Stream('s_mix')
>>> s_mix.mix_from([s1, s2])
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
Mixing empty streams is fine too:
>>> s1.empty(); s2.empty(); s_mix.mix_from([s1, s2])
>>> s_mix.show()
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow: 0
"""
others = [i for i in others if i]
N_others = len(others)
if N_others == 0:
self.empty()
elif N_others == 1:
self.copy_like(others[0])
elif vle:
phases = ''.join([i.phase for i in others])
self.phases = tuple(set(phases))
self._imol.mix_from([i._imol for i in others])
if energy_balance:
H = sum([i.H for i in others])
                self.vle(H=H, P=self.P)
else:
self.vle(T=self.T, P=self.P)
else:
self.P = min([i.P for i in others])
if energy_balance: H = sum([i.H for i in others])
self._imol.mix_from([i._imol for i in others])
if energy_balance and not self.isempty():
try:
self.H = H
except:
phases = ''.join([i.phase for i in others])
self.phases = tuple(set(phases))
self._imol.mix_from([i._imol for i in others])
self.H = H
def split_to(self, s1, s2, split, energy_balance=True):
"""
Split molar flow rate from this stream to two others given
the split fraction or an array of split fractions.
Examples
--------
>>> import thermosteam as tmo
>>> chemicals = tmo.Chemicals(['Water', 'Ethanol'], cache=True)
>>> tmo.settings.set_thermo(chemicals)
>>> s = tmo.Stream('s', Water=20, Ethanol=10, units='kg/hr')
>>> s1 = tmo.Stream('s1')
>>> s2 = tmo.Stream('s2')
>>> split = chemicals.kwarray(dict(Water=0.5, Ethanol=0.1))
>>> s.split_to(s1, s2, split)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 10
Ethanol 1
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 10
Ethanol 9
"""
mol = self.mol
chemicals = self.chemicals
values = mol * split
dummy = mol - values
if s1.chemicals is chemicals:
s1.mol[:] = values
else:
CASs, values = zip(*[(i, j) for i, j in zip(chemicals.CASs, values) if j])
s1.empty()
s1._imol[CASs] = values
values = dummy
if s2.chemicals is chemicals:
s2.mol[:] = values
else:
s2.empty()
CASs, values = zip(*[(i, j) for i, j in zip(chemicals.CASs, values) if j])
s2._imol[CASs] = values
if energy_balance:
tc1 = s1._thermal_condition
tc2 = s2._thermal_condition
tc = self._thermal_condition
tc1._T = tc2._T = tc._T
tc1._P = tc2._P = tc._P
s1.phase = s2.phase = self.phase
def link_with(self, other, flow=True, phase=True, TP=True):
"""
Link with another stream.
Parameters
----------
other : Stream
flow : bool, defaults to True
Whether to link the flow rate data.
phase : bool, defaults to True
Whether to link the phase.
TP : bool, defaults to True
Whether to link the temperature and pressure.
See Also
--------
:obj:`~Stream.flow_proxy`
:obj:`~Stream.proxy`
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
>>> s2.link_with(s1)
>>> s1.mol is s2.mol
True
>>> s2.thermal_condition is s1.thermal_condition
True
>>> s1.phase = 'g'
>>> s2.phase
'g'
"""
if not isinstance(other._imol, self._imol.__class__):
at_unit = f" at unit {self.source}" if self.source is other.sink else ""
raise RuntimeError(f"stream {self} cannot link with stream {other}" + at_unit
+ "; streams must have the same class to link")
if self._islinked and not (self.source is other.sink or self.sink is other.source):
raise RuntimeError(f"stream {self} cannot link with stream {other};"
f" {self} already linked")
if TP and flow and (phase or self._imol._data.ndim == 2):
self._imol._data_cache = other._imol._data_cache
else:
self._imol._data_cache.clear()
if TP:
self._thermal_condition = other._thermal_condition
if flow:
self._imol._data = other._imol._data
if phase and self._imol._data.ndim == 1:
self._imol._phase = other._imol._phase
self._islinked = other._islinked = True
def unlink(self):
"""
Unlink stream from other streams.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
>>> s2.link_with(s1)
>>> s1.unlink()
>>> s2.mol is s1.mol
False
MultiStream phases cannot be unlinked:
>>> s1 = tmo.MultiStream(None, phases=('l', 'g'))
>>> s1['g'].unlink()
Traceback (most recent call last):
RuntimeError: phase is locked; stream cannot be unlinked
"""
imol = self._imol
if hasattr(imol, '_phase') and isinstance(imol._phase, tmo._phase.LockedPhase):
raise RuntimeError('phase is locked; stream cannot be unlinked')
if self._islinked:
imol._data_cache.clear()
imol._data = imol._data.copy()
imol._phase = imol._phase.copy()
self._thermal_condition = self._thermal_condition.copy()
self.reset_cache()
self._islinked = False
def copy_like(self, other):
"""
Copy all conditions of another stream.
Examples
--------
Copy data from another stream with the same property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2', Water=2, units='kg/hr')
>>> s1.copy_like(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 2
Copy data from another stream with a different property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s2 = tmo.Stream('s2', Water=2, units='kg/hr')
>>> s1.copy_like(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 2
"""
if isinstance(other, tmo.MultiStream):
phase = other.phase
if len(phase) == 1:
imol = other._imol.to_chemical_indexer(phase)
else:
self.phases = other.phases
imol = other._imol
else:
imol = other._imol
self._imol.copy_like(imol)
self._thermal_condition.copy_like(other._thermal_condition)
def copy_thermal_condition(self, other):
"""
Copy thermal conditions (T and P) of another stream.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=2, units='kg/hr')
>>> s2 = tmo.Stream('s2', Water=1, units='kg/hr', T=300.00)
>>> s1.copy_thermal_condition(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 300 K, P: 101325 Pa
flow (kg/hr): Water 2
"""
self._thermal_condition.copy_like(other._thermal_condition)
def copy_flow(self, other, IDs=..., *, remove=False, exclude=False):
"""
Copy flow rates of another stream to self.
Parameters
----------
other : Stream
Flow rates will be copied from here.
IDs=... : Iterable[str], defaults to all chemicals.
Chemical IDs.
remove=False: bool, optional
            If True, copied chemicals will be removed from `other`.
exclude=False: bool, optional
If True, exclude designated chemicals when copying.
Examples
--------
Initialize streams:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
Copy all flows:
>>> s2.copy_flow(s1)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
Reset and copy just water flow:
>>> s2.empty()
>>> s2.copy_flow(s1, 'Water')
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Reset and copy all flows except water:
>>> s2.empty()
>>> s2.copy_flow(s1, 'Water', exclude=True)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Ethanol 10
Cut and paste flows:
>>> s2.copy_flow(s1, remove=True)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
>>> s1.show()
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow: 0
        It's also possible to copy flows from a multistream:
>>> s1.phases = ('g', 'l')
>>> s1.imol['g', 'Water'] = 10
>>> s2.copy_flow(s1, remove=True)
>>> s2.show()
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 10
>>> s1.show()
MultiStream: s1
phases: ('g', 'l'), T: 298.15 K, P: 101325 Pa
flow: 0
        Copy all flows except water and remove them from the original stream:
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
>>> s2.copy_flow(s1, 'Water', exclude=True, remove=True)
"""
other_mol = other.mol
other_chemicals = other.chemicals
chemicals = self.chemicals
if IDs == ...:
if exclude: return
if chemicals is other_chemicals:
self.mol[:] = other.mol
else:
self.empty()
CASs, values = zip(*[(i, j) for i, j in zip(other_chemicals.CASs, other_mol) if j])
self.imol[CASs] = values
if remove:
if isinstance(other, tmo.MultiStream):
other.imol.data[:] = 0.
else:
other_mol[:] = 0.
else:
if exclude:
if isinstance(IDs, str):
if IDs in other_chemicals:
bad_index = other_chemicals.index(IDs)
other_index = [i for i in range(other_chemicals.size) if i != bad_index]
else:
                        other_index = slice(None)
else:
IDs = [i for i in IDs if i in other_chemicals]
bad_index = set(other_chemicals.indices(IDs))
if bad_index:
other_index = [i for i in range(other_chemicals.size) if i not in bad_index]
else:
                        other_index = slice(None)
else:
other_index = other_chemicals.get_index(IDs)
if chemicals is other_chemicals:
self.mol[other_index] = other_mol[other_index]
else:
CASs = other_chemicals.CASs
other_index = [i for i in other_index if other_mol[i] or CASs[i] in chemicals]
self.imol[tuple([CASs[i] for i in other_index])] = other_mol[other_index]
if remove:
if isinstance(other, tmo.MultiStream):
other.imol.data[:, other_index] = 0
else:
other_mol[other_index] = 0
def copy(self, ID=None, thermo=None):
"""
Return a copy of the stream.
Examples
--------
Create a copy of a new stream:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1_copy = s1.copy('s1_copy')
>>> s1_copy.show(flow='kg/hr')
Stream: s1_copy
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
Warnings
--------
        Prices and LCA characterization factors are not copied.
"""
cls = self.__class__
new = cls.__new__(cls)
new._islinked = False
new._sink = new._source = None
new.characterization_factors = {}
new._thermo = thermo or self._thermo
new._imol = self._imol.copy()
if thermo and thermo.chemicals is not self.chemicals:
new._imol.reset_chemicals(thermo.chemicals)
new._thermal_condition = self._thermal_condition.copy()
new._user_equilibrium = self._user_equilibrium
new.reset_cache()
new.price = 0
new.ID = ID
return new
__copy__ = copy
def flow_proxy(self, ID=None):
"""
Return a new stream that shares flow rate data with this one.
See Also
--------
:obj:`~Stream.link_with`
:obj:`~Stream.proxy`
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = s1.flow_proxy()
>>> s2.mol is s1.mol
True
"""
cls = self.__class__
new = cls.__new__(cls)
new.ID = new._sink = new._source = None
new.price = 0
new._thermo = self._thermo
new._imol = imol = self._imol._copy_without_data()
imol._data = self._imol._data
new._thermal_condition = self._thermal_condition.copy()
new.reset_cache()
new.characterization_factors = {}
self._islinked = new._islinked = True
new._user_equilibrium = self._user_equilibrium
return new
def proxy(self, ID=None):
"""
Return a new stream that shares all thermochemical data with this one.
See Also
--------
:obj:`~Stream.link_with`
:obj:`~Stream.flow_proxy`
Warning
-------
        Price and characterization factor data are not shared.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = s1.proxy()
>>> s2.imol is s1.imol and s2.thermal_condition is s1.thermal_condition
True
"""
cls = self.__class__
new = cls.__new__(cls)
new.ID = None
new._sink = new._source = None
new.price = self.price
new._thermo = self._thermo
new._imol = self._imol
new._thermal_condition = self._thermal_condition
new._property_cache = self._property_cache
new._property_cache_key = self._property_cache_key
new._bubble_point_cache = self._bubble_point_cache
new._dew_point_cache = self._dew_point_cache
new._user_equilibrium = self._user_equilibrium
try: new._vle_cache = self._vle_cache
except AttributeError: pass
new.characterization_factors = {}
self._islinked = new._islinked = True
return new
def empty(self):
"""Empty stream flow rates.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.empty()
>>> s1.F_mol
0.0
"""
self._imol._data[:] = 0.
### Equilibrium ###
@property
def vle(self):
"""[VLE] An object that can perform vapor-liquid equilibrium on the stream."""
self.phases = ('g', 'l')
return self.vle
@property
def lle(self):
"""[LLE] An object that can perform liquid-liquid equilibrium on the stream."""
self.phases = ('L', 'l')
return self.lle
@property
def sle(self):
"""[SLE] An object that can perform solid-liquid equilibrium on the stream."""
self.phases = ('s', 'l')
return self.sle
@property
def vle_chemicals(self):
"""list[Chemical] Chemicals cabable of liquid-liquid equilibrium."""
chemicals = self.chemicals
chemicals_tuple = chemicals.tuple
indices = chemicals.get_vle_indices(self.mol != 0)
return [chemicals_tuple[i] for i in indices]
@property
def lle_chemicals(self):
"""list[Chemical] Chemicals cabable of vapor-liquid equilibrium."""
chemicals = self.chemicals
chemicals_tuple = chemicals.tuple
indices = chemicals.get_lle_indices(self.mol != 0)
return [chemicals_tuple[i] for i in indices]
def get_bubble_point(self, IDs=None):
"""
Return a BubblePoint object capable of computing bubble points.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.get_bubble_point()
BubblePoint([Water, Ethanol])
"""
chemicals = self.chemicals[IDs] if IDs else self.vle_chemicals
bp = self._bubble_point_cache(chemicals, self._thermo)
return bp
def get_dew_point(self, IDs=None):
"""
Return a DewPoint object capable of computing dew points.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.get_dew_point()
DewPoint([Water, Ethanol])
"""
chemicals = self.chemicals.retrieve(IDs) if IDs else self.vle_chemicals
dp = self._dew_point_cache(chemicals, self._thermo)
return dp
def bubble_point_at_T(self, T=None, IDs=None):
"""
        Return a BubblePointValues object with all data on the bubble point at constant temperature.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.bubble_point_at_T()
BubblePointValues(T=350.00, P=76622, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.486 0.514])
"""
bp = self.get_bubble_point(IDs)
z = self.get_normalized_mol(bp.IDs)
return bp(z, T=T or self.T)
def bubble_point_at_P(self, P=None, IDs=None):
"""
        Return a BubblePointValues object with all data on the bubble point at constant pressure.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.bubble_point_at_P()
BubblePointValues(T=357.09, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.49 0.51])
"""
bp = self.get_bubble_point(IDs)
z = self.get_normalized_mol(bp.IDs)
return bp(z, P=P or self.P)
def dew_point_at_T(self, T=None, IDs=None):
"""
        Return a DewPointValues object with all data on the dew point
at constant temperature.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all
chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.dew_point_at_T()
DewPointValues(T=350.00, P=48991, IDs=('Water', 'Ethanol'), z=[0.836 0.164], x=[0.984 0.016])
"""
dp = self.get_dew_point(IDs)
z = self.get_normalized_mol(dp.IDs)
return dp(z, T=T or self.T)
def dew_point_at_P(self, P=None, IDs=None):
"""
        Return a DewPointValues object with all data on the dew point
at constant pressure.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all
chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.dew_point_at_P()
DewPointValues(T=368.66, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], x=[0.984 0.016])
"""
dp = self.get_dew_point(IDs)
z = self.get_normalized_mol(dp.IDs)
return dp(z, P=P or self.P)
def get_normalized_mol(self, IDs):
"""
Return normalized molar fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kmol/hr')
>>> s1.get_normalized_mol(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.imol[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_normalized_mass(self, IDs):
"""
Return normalized mass fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kg/hr')
>>> s1.get_normalized_mass(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.imass[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_normalized_vol(self, IDs):
"""
        Return normalized volumetric fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_normalized_vol(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.ivol[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_molar_fraction(self, IDs):
"""
Return molar fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kmol/hr')
>>> s1.get_molar_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_mol = self.F_mol
return self.imol[IDs] / F_mol if F_mol else 0.
get_molar_composition = get_molar_fraction
def get_mass_fraction(self, IDs):
"""
Return mass fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kg/hr')
>>> s1.get_mass_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_mass = self.F_mass
return self.imass[IDs] / F_mass if F_mass else 0.
get_mass_composition = get_mass_fraction
def get_volumetric_fraction(self, IDs):
"""
Return volumetric fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_volumetric_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_vol = self.F_vol
return self.ivol[IDs] / F_vol if F_vol else 0.
get_volumetric_composition = get_volumetric_fraction
def get_concentration(self, IDs):
"""
Return concentration of given chemicals in kmol/m3.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_concentration(('Water', 'Ethanol'))
array([27.672, 4.265])
"""
F_vol = self.F_vol
return self.imol[IDs] / F_vol if F_vol else 0.
@property
def P_vapor(self):
"""Vapor pressure of liquid."""
chemicals = self.vle_chemicals
F_l = eq.LiquidFugacities(chemicals, self.thermo)
IDs = tuple([i.ID for i in chemicals])
x = self.get_molar_fraction(IDs)
if x.sum() < 1e-12: return 0
return F_l(x, self.T).sum()
def receive_vent(self, other, energy_balance=True):
"""
        Receive vapors from another stream by vapor-liquid equilibrium between
        a gas and a liquid stream, assuming only a small amount of chemicals
        in vapor-liquid equilibrium is present.
Examples
--------
The energy balance is performed by default:
>>> import thermosteam as tmo
>>> chemicals = tmo.Chemicals(['Water', 'Ethanol', 'Methanol', tmo.Chemical('N2', phase='g')], cache=True)
>>> tmo.settings.set_thermo(chemicals)
>>> s1 = tmo.Stream('s1', N2=20, units='m3/hr', phase='g', T=330)
>>> s2 = tmo.Stream('s2', Water=10, Ethanol=2, T=330)
>>> s1.receive_vent(s2)
>>> s1.show(flow='kmol/hr')
Stream: s1
phase: 'g', T: 323.13 K, P: 101325 Pa
flow (kmol/hr): Water 0.0798
Ethanol 0.0889
N2 0.739
Set energy balance to false to receive vent isothermally:
>>> import thermosteam as tmo
>>> chemicals = tmo.Chemicals(['Water', 'Ethanol', 'Methanol', tmo.Chemical('N2', phase='g')], cache=True)
>>> tmo.settings.set_thermo(chemicals)
>>> s1 = tmo.Stream('s1', N2=20, units='m3/hr', phase='g', T=330)
>>> s2 = tmo.Stream('s2', Water=10, Ethanol=2, T=330)
>>> s1.receive_vent(s2, energy_balance=False)
>>> s1.show(flow='kmol/hr')
Stream: s1
phase: 'g', T: 330 K, P: 101325 Pa
flow (kmol/hr): Water 0.111
Ethanol 0.123
N2 0.739
"""
assert self.phase == 'g', 'stream must be a gas to receive vent'
ms = tmo.Stream(None, T=self.T, P=self.P, thermo=self.thermo)
ms.mix_from([self, other], energy_balance=False)
if energy_balance: ms.H = H = self.H + other.H
ms.vle._setup()
chemicals = ms.vle_chemicals
F_l = eq.LiquidFugacities(chemicals, ms.thermo)
IDs = tuple([i.ID for i in chemicals])
x = other.get_molar_fraction(IDs)
T = ms.T
P = ms.P
vapor = ms['g']
liquid = ms['l']
F_mol_vapor = vapor.F_mol
mol_old = liquid.imol[IDs]
if energy_balance:
def equilibrium_approximation(T):
f_l = F_l(x, T)
y = f_l / P
mol_new = F_mol_vapor * y
vapor.imol[IDs] = mol_new
liquid.imol[IDs] = mol_old - mol_new
index = liquid.mol < 0.
vapor.mol[index] += liquid.mol[index]
liquid.mol[index] = 0
ms.H = H
return ms.T
flx.wegstein(equilibrium_approximation, T)
else:
f_l = F_l(x, T)
y = f_l / P
mol_new = F_mol_vapor * y
vapor.imol[IDs] = mol_new
liquid.imol[IDs] = mol_old - mol_new
index = liquid.mol < 0.
vapor.mol[index] += liquid.mol[index]
liquid.mol[index] = 0
self.copy_like(vapor)
other.copy_like(liquid)
self.T = other.T = ms.T
### Casting ###
@property
def islinked(self):
"""
[bool] Whether data regarding the thermal condition, material flow rates,
and phases are shared with other streams.
"""
return self._islinked
@property
def phases(self):
"""tuple[str] All phases present."""
return (self.phase,)
@phases.setter
def phases(self, phases):
if self.phases == phases: return
if self._islinked: self.unlink()
if len(phases) == 1:
self.phase = phases[0]
else:
self.__class__ = tmo.MultiStream
self._imol = self._imol.to_material_indexer(phases)
self._streams = {}
self._vle_cache = eq.VLECache(self._imol,
self._thermal_condition,
self._thermo,
self._bubble_point_cache,
self._dew_point_cache)
self._lle_cache = eq.LLECache(self._imol,
self._thermal_condition,
self._thermo)
self._sle_cache = eq.SLECache(self._imol,
self._thermal_condition,
self._thermo)
### Representation ###
def _basic_info(self):
return f"{type(self).__name__}: {self.ID or ''}\n"
def _info_phaseTP(self, phase, T_units, P_units):
T = thermo_units.convert(self.T, 'K', T_units)
P = thermo_units.convert(self.P, 'Pa', P_units)
s = '' if isinstance(phase, str) else 's'
return f" phase{s}: {repr(phase)}, T: {T:.5g} {T_units}, P: {P:.6g} {P_units}\n"
def _source_info(self):
source = self.source
return f"{source}-{source.outs.index(self)}" if source else self.ID
def _translate_layout(self, layout, flow, composition, N):
if layout:
            for name, param in zip(('flow', 'composition', 'N'),
                                   (flow, composition, N)):
                if param is not None: raise ValueError(f'cannot specify both `layout` and `{name}`')
if layout[0] == 'c':
composition = True
layout = layout[1:]
if layout.startswith('wt'):
flow = 'kg/hr'
layout = layout[2:]
elif layout.startswith('mol'):
flow = 'kmol/hr'
layout = layout[3:]
elif layout.startswith('vol'):
flow = 'm3/hr'
layout = layout[3:]
elif layout.isdigit():
flow = 'kmol/hr'
else:
raise ValueError(
"`layout` must have the form "
"{'c' or ''}{'wt', 'mol' or 'vol'}{# or ''};"
"for example: 'cwt100' corresponds to compostion=True, "
"flow='kg/hr', and N=100."
)
if layout.isdigit():
N = int(layout)
return flow, composition, N
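    # Illustrative parses of the `layout` shorthand handled above
    # (hypothetical calls; results assume the logic as written):
    #
    #   _translate_layout('cwt100', None, None, None) -> ('kg/hr', True, 100)
    #   _translate_layout('mol', None, None, None)    -> ('kmol/hr', None, None)
    #   _translate_layout('101', None, None, None)    -> ('kmol/hr', None, 101)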
def _info(self, layout, T, P, flow, composition, N, IDs):
"""Return string with all specifications."""
flow, composition, N = self._translate_layout(layout, flow, composition, N)
from .indexer import nonzeros
basic_info = self._basic_info()
if not IDs:
IDs = self.chemicals.IDs
data = self.imol.data
else:
data = self.imol[IDs]
IDs, data = nonzeros(IDs, data)
IDs = tuple(IDs)
display_units = self.display_units
T_units = T or display_units.T
P_units = P or display_units.P
flow_units = flow or display_units.flow
N_max = display_units.N if N is None else N
basic_info += self._info_phaseTP(self.phase, T_units, P_units)
if N_max == 0:
return basic_info[:-1]
composition = display_units.composition if composition is None else composition
N_IDs = len(IDs)
if N_IDs == 0:
return basic_info + ' flow: 0'
# Start of third line (flow rates)
name, factor = self._get_flow_name_and_factor(flow_units)
indexer = getattr(self, 'i' + name)
# Remaining lines (all flow rates)
flow_array = factor * indexer[IDs]
if composition:
total_flow = flow_array.sum()
beginning = " composition: "
new_line = '\n' + 14 * ' '
flow_array = flow_array/total_flow
else:
beginning = f' flow ({flow_units}): '
new_line = '\n' + len(beginning) * ' '
flow_rates = ''
lengths = [len(i) for i in IDs]
maxlen = max(lengths) + 2
too_many_chemicals = N_IDs > N_max
N = N_max if too_many_chemicals else N_IDs
for i in range(N):
spaces = ' ' * (maxlen - lengths[i])
if i: flow_rates += new_line
flow_rates += IDs[i] + spaces + f'{flow_array[i]:.3g}'
if too_many_chemicals: flow_rates += new_line + '...'
if composition:
dashes = '-' * (maxlen - 2)
flow_rates += f"{new_line}{dashes} {total_flow:.3g} {flow_units}"
return (basic_info
+ beginning
+ flow_rates)
def show(self, layout=None, T=None, P=None, flow=None, composition=None, N=None, IDs=None):
"""
Print all specifications.
Parameters
----------
layout : str, optional
            Convenience parameter for passing `flow`, `composition`, and `N`.
            Must have the form {'c' or ''}{'wt', 'mol' or 'vol'}{# or ''}.
            For example: 'cwt100' corresponds to composition=True, flow='kg/hr',
and N=100.
T : str, optional
Temperature units.
P : str, optional
Pressure units.
flow : str, optional
Flow rate units.
composition : bool, optional
Whether to show composition.
N : int, optional
Number of compounds to display.
IDs : tuple[str], optional
            IDs of compounds to display. Defaults to all chemicals.
Notes
-----
Default values are stored in `Stream.display_units`.
"""
print(self._info(layout, T, P, flow, composition, N, IDs))
_ipython_display_ = show
def print(self, units=None):
"""
        Print in a format that you can use to recreate the stream.
Parameters
----------
units : str, optional
Units of measure for material flow rates. Defaults to 'kmol/hr'
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream(ID='s1',
... Water=20, Ethanol=10, units='kg/hr',
... T=298.15, P=101325, phase='l')
>>> s1.print(units='kg/hr')
Stream(ID='s1', phase='l', T=298.15, P=101325, Water=20, Ethanol=10, units='kg/hr')
>>> s1.print() # Units default to kmol/hr
Stream(ID='s1', phase='l', T=298.15, P=101325, Water=1.11, Ethanol=0.2171, units='kmol/hr')
"""
if not units:
units = 'kmol/hr'
flow = self.mol
else:
flow = self.get_flow(units)
chemical_flows = utils.repr_IDs_data(self.chemicals.IDs, flow)
price = utils.repr_kwarg('price', self.price)
print(f"{type(self).__name__}(ID={repr(self.ID)}, phase={repr(self.phase)}, T={self.T:.2f}, "
f"P={self.P:.6g}{price}{chemical_flows}, units={repr(units)})")
| 2.15625 | 2 |
lombscargle/utils.py | jakevdp/nfftls | 2 | 12769512 | <filename>lombscargle/utils.py
from __future__ import division
import numpy as np
from pynfft.nfft import NFFT
def complex_exponential_sum(t, y, f0, Nf, df, method='auto'):
"""Compute a trigonometric sum over frequencies f = f0 + df * range(Nf)
    The computed sum is S_m = sum_k [y_k * exp(2 * pi * i * f_m * t_k)]
Parameters
----------
t : array_like, length N
Sorted array of times
y : array_like, length N
array of weights
f0 : float
minimum frequency
Nf : integer
number of frequencies. Must be an even number
df : float
frequency step. Must be smaller than 1 / (t.max() - t.min()).
method : string, default='auto'
method to use. Must be one of ['auto', 'direct', 'slow', 'nfft', 'ndft']
Returns
-------
S : ndarray, length Nf
resulting sums at each frequency
"""
assert method in ['direct', 'nfft', 'ndft', 'slow', 'auto']
freq = f0 + df * np.arange(Nf)
t = np.asarray(t)
assert t.ndim == 1
Nf = int(Nf)
assert Nf > 0
assert Nf % 2 == 0
y = np.asarray(y)
y = np.broadcast_to(y, np.broadcast(y, t).shape)
T = t.max() - t.min()
F_O = 1. / (df * T)
assert F_O > 1
if method == 'direct':
return np.dot(y, np.exp(2j * np.pi * freq * t[:, np.newaxis]))
else:
# shift the times to the range (-0.5, 0.5)
Tstar = F_O * T
t0 = t.min() - 0.5 * (Tstar - T)
q = -0.5 + (t - t0) / Tstar
# wrap phase into the weights
x = y * np.exp(2j * np.pi * (f0 * Tstar + 0.5 * Nf) * q)
Qn = simple_complex_exponential_sum(t=q, y=x, Nf=Nf//2, method=method)
# multiply by the phase that corrects for the above shifts
return Qn * np.exp(2j * np.pi * (t0 + 0.5 * Tstar) * freq)
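# Example usage (a sketch; assumes pynfft is installed and importable):
#
#   rng = np.random.RandomState(0)
#   t = np.sort(rng.uniform(0, 100, 50))
#   y = np.sin(2 * np.pi * 0.1 * t)
#   S_nfft = complex_exponential_sum(t, y, f0=0.05, Nf=64, df=0.001)
#   S_direct = complex_exponential_sum(t, y, f0=0.05, Nf=64, df=0.001,
#                                      method='direct')
#   np.allclose(S_nfft, S_direct)  # True up to NFFT accuracy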
def simple_complex_exponential_sum(t, y, Nf, method='auto'):
"""Compute a trigonometric sum over frequencies f = range(-Nf, Nf)
    The computed sum is S_m = sum_k [y_k * exp(2 * pi * i * m * t_k)]
Parameters
----------
t : array_like, length N
        Sorted array of times in the interval (-0.5, 0.5)
y : array_like, broadcastable with t
array of weights
Nf : integer
number of frequencies
method : string, default='auto'
method to use. Must be one of ['auto', 'slow', 'nfft', 'ndft']
Returns
-------
S : ndarray, shape y.shape[:-1] + (2 * Nf,)
resulting sums at each frequency
"""
assert method in ['nfft', 'ndft', 'slow', 'auto']
t = np.asarray(t)
assert t.ndim == 1
assert t.min() > -0.5 and t.max() < 0.5
Nf = int(Nf)
y = np.asarray(y)
y = np.broadcast_to(y, np.broadcast(y, t).shape)
outshape = y.shape[:-1] + (2 * Nf,)
y = y.reshape(-1, len(t))
if method == 'auto':
method = 'nfft'
if method == 'slow':
freq = np.arange(-Nf, Nf)
results = np.dot(y, np.exp(2j * np.pi * freq * t[:, np.newaxis]))
else:
use_dft = (method == 'ndft')
plan = NFFT(2 * Nf, len(t))
plan.x = t
plan.precompute()
results = np.zeros((y.shape[0], 2 * Nf), dtype=complex)
for i in range(y.shape[0]):
plan.f = y[i]
results[i] = plan.adjoint(use_dft=use_dft).copy()
return results.reshape(outshape)
| 2.8125 | 3 |
mushroom_rl/environments/mujoco_envs/humanoid_gait/humanoid_gait.py | PuzeLiu/mushroom-rl | 344 | 12769513 | import mujoco_py
from pathlib import Path
from mushroom_rl.utils import spaces
from mushroom_rl.environments.mujoco import MuJoCo, ObservationType
from mushroom_rl.utils.running_stats import *
from ._external_simulation import NoExternalSimulation, MuscleSimulation
from .reward_goals import CompleteTrajectoryReward, VelocityProfileReward, \
MaxVelocityReward, NoGoalReward, HumanoidTrajectory
from mushroom_rl.environments.mujoco_envs.humanoid_gait.utils import quat_to_euler
class HumanoidGait(MuJoCo):
"""
Mujoco simulation of a Humanoid Model, based on:
"A deep reinforcement learning based approach towards generating human
walking behavior with a neuromuscular model".
<NAME>., <NAME>., <NAME>., and <NAME>. (2019).
"""
def __init__(self, gamma=0.99, horizon=2000, n_intermediate_steps=10,
use_muscles=True, goal_reward=None, goal_reward_params=None,
obs_avg_window=1, act_avg_window=1):
"""
Constructor.
Args:
gamma (float, 0.99): discount factor for the environment;
horizon (int, 2000): horizon for the environment;
n_intermediate_steps (int, 10): number of steps to apply the same
action to the environment and wait for the next observation;
use_muscles (bool): if external muscle simulation should be used
for actions. If not apply torques directly to the joints;
goal_reward (string, None): type of trajectory used for training
Options available:
'trajectory' - Use trajectory in assets/GaitTrajectory.npz
as reference;
'com_vel_trajectory' - Use only velocity trajectory of COM in
assets/GaitTrajectory.npz as reference;
'vel_profile' - Velocity goal for the center of mass of the
model to follow. The goal is given by a
VelocityProfile instance (or subclass).
And should be included in the
``goal_reward_params``;
'max_vel' - Tries to achieve the maximum possible
velocity;
None - Follows no goal(just tries to survive);
goal_reward_params (dict, None): params needed for creation goal
reward;
obs_avg_window (int, 1): size of window used to average
observations;
act_avg_window (int, 1): size of window used to average actions.
"""
self.use_muscles = use_muscles
self.goal_reward = goal_reward
self.act_avg_window = act_avg_window
self.obs_avg_window = obs_avg_window
model_path = Path(__file__).resolve().parent.parent / "data" / "humanoid_gait" / "human7segment.xml"
action_spec = ["right_hip_frontal", "right_hip_sagittal",
"right_knee", "right_ankle", "left_hip_frontal",
"left_hip_sagittal", "left_knee", "left_ankle",
]
observation_spec = [("root", ObservationType.JOINT_POS),
("right_hip_frontal", ObservationType.JOINT_POS),
("right_hip_sagittal", ObservationType.JOINT_POS),
("right_knee", ObservationType.JOINT_POS),
("right_ankle", ObservationType.JOINT_POS),
("left_hip_frontal", ObservationType.JOINT_POS),
("left_hip_sagittal", ObservationType.JOINT_POS),
("left_knee", ObservationType.JOINT_POS),
("left_ankle", ObservationType.JOINT_POS),
("root", ObservationType.JOINT_VEL),
("right_hip_frontal", ObservationType.JOINT_VEL),
("right_hip_sagittal", ObservationType.JOINT_VEL),
("right_knee", ObservationType.JOINT_VEL),
("right_ankle", ObservationType.JOINT_VEL),
("left_hip_frontal", ObservationType.JOINT_VEL),
("left_hip_sagittal", ObservationType.JOINT_VEL),
("left_knee", ObservationType.JOINT_VEL),
("left_ankle", ObservationType.JOINT_VEL),
]
collision_groups = [("floor", ["floor"]),
("left_foot", ["left_foot"]),
("right_foot", ["right_foot"])
]
super().__init__(model_path.as_posix(), action_spec, observation_spec, gamma=gamma,
horizon=horizon, n_substeps=1,
n_intermediate_steps=n_intermediate_steps,
collision_groups=collision_groups)
if use_muscles:
self.external_actuator = MuscleSimulation(self._sim)
self.info.action_space = spaces.Box(
*self.external_actuator.get_action_space())
else:
self.external_actuator = NoExternalSimulation()
low, high = self.info.action_space.low.copy(),\
self.info.action_space.high.copy()
self.norm_act_mean = (high + low) / 2.0
self.norm_act_delta = (high - low) / 2.0
self.info.action_space.low[:] = -1.0
self.info.action_space.high[:] = 1.0
if goal_reward_params is None:
goal_reward_params = dict()
if goal_reward == "trajectory" or goal_reward == "com_vel_trajectory":
control_dt = self._sim.model.opt.timestep * self._n_intermediate_steps
self.goal_reward = CompleteTrajectoryReward(self._sim, control_dt,
**goal_reward_params)
elif goal_reward == "vel_profile":
self.goal_reward = VelocityProfileReward(self._sim, **goal_reward_params)
elif goal_reward == "max_vel":
self.goal_reward = MaxVelocityReward(self._sim, **goal_reward_params)
elif goal_reward is None:
self.goal_reward = NoGoalReward()
else:
raise NotImplementedError("The specified goal reward has not been"
"implemented: ", goal_reward)
if goal_reward == "trajectory":
self.reward_weights = dict(live_reward=0.10, goal_reward=0.40,
traj_vel_reward=0.50,
move_cost=0.10, fall_cost=0.00)
elif goal_reward == "com_vel_trajectory":
self.reward_weights = dict(live_reward=0.00, goal_reward=0.00,
traj_vel_reward=1.00,
move_cost=0.00, fall_cost=0.00)
else:
self.reward_weights = dict(live_reward=0.10, goal_reward=0.90,
traj_vel_reward=0.00,
move_cost=0.10, fall_cost=0.00)
self.info.observation_space = spaces.Box(*self._get_observation_space())
self.mean_grf = RunningAveragedWindow(shape=(6,),
window_size=n_intermediate_steps)
self.mean_vel = RunningExpWeightedAverage(shape=(3,), alpha=0.005)
self.mean_obs = RunningAveragedWindow(
shape=self.info.observation_space.shape,
window_size=obs_avg_window
)
self.mean_act = RunningAveragedWindow(
shape=self.info.action_space.shape, window_size=act_avg_window)
def step(self, action):
action = ((action.copy() * self.norm_act_delta) + self.norm_act_mean)
state, reward, absorbing, info = super().step(action)
self.mean_obs.update_stats(state)
self.mean_vel.update_stats(self._sim.data.qvel[0:3])
avg_obs = self.mean_obs.mean
avg_obs[13:16] = self.mean_vel.mean
return avg_obs, reward, absorbing, info
def render(self):
if self._viewer is None:
self._viewer = mujoco_py.MjViewer(self._sim)
self._viewer._render_every_frame = True
self._viewer.render()
def _setup(self):
self.goal_reward.reset_state()
start_obs = self._reset_model(qpos_noise=0.0, qvel_noise=0.0)
start_vel = (
self._sim.data.qvel[0:3] if (self.goal_reward is None or isinstance(
self.goal_reward, MaxVelocityReward)
) else self.goal_reward.get_observation())
self.mean_vel.reset(start_vel)
self.mean_obs.reset(start_obs)
self.mean_act.reset()
self.external_actuator.reset()
def _reward(self, state, action, next_state):
live_reward = 1.0
goal_reward = self.goal_reward(state, action, next_state)
traj_vel_reward = 0.0
if isinstance(self.goal_reward, HumanoidTrajectory):
traj_vel_reward = np.exp(-20.0 * np.square(
next_state[13] - next_state[33]))
move_cost = self.external_actuator.cost(
state, action / self.norm_act_delta, next_state)
fall_cost = 0.0
if self._has_fallen(next_state):
fall_cost = 1.0
total_reward = self.reward_weights["live_reward"] * live_reward \
+ self.reward_weights["goal_reward"] * goal_reward \
+ self.reward_weights["traj_vel_reward"] * traj_vel_reward \
- self.reward_weights["move_cost"] * move_cost \
- self.reward_weights["fall_cost"] * fall_cost
return total_reward
def _is_absorbing(self, state):
return (self._has_fallen(state)
or self.goal_reward.is_absorbing(state)
or self.external_actuator.is_absorbing(state)
)
def _get_observation_space(self):
sim_low, sim_high = (self.info.observation_space.low[2:],
self.info.observation_space.high[2:])
grf_low, grf_high = (-np.ones((6,)) * np.inf,
np.ones((6,)) * np.inf)
r_low, r_high = self.goal_reward.get_observation_space()
a_low, a_high = self.external_actuator.get_observation_space()
return (np.concatenate([sim_low, grf_low, r_low, a_low]),
np.concatenate([sim_high, grf_high, r_high, a_high]))
def _reset_model(self, qpos_noise=0.0, qvel_noise=0.0):
self._set_state(self._sim.data.qpos + np.random.uniform(
low=-qpos_noise, high=qpos_noise, size=self._sim.model.nq),
self._sim.data.qvel + np.random.uniform(low=-qvel_noise,
high=qvel_noise,
size=self._sim.model.nv)
)
return self._create_observation()
def _set_state(self, qpos, qvel):
old_state = self._sim.get_state()
new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
old_state.act, old_state.udd_state)
self._sim.set_state(new_state)
self._sim.forward()
@staticmethod
def _has_fallen(state):
torso_euler = quat_to_euler(state[1:5])
return ((state[0] < 0.90) or (state[0] > 1.20)
or abs(torso_euler[0]) > np.pi / 12
or (torso_euler[1] < -np.pi / 12) or (torso_euler[1] > np.pi / 8)
or (torso_euler[2] < -np.pi / 4) or (torso_euler[2] > np.pi / 4)
)
def _create_observation(self):
"""
Creates full vector of observations:
obs[0:13] -> qpos(from mujoco obs)
obs[0] -> torso z pos
obs[1:5] -> torso quaternion orientation
obs[5:13] -> leg joints angle
obs[13:27] -> qvel(from mujoco obs)
obs[13:16] -> torso linear velocity
obs[16:19] -> torso angular velocity
obs[19:27] -> leg joints angular velocity
        obs[27:33] -> ground reaction forces
            obs[27:30] -> ground force on right foot (xyz)
            obs[30:33] -> ground force on left foot (xyz)
        obs[33:33+len(goal_observation)] -> observations related
to the goal
obs[last_obs_id - len(ext_actuator_obs): last_obs_id]
-> observations related to the external actuator
"""
obs = np.concatenate([super(HumanoidGait, self)._create_observation()[2:],
self.mean_grf.mean / 1000.,
self.goal_reward.get_observation(),
self.external_actuator.get_observation()
]).flatten()
return obs
def _preprocess_action(self, action):
action = self.external_actuator.preprocess_action(action)
self.mean_act.update_stats(action)
return self.mean_act.mean
def _step_init(self, state, action):
self.external_actuator.initialize_internal_states(state, action)
def _compute_action(self, action):
action = self.external_actuator.external_stimulus_to_joint_torques(
action
)
return action
def _simulation_post_step(self):
grf = np.concatenate(
[self._get_collision_force("floor", "right_foot")[:3],
self._get_collision_force("floor", "left_foot")[:3]]
)
self.mean_grf.update_stats(grf)
def _step_finalize(self):
self.goal_reward.update_state()
self.external_actuator.update_state()
def _get_body_center_of_mass_pos(self, body_name):
return self._sim.data.subtree_com[
self._sim.model._body_name2id[body_name]]
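# A minimal usage sketch (assumes a working mujoco_py installation and the
# bundled human7segment.xml assets; parameter values are illustrative):
#
#   import numpy as np
#   env = HumanoidGait(horizon=1000, use_muscles=False, goal_reward='max_vel')
#   obs = env.reset()
#   action = np.zeros(env.info.action_space.shape)
#   obs, reward, absorbing, info = env.step(action)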
| 2.25 | 2 |
tests/test_shellcraft.py | maebert/shellcraft | 14 | 12769514 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_shellcraft.
Tests for `shellcraft` module.
"""
from __future__ import unicode_literals
import os
import pytest
from click.testing import CliRunner
import pkg_resources
from shellcraft.cli import get_game, cli
from shellcraft.shellcraft import Game
@pytest.fixture(scope="module")
def game():
"""Create a local game."""
runner = CliRunner()
with runner.isolated_filesystem():
game = get_game("test.json")
return game
def load_game(filename):
"""Load game from fixtures."""
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "fixtures", filename
)
print(filename)
return Game.load(filename)
def test_basic_cli(game):
"""Test that the interface loads."""
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
assert "Welcome to ShellCraft" in result.output
help_result = runner.invoke(cli, ["--help"])
assert help_result.exit_code == 0
assert "Show this message and exit." in help_result.output
version_result = runner.invoke(cli, ["--version"])
assert version_result.exit_code == 0
assert pkg_resources.get_distribution("shellcraft").version in version_result.output
def test_contract(game):
game = load_game("save1.json")
assert game.resources.get("clay") == 30
def test_game_run(game):
"""Test that the basic game flow works."""
commands = """
mine clay
mine clay
mine clay
mine clay
craft shovel
mine clay
mine clay
mine clay
mine clay
mine clay
craft sturdy_shovel
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
research small_cart
craft small_cart
mine clay"""
runner = CliRunner()
game.state.debug = True
for command in commands.splitlines():
assert not command or command.split()[0] in list(
game.state.commands_enabled
), "{} not in {}".format(command.split()[0], list(game.state.commands_enabled))
runner.invoke(cli, command.split())
game.tutorial.cont()
assert "small_cart" in game.state.research_completed
assert game.state.tutorial_step == 11
assert game.resources.get("clay") == 4
| 2.296875 | 2 |
iot_device/env.py | iot49/iot-device | 0 | 12769515 | <reponame>iot49/iot-device
# env.py
from .utilities import cd
import os
"""
Environment Variables
Name Default
* IOT_PROJECTS ~/projects
* IOT_DEVICES $IOT_PROJECTS/devices
* IOT_LIBS $IOT_PROJECTS/libs
* IOT_SECRETS $IOT_LIBS/secrets.py
"""
class Env:
@staticmethod
def expand_path(path):
return os.path.expanduser(os.path.expandvars(path))
@staticmethod
def iot_projects():
return os.getenv('IOT_PROJECTS', '~/projects')
@staticmethod
def iot_devices():
return os.getenv('IOT_DEVICES', os.path.join(Env.iot_projects(), 'devices'))
@staticmethod
def iot_libs():
return os.getenv('IOT_LIBS', os.path.join(Env.iot_projects(), 'libs'))
@staticmethod
def iot_secrets():
return os.getenv('IOT_SECRETS', os.path.join(Env.iot_libs(), 'secrets.py'))
@staticmethod
def print_config():
print("IOT_PROJECTS:", Env.iot_projects())
print("IOT_DEVICES: ", Env.iot_devices())
print("IOT_LIBS: ", Env.iot_libs())
print("IOT_SECRETS: ", Env.iot_secrets())
# ensure this is defined ...
if not os.getenv('IOT_PROJECTS'):
os.environ['IOT_PROJECTS'] = '~/projects'
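# Usage sketch:
#
#   from iot_device.env import Env
#   Env.print_config()             # show the resolved directories
#   Env.expand_path('~/projects')  # e.g. '/home/user/projects'
#   Env.iot_secrets()              # path of the secrets file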
| 2.4375 | 2 |
CodeWars/Python/Dashatize it.py | nirgn975/CodeWars | 28 | 12769516 | <reponame>nirgn975/CodeWars<gh_stars>10-100
"""
Given a number, return a string with dash `'-'` marks before and after each odd digit, but do not begin or end the string with a dash mark.
"""
def dashatize(num):
if not num and not (num == 0):
return 'None'
result = '-'.join(str(abs(num)))
copy_result = result[:]
occurrences = 0
for index, value in enumerate(result):
if value == '-':
            # Drop the dash when both neighboring digits are even.
if int(result[index - 1]) % 2 == 0 and int(result[index + 1]) % 2 == 0:
copy_result = copy_result[:index - occurrences] + copy_result[index + 1 - occurrences:]
occurrences += 1
return copy_result
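# Expected behavior, illustrated (outputs follow the logic above):
#   dashatize(274)    -> '2-7-4'
#   dashatize(5311)   -> '5-3-1-1'
#   dashatize(86320)  -> '86-3-20'
#   dashatize(-28369) -> '28-3-6-9'
#   dashatize(None)   -> 'None'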
| 3.875 | 4 |
data_processing/prepare_validation_data.py | saverymax/qdriven-chiqa-summarization | 10 | 12769517 | """
Module for classes that prepare the validation dataset from the MedInfo dataset.
Data format will be {key: {'question': question, 'summary': summ, 'articles': articles} ...}
Additionally, supports a question-driven format for summarization. For example:
python prepare_validation_data.py --pg --add-q
"""
import json
import argparse
import re
import spacy
def get_args():
"""
    Argument definitions
"""
parser = argparse.ArgumentParser(description="Arguments for data exploration")
parser.add_argument("--pg",
dest="pg",
action="store_true",
help="tag the sentences with <s> and </s>, for use with pointer generator network")
parser.add_argument("--bart",
dest="bart",
action="store_true",
help="Prepare data for BART")
parser.add_argument("--add-q",
dest="add_q",
action="store_true",
help="Concatenate the question to the beginning of the text for question driven summarization")
return parser
class MedInfo():
def __init__(self):
"""
Initiate class for processing medinfo collection
"""
self.nlp = spacy.load('en_core_web_sm')
if args.add_q:
self.q_name = "_with_question"
else:
self.q_name = "_without_question"
def _load_collection(self):
"""
Load medinfo collection prepared in the process_medinfo.py script
"""
with open("data/medinfo_collection.json", "r", encoding="utf-8") as f:
medinfo = json.load(f)
return medinfo
def _format_summary_sentences(self, summary):
"""
Split summary into sentences and add sentence tags to the strings: <s> and </s>
"""
tokenized_abs = self.nlp(summary)
summary = " ".join(["<s> {s} </s>".format(s=s.text.strip()) for s in tokenized_abs.sents])
return summary
def save_section2answer_validation_data(self, tag_sentences):
"""
For questions that have a corresponding section-answer pair, save the
validation data in following format
{'question': {'summary': text, 'articles': text}}
"""
dev_dict = {}
medinfo = self._load_collection()
data_pair = 0
Q_END = " [QUESTION?] "
for i, question in enumerate(medinfo):
try:
# There may be multiple answers per question, but for the sake of the validation set,
# just use the first answer
if 'section_text' in medinfo[question][0]:
article = medinfo[question][0]['section_text']
summary = medinfo[question][0]['answer']
# Stripping of whitespace was done in processing script for section and full page
# but not for answer or question
summary = re.sub(r"\s+", " ", summary)
question = re.sub(r"\s+", " ", question)
if args.add_q:
article = question + Q_END + article
assert len(summary) <= (len(article) + 10)
if tag_sentences:
summary = self._format_summary_sentences(summary)
tag_string = "_s-tags"
else:
tag_string = ""
data_pair += 1
dev_dict[i] = {'question': question, 'summary': summary, 'articles': article}
except AssertionError:
print("Answer longer than summary. Skipping element")
print("Number of page-section pairs:", data_pair)
with open("data/medinfo_section2answer_validation_data{0}{1}.json".format(self.q_name, tag_string), "w", encoding="utf-8") as f:
json.dump(dev_dict, f, indent=4)
def process_data():
"""
Main function for saving data
"""
# Run once for each
if args.pg:
MedInfo().save_section2answer_validation_data(tag_sentences=True)
if args.bart:
MedInfo().save_section2answer_validation_data(tag_sentences=False)
if __name__ == "__main__":
global args
args = get_args().parse_args()
process_data()
| 3.21875 | 3 |
algorithms/sorting_algorithms/merge_sort.py | onyonkaclifford/data-structures-and-algorithms | 0 | 12769518 | <gh_stars>0
from typing import List
def merge_sort(x: List) -> List:
"""Merge sort divides a list into two smaller lists, and recursively repeats the process on the two smaller lists
till lists of single elements are obtained. These smaller lists are then combined to form a single sorted list of
the original elements. It has an average time complexity of Θ(nlogn). Time complexity for the worst case is
O(nlogn). Time complexity for the best case is Ω(nlogn).
>>> merge_sort([4, 2, 3, 1, 0, 5])
[0, 1, 2, 3, 4, 5]
:param x: list to be sorted
:return: new sorted list
"""
length = len(x)
if length <= 1:
return x
mid_idx = length // 2
left = merge_sort(x[0:mid_idx])
right = merge_sort(x[mid_idx:length])
result = []
while len(left) > 0 and len(right) > 0:
if left[0] <= right[0]:
result.append(left.pop(0))
else:
result.append(right.pop(0))
result.extend(left)
result.extend(right)
return result
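# Minimal driver sketch (not part of the original module): running this file
# directly executes the docstring example above.
if __name__ == "__main__":
import doctest
doctest.testmod()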
| 4.15625 | 4 |
TTI_computing/test.py | XDZhelheim/TrafficDataAnalysis | 0 | 12769519 | <reponame>XDZhelheim/TrafficDataAnalysis
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
# Distance values (km)
x = np.array([0, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 8, 9, 10])
# Similarity values
y = np.array([0.8579087793827057, 0.8079087793827057, 0.7679087793827057, 0.679087793827057,
0.5579087793827057, 0.4579087793827057, 0.3079087793827057, 0.3009087793827057,
0.2579087793827057, 0.2009087793827057, 0.1999087793827057, 0.1579087793827057,
0.0099087793827057, 0.0079087793827057, 0.0069087793827057, 0.0019087793827057,
0.0000087793827057])
# x-axis values for interpolation: 100 points from 0 to 10 with spacing 0.1
xnew = np.arange(0, 10, 0.1)
# Build the cubic interpolation function
func = interpolate.interp1d(x, y, kind='cubic')
# Generate ynew from xnew via func; ynew has the same length as xnew
ynew = func(xnew)
# Original polyline
plt.plot(x, y, "r", linewidth=1)
# Smoothed curve after interpolation
plt.plot(xnew, ynew)
# Axis labels
plt.xlabel("The distance between POI and user(km)")
plt.ylabel("probability")
# Title
plt.title("The content similarity of different distance")
# Axis ranges
plt.xlim(0, 10)
plt.ylim(0, 1)
plt.show() | 2.609375 | 3 |
dcclient/tests/test_rpc.py | asgard-lab/driver | 0 | 12769520 | <filename>dcclient/tests/test_rpc.py<gh_stars>0
import testtools
import mock
import neutron.plugins.ml2.drivers.datacom.dcclient.rpc as rpc
import neutron.plugins.ml2.drivers.datacom.dcclient.xml_manager.\
data_structures as ds
from StringIO import StringIO as sio
import gzip
# mock requests lib so it does not actually try to send the packet
import requests
requests.post = mock.Mock()
class main_test(testtools.TestCase):
def test_send_xml(self):
test_url = '1.1.1.1'
test_auth = ('user', '<PASSWORD>')
test_method = 'https'
test_rpc = rpc.RPC(test_auth[0], test_auth[1], test_url, test_method)
rpc.get = mock.Mock()
cfg = ds.CfgData()
vlan = ds.VlanGlobal(42, name="vlan_test", ports=ds.Pbits([1, 3, 4]))
cfg.vlans.append(vlan)
test_rpc.send_xml(cfg.as_xml_text())
# mock_calls returns the calls executed to this method, the 0 means we
# are getting the first (and only) call, and the 2 means we are getting
# the keyword parameters.
parameters = requests.post.mock_calls[0][2]
# retrieve url
received_url = parameters['url']
expected_url = 'https://1.1.1.1/System/File/file_config.html'
self.assertEqual(expected_url, received_url)
# retrieve data from the parameters.
data = parameters['data']
expected_xml = '<cfg_data><vlan_global id0="42"><vid>42</vid>' + \
'<active>1</active><name>vlan_test</name>' + \
'<pbmp_untagged id0="0"><pbits id0="0">13</pbits>' + \
'</pbmp_untagged></vlan_global></cfg_data>'
# Since the XML has to be the last parameter to be passed, we have to
# get the last field.
zippedXML = data.fields[-1][-1][-2]
# decompress to do the comparison.
# get the file with the zipped xml as content
zipFileObject = sio(zippedXML)
# get the actual file (from which we read)
with gzip.GzipFile(fileobj=zipFileObject, mode='r') as zipFile:
received_xml = zipFile.read()
zipFileObject.close()
self.assertEqual(expected_xml, received_xml)
| 2.40625 | 2 |
logger/logger_meta/__init__.py | JiahuiLei/Pix2Surf | 26 | 12769521 | from .metric_logger import MetricLogger
from .image_logger import ImageLogger
from .model_logger import ModelLogger
from .xls_logger import XLSLogger
from .obj_logger import ObjectLogger
LOGGER_REGISTED = {
'metric': MetricLogger,
'image': ImageLogger,
'model': ModelLogger,
'xls': XLSLogger,
'obj': ObjectLogger,
}
| 1.234375 | 1 |
casino/test/test_roulette_cancellation.py | gauravvazirani/ooad | 0 | 12769522 | from src import roulette_cancellation
from src import bet
from src import table
from src import wheel
from src import roulette_game
import unittest
class TestCancellation(unittest.TestCase):
def setUp(self):
self.wheel = wheel.Wheel()
self.table = table.Table(minimum=10, maximum=1000)
self.game = roulette_game.RouletteGame(self.wheel, self.table)
self.player = roulette_cancellation.RouletteCancellation(table=self.table, wheel=self.wheel)
def test_placeBets(self):
self.assertEqual(len(self.table.bets), 0)
self.assertEqual(self.player.stake, 10000)
self.player.placeBets(self.game)
self.assertEqual(len(self.table.bets), 1)
self.assertEqual(self.player.stake, 9993)
def test_win(self):
self.assertEqual(len(self.player.sequence), 6)
if len(self.player.sequence)==1:
bet_amount = self.player.sequence[0]
elif len(self.player.sequence)>1:
bet_amount = self.player.sequence[0] + self.player.sequence[-1]
self.player.win(bet.Bet(outcome=self.wheel.all_outcomes.get('Black'), amount=bet_amount))
self.assertEqual(len(self.player.sequence), 4)
def test_lose(self):
self.assertEqual(len(self.player.sequence), 6)
self.player.lose()
self.assertEqual(len(self.player.sequence), 7)
self.assertEqual(self.player.sequence[-1], self.player.sequence[0]+self.player.sequence[-2])
def tearDown(self):
self.wheel = None
self.table = None
self.player = None
if __name__ == '__main__':
unittest.main()
| 3 | 3 |
code/gini_calculator.py | leifos/retrievability | 0 | 12769523 | # AUTHOR: <NAME>
# 13-02-2022
# calculate the gini coefficient give the retrievability file
import os
import argparse
from collections import defaultdict
def check_file_exists(filename):
if filename and not os.path.exists(filename):
print("{0} Not Found".format(filename))
quit(1)
def calculate_gini(list_of_values):
# https://planspace.org/2013/06/21/how-to-calculate-gini-coefficient-from-raw-data-in-python/
sorted_list = sorted(list_of_values)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(list_of_values) / 2.
return (fair_area - area) / fair_area
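# Illustrative sanity checks (values not from the original script):
# calculate_gini([1.0, 1.0, 1.0, 1.0]) -> 0.0 (perfect equality)
# calculate_gini([0.0, 0.0, 0.0, 1.0]) -> 0.75 (mass concentrated in one doc)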
def process_results(ret_file):
ret_scores = []
ret_total = 0.0
with open(ret_file, "r") as rf:
while rf:
line = rf.readline().strip()
if not line:
break
(doc_id, score) = line.split('\t')
doc_id = doc_id.strip()
score = float(score.strip())
ret_scores.append(score)
ret_total += score
rf.close()
print(f'Read in {len(ret_scores)} scores.')
print(f'Total Retrievability Mass: {ret_total:.4f}')
g = calculate_gini(ret_scores)
print(f'Gini Coefficient is: {g:.4f}')
def parse_args():
arg_parser = argparse.ArgumentParser(description="Gini Coefficient Calculator")
arg_parser.add_argument("ret_file", help="A retrievability file. Two colum tab/space sep file with fields:"
"doc_id retrievability_score")
args = arg_parser.parse_args()
return args
def main(ret_file):
print(f'About to compute the Gini given the retrievability file {ret_file}')
process_results(ret_file)
print(f'Done!')
if __name__ == '__main__':
args = parse_args()
check_file_exists(args.ret_file)
main(args.ret_file) | 3.578125 | 4 |
ycimpute/imputer/iterforest.py | egemenzeytinci/ycimpute | 74 | 12769524 | from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_array
import numpy as np
from ..utils.tools import Solver
class MissForest(Solver):
def __init__(
self,
n_estimators=300,
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features='auto',
max_samples=None,
normalizer='min_max'):
"""
Parameters
----------
n_estimators: integer, optional (default=10)
max_depth: integer or None, optional (default=None)
The maximum depth of the tree.
If None, then nodes are expanded until all leaves are pure
or until all leaves contain less than min_samples_split samples.
min_samples_split: int, float, optional (default=2)
The minimum number of samples required to split an internal node
min_samples_leaf: int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves
at least min_samples_leaf training samples in each of the left and right branches.
This may have the effect of smoothing the model, especially in regression.
max_features: int, float, string or None, optional (default=”auto”)
The number of features to consider when looking for the best split
If int, then consider max_features features at each split.
If float, then max_features is a fraction and int(max_features * n_features) features are considered at each split.
If “auto”, then max_features=n_features.
If “sqrt”, then max_features=sqrt(n_features).
If “log2”, then max_features=log2(n_features).
If None, then max_features=n_features.
max_samples: int or float, default=None
If bootstrap is True, the number of samples to draw from X to train each base estimator.
If None (default), then draw X.shape[0] samples.
If int, then draw max_samples samples.
If float, then draw max_samples * X.shape[0] samples. Thus, max_samples should be in the interval (0, 1)
"""
self.coltype_dict = None
self.mask_memo_dict = None
self.sorted_col = None
self.stop = False
self.rf_reg = RandomForestRegressor(n_estimators=n_estimators,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
min_samples_split=min_samples_split)
self.rf_cla = RandomForestClassifier(n_estimators=n_estimators,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
min_samples_split=min_samples_split)
self.imp_continuous_index = None
self.imp_categorical_index = None
self.normalizer = normalizer
Solver.__init__(self,
normalizer=normalizer)
def solve(self, X, missing_mask):
X = check_array(X, force_all_finite=False)
self.sorted_col = self.sort_col(missing_mask)
self.coltype_dict = self._judge_type(X)
self.imp_continuous_index, self.imp_categorical_index = \
self.get_type_index(missing_mask, self.coltype_dict)
differ_categorical = float('inf')
differ_continuous = float('inf')
init_fill = X
while self.stop is False:
differ_categorical_old = differ_categorical
differ_continuous_old = differ_continuous
x_old_imp = init_fill
x_new_imp = []
for col in self.sorted_col:
tmp = []
if self.coltype_dict[col] == 'categorical':
model = self.rf_cla
else:
model = self.rf_reg
x_obs, y_obs, x_mis = self.split(init_fill, col, missing_mask)
model.fit(x_obs, y_obs)
y_mis = model.predict(x_mis)
for ele in y_mis:
tmp.append(ele)
x_new_imp.append(ele)
init_fill[:, col][missing_mask[:,col]] = tmp
x_new_imp = np.asarray(x_new_imp)
differ_continuous, differ_categorical = self._lose_func(x_new_imp, x_old_imp)
if differ_continuous >= differ_continuous_old and differ_categorical >= differ_categorical_old:
self.stop = True
return init_fill
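# Hypothetical usage sketch (the import path and the `complete` entry point,
# presumably inherited from Solver, are assumptions):
#
# import numpy as np
# from ycimpute.imputer.iterforest import MissForest
# X = np.array([[1.0, 2.0], [np.nan, 3.0], [4.0, np.nan]])
# X_filled = MissForest().complete(X)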
def _lose_func(self, imp_new, imp_old):
"""
Evaluation Method, mathematical concept are available at 'https://www.stu-zhouyc.com/iterForest/metrics'
:param imputed_data_old: a dict like {'col name':[predicted value1,...],...}
the dict contains original missing index which is part of the original data
its the last estimated data
accompany with brand-new imputed data, they are going to be evaluate.
:return:
"""
continuous_imp_new = imp_new[self.imp_continuous_index]
continuous_imp_old = imp_old[self.imp_continuous_index]
categorical_imp_new = imp_new[self.imp_categorical_index]
categorical_imp_old = imp_old[self.imp_categorical_index]
try:
continuous_div = continuous_imp_new - continuous_imp_old
continuous_div = continuous_div.dot(continuous_div)
continuous_sum = continuous_imp_new.dot(continuous_imp_new)
categorical_count = np.sum(categorical_imp_new == categorical_imp_old)
categorical_var_len = len(categorical_imp_new)
except Exception:
categorical_var_len = 0.01
categorical_count = 0
continuous_div = 0
continuous_sum = 0.001
if categorical_var_len == 0:
categorical_differ = 0
else:
categorical_differ = categorical_count / categorical_var_len
if continuous_sum == 0:
continuous_differ = 0
else:
continuous_differ = continuous_div / continuous_sum
return continuous_differ, categorical_differ | 2.96875 | 3 |
zisan/Seg/davisinteractive/metrics/__init__.py | JintuZheng/zisan | 40 | 12769525 | from __future__ import absolute_import
from .jaccard import batched_f_measure, batched_jaccard
| 1.007813 | 1 |
Algorithm/jump_step.py | FYPYTHON/method | 0 | 12769526 | <reponame>FYPYTHON/method
# coding=utf-8
import time
# nums = [2, 3, 1, 1, 4]
# nums = [1, 2, 3]
# nums = [2, 0, 1]
# nums = [1, 1, 1, 1]
nums = [2, 1, 1, 1, 1]
class Solution(object):
def jump(self, nums):
"""
nums = [2,3,1,1,4]
Each element is the maximum number of steps you can jump from that position;
find the minimum number of jumps needed to reach the end of the array.
:rtype: int
"""
length = len(nums)
index = 0
count = 0
if length <= 1:
return count
while True:
if nums[index] >= length - 1 - index:
count += 1
return count
max = 0
tmp = 0
for i in range(1, nums[index] + 1):
if i + nums[i + index] > max:
max = i + nums[i + index]
tmp = i + index
index = tmp
count += 1
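# Worked example (matching the docstring): jump([2, 3, 1, 1, 4]) == 2,
# i.e. jump from index 0 to index 1, then from index 1 to the last index.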
if __name__ == "__main__":
start = time.time()
so = Solution()
print(so.jump(nums))
end = time.time()
print("time:", end - start) | 3.75 | 4 |
discretize/utils/code_utils.py | ngodber/discretize | 123 | 12769527 | <filename>discretize/utils/code_utils.py
import numpy as np
import warnings
SCALARTYPES = (complex, float, int, np.number)
def is_scalar(f):
"""Determine if the input argument is a scalar.
The function **is_scalar** returns *True* if the input is an integer,
float or complex number. The function returns *False* otherwise.
Parameters
----------
f :
Any input quantity
Returns
-------
bool :
- *True* if the input argument is an integer, float or complex number
- *False* otherwise
"""
if isinstance(f, SCALARTYPES):
return True
elif isinstance(f, np.ndarray) and f.size == 1 and isinstance(f[0], SCALARTYPES):
return True
return False
def as_array_n_by_dim(pts, dim):
"""Ensures the given array will have *dim* columns.
The function **as_array_n_by_dim** will examine the *pts* array,
and coerce it to be at least if the number of columns is equal to *dim*.
This is similar to the :func:`numpy.atleast_2d`, except that it ensures that then
input has *dim* columns, and it appends a :data:`numpy.newaxis` to 1D arrays
instead of prepending.
Parameters
----------
pts : array_like
array to check.
dim : int
The number of columns which *pts* should have
Returns
-------
(n_pts, dim) numpy.ndarray
verified array
"""
if isinstance(pts, list):
pts = np.array(pts)
if not isinstance(pts, np.ndarray):
raise TypeError("pts must be a numpy array")
if dim > 1:
pts = np.atleast_2d(pts)
elif len(pts.shape) == 1:
pts = pts[:, np.newaxis]
if pts.shape[1] != dim:
raise ValueError(
"pts must be a column vector of shape (nPts, {0:d}) not ({1:d}, {2:d})".format(
*((dim,) + pts.shape)
)
)
return pts
def requires(modules):
"""Decorator to wrap functions with soft dependencies.
This function was inspired by the `requires` function of pysal,
which is released under the 'BSD 3-Clause "New" or "Revised" License'.
https://github.com/pysal/pysal/blob/master/pysal/lib/common.py
Parameters
----------
modules : dict
Dictionary containing soft dependencies, e.g.,
{'matplotlib': matplotlib}.
Returns
-------
decorated_function : function
Original function if all soft dependencies are met, otherwise
it returns an empty function which prints why it is not running.
"""
# Check the required modules, add missing ones in the list `missing`.
missing = []
for key, item in modules.items():
if item is False:
missing.append(key)
def decorated_function(function):
"""Wrap function."""
if not missing:
return function
else:
def passer(*args, **kwargs):
print(("Missing dependencies: {d}.".format(d=missing)))
print(("Not running `{}`.".format(function.__name__)))
return passer
return decorated_function
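# Illustrative use, mirroring the docstring (module names assumed):
#
# try:
#     import matplotlib
# except ImportError:
#     matplotlib = False
#
# @requires({"matplotlib": matplotlib})
# def plot_mesh(mesh):
#     ...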
def deprecate_class(removal_version=None, new_location=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
def decorator(cls):
my_name = cls.__name__
parent_name = cls.__bases__[0].__name__
message = f"{my_name} has been deprecated, please use {parent_name}."
if removal_version is not None:
message += (
f" It will be removed in version {removal_version} of discretize."
)
else:
message += " It will be removed in a future version of discretize."
# stash the original initialization of the class
cls._old__init__ = cls.__init__
def __init__(self, *args, **kwargs):
warnings.warn(message, Warning)
self._old__init__(*args, **kwargs)
cls.__init__ = __init__
if new_location is not None:
parent_name = f"{new_location}.{parent_name}"
cls.__doc__ = f""" This class has been deprecated, see `{parent_name}` for documentation"""
return cls
return decorator
def deprecate_module(old_name, new_name, removal_version=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
message = f"The {old_name} module has been deprecated, please use {new_name}."
if removal_version is not None:
message += f" It will be removed in version {removal_version} of discretize"
else:
message += " It will be removed in a future version of discretize."
message += " Please update your code accordingly."
warnings.warn(message, Warning)
def deprecate_property(new_name, old_name, removal_version=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def get_dep(self):
class_name = type(self).__name__
message = (
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag
)
warnings.warn(message, Warning)
return getattr(self, new_name)
def set_dep(self, other):
class_name = type(self).__name__
message = (
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag
)
warnings.warn(message, Warning)
setattr(self, new_name, other)
doc = f"""
`{old_name}` has been deprecated. See `{new_name}` for documentation.
See Also
--------
{new_name}
"""
return property(get_dep, set_dep, None, doc)
def deprecate_method(new_name, old_name, removal_version=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def new_method(self, *args, **kwargs):
class_name = type(self).__name__
warnings.warn(
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag,
Warning,
)
return getattr(self, new_name)(*args, **kwargs)
doc = f"""
`{old_name}` has been deprecated. See `{new_name}` for documentation
See Also
--------
{new_name}
"""
new_method.__doc__ = doc
return new_method
def deprecate_function(new_function, old_name, removal_version=None, future_warn=False):
if future_warn:
Warning = FutureWarning
else:
Warning = DeprecationWarning
new_name = new_function.__name__
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def dep_function(*args, **kwargs):
warnings.warn(
f"{old_name} has been deprecated, please use {new_name}." + tag,
Warning,
)
return new_function(*args, **kwargs)
doc = f"""
`{old_name}` has been deprecated. See `{new_name}` for documentation
See Also
--------
{new_name}
"""
dep_function.__doc__ = doc
return dep_function
# DEPRECATIONS
isScalar = deprecate_function(is_scalar, "isScalar", removal_version="1.0.0", future_warn=False)
asArray_N_x_Dim = deprecate_function(
as_array_n_by_dim, "asArray_N_x_Dim", removal_version="1.0.0", future_warn=False
)
| 3.765625 | 4 |
libs/python/stupendous_cow/importer/generic_ss/director.py | tomault/stupendous-cow | 0 | 12769528 | <filename>libs/python/stupendous_cow/importer/generic_ss/director.py
from stupendous_cow.data_model import Article, ArticleType, Category, Venue
from stupendous_cow.extractors import ExtractedDocument
from stupendous_cow.util import normalize_title
from stupendous_cow.builders import ArticleBuilder, BooleanPropertyBinder, \
ConstantPropertyBinder, ConstantPropertyExtractor, \
DatabasePropertyBinder, DocumentPropertyBinder, IntPropertyBinder, \
PropertyBinder, PropertyExtractionError, SpreadsheetPropertyBinder, \
SpreadsheetPropertyExtractor
from stupendous_cow.importers.generic_ss.configuration import Configuration
from stupendous_cow.importers.abstracts import ABSTRACT_READER_FACTORIES
from stupendous_cow.importers.extractors import DOCUMENT_EXTRACTOR_FACTORIES
from stupendous_cow.importers.spreadsheets import SpreadsheetPath
import logging
import os.path
class DocumentGroupProcessor:
_empty_extracted_document = ExtractedDocument('', (), '', '')
def __init__(self, configuration, db, venue, year, abstracts):
def set_title_extractor():
ts = configuration.title_source
if isinstance(ts, SpreadsheetPath):
self.sheet_names.add(ts.sheet)
self._set_title = SpreadsheetPropertyBinder(ts, 'title', '')
elif ts == 'extracted':
self._set_title = DocumentPropertyBinder.TITLE
else:
raise ValueError('Invalid title source')
def set_abstract_extractor():
ab_src = configuration.abstract_source
if not ab_src:
self._set_abstract = ConstantPropertyBinder('abstract', '')
elif isinstance(ab_src, SpreadsheetPath):
self.sheet_names.add(ab_src.sheet)
self._set_abstract = \
SpreadsheetPropertyBinder(ab_src, 'abstract', '')
elif ab_src == 'file':
self._set_abstract = self._set_abstract_from_map
elif ab_src == 'extracted':
self._set_abstract = DocumentPropertyBinder.ABSTRACT
else:
raise ValueError('Invalid abstract source')
def ss_constant_or_optional_extractor(source, default_value):
if not source:
return ConstantPropertyExtractor(default_value)
elif isinstance(source, SpreadsheetPath):
self.sheet_names.add(source.sheet)
return SpreadsheetPropertyExtractor(source, default_value)
else:
return ConstantPropertyExtractor(source)
def ss_constant_or_optional_binder(source, property_name,
default_value):
extractor = ss_constant_or_optional_extractor(source, default_value)
return PropertyBinder(property_name, extractor)
def set_priority_extractor():
base_extractor = \
ss_constant_or_optional_extractor(\
configuration.priority_source, 0)
self._set_priority = IntPropertyBinder('priority', base_extractor)
def set_article_type_extractor():
base_extractor = \
ss_constant_or_optional_extractor(\
configuration.article_type_source, None)
self._set_article_type = \
DatabasePropertyBinder('article_type', db.article_types,
base_extractor, ArticleType)
def set_category_extractor():
base_extractor = \
ss_constant_or_optional_extractor(\
configuration.category_source, None)
self._set_category = \
DatabasePropertyBinder('category', db.categories,
base_extractor, Category)
def set_summary_extractor():
content_source = configuration.summary_content_source
if not content_source:
self._set_summary = ConstantPropertyBinder('summary', '')
elif isinstance(content_source, SpreadsheetPath):
self.sheet_names.add(content_source.sheet)
self._set_summary = \
SpreadsheetPropertyBinder(content_source, 'summary', '')
else:
raise ValueError('Invalid summary content source')
def set_is_read_extractor():
base_extractor = \
ss_constant_or_optional_extractor(configuration.is_read_source,
False)
self._set_is_read = BooleanPropertyBinder('is_read', base_extractor)
def set_document_extractor():
doc_ext = configuration.article_extractor
if not doc_ext:
self.document_extractor = None
else:
try:
fac = DOCUMENT_EXTRACTOR_FACTORIES[doc_ext]
except KeyError:
msg = 'Unknown document extractor "%s"'
raise ValueError(msg % doc_ext)
self.document_extractor = fac()
self.group_name = configuration.config_name
self.content_dirs = configuration.content_dirs
self.downloaded_as_path = configuration.downloaded_as_source
self.sheet_names = set()
self.db = db
self.venue = venue
self.year = year
self.abstracts = abstracts
set_document_extractor()
set_title_extractor()
set_abstract_extractor()
set_priority_extractor()
set_article_type_extractor()
set_category_extractor()
set_summary_extractor()
set_is_read_extractor()
self._get_downloaded_as = \
SpreadsheetPropertyExtractor(self.downloaded_as_path, None)
def process(self, workbook, db):
row_iterators = dict((n, iter(workbook[n])) for n in self.sheet_names)
self._next_row(row_iterators) # Skip headers
row_index = 2
num_articles = 0
num_failed = 0
while row_iterators:
rows = self._next_row(row_iterators)
logging.debug('Process row %s from sheets %s' % (row_index,
', '.join(rows)))
downloaded_as = self._get_downloaded_as(rows, None)
if not downloaded_as:
logging.debug('Row %s has no downloaded_as property' % row_index)
pdf_path = None
document = self._empty_extracted_document
else:
pdf_path = self._find_article_pdf(downloaded_as)
if pdf_path:
document = self._fetch_document(pdf_path)
else:
msg = 'Could not find PDF file for article downloaded ' + \
'as %s.pdf'
logging.error(msg % downloaded_as)
document = self._empty_extracted_document
logging.debug('Build article')
builder = ArticleBuilder(db)
builder.set_ss_info(self.downloaded_as_path.sheet, row_index)
builder.set_year(self.year)
builder.set_downloaded_as(downloaded_as)
builder.set_pdf_file(pdf_path)
builder.set_venue(self.venue)
try:
self._set_title(row_index, rows, document, builder)
self._set_abstract(row_index, rows, document, builder)
self._set_priority(row_index, rows, document, builder)
self._set_article_type(row_index, rows, document, builder)
self._set_category(row_index, rows, document, builder)
self._set_summary(row_index, rows, document, builder)
self._set_is_read(row_index, rows, document, builder)
article = builder.build()
except PropertyExtractionError as e:
msg = 'Could not construct article for %s, row %d (%s)'
logging.error(msg % (self.downloaded_as_path.sheet,
row_index, e.reason))
article = None
if article:
self._save_article(db, article)
num_articles += 1
else:
num_failed += 1
row_index += 1
return (num_articles, num_failed)
def _next_row(self, row_iterators):
rows = { }
for (name, i) in list(row_iterators.items()):
try:
rows[name] = next(i)
except StopIteration:
del row_iterators[name]
return rows
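# `rows` maps each sheet name to that sheet's current row, so every
# configured extractor can read from the sheet it was bound to.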
def _find_article_pdf(self, downloaded_as):
if not downloaded_as.endswith('.pdf'):
downloaded_as += '.pdf'
for content_dir in self.content_dirs:
path = os.path.join(content_dir, downloaded_as)
if os.path.isfile(path):
return path
return None
def _fetch_document(self, path):
if not self.document_extractor:
logging.debug('Document not loaded because no extractor is ' + \
'configured')
return self._empty_extracted_document
try:
logging.debug('Load document from %s' % path)
return self.document_extractor.extract(path)
except PdfExtractionError as e:
logging.error(e.details)
return self._empty_extracted_document
def _set_abstract_from_map(self, ss_rows, document, builder):
key = normalize_title(builder.title)
try:
builder.set_abstract(self.abstracts[key])
except KeyError:
msg = 'Could not find abstract for document [%s]'
logging.warn(msg % builder.title)
def _save_article(self, db, article):
nt = normalize_title(article.title)
logging.debug('Save article with normalized title [%s]' % nt)
retrieved = db.articles.retrieve(normalized_title = nt,
year = article.year,
venue = article.venue)
if not retrieved:
logging.debug('Write new article to database')
db.articles.add(article)
elif len(retrieved) == 1:
logging.debug('Update existing article')
retrieved[0].update(article)
db.articles.update(retrieved[0])
else:
msg = 'Retrieved %d articles for %s %s with normalized title ' + \
'[%s]. This should not have happened. The article was ' + \
'not updated. Please investigate.'
logging.error(msg % (article.venue.abbreviation, article.year, nt))
class Director:
def __init__(self, configuration, db):
venue = db.venues.with_abbreviation(configuration.venue)
if not venue:
raise ValueError('Unknown venue "%s"' % configuration.venue)
self.venue = venue
self.year = configuration.year
self.groups = configuration.groups
def process(self, workbook, db):
total_imported = 0
total_failed = 0
for configuration in self.groups:
config_name = configuration.config_name
logging.info('Importing document group %s' % config_name)
if configuration.abstract_source == 'file':
abstract_map = \
self._load_abstracts(configuration.abstracts_file_reader,
configuration.abstracts_file_name)
else:
abstract_map = None
processor = DocumentGroupProcessor(configuration, db, self.venue,
self.year, abstract_map)
(num_imported, num_failed) = processor.process(workbook, db)
logging.info('Loaded %d articles (%d failed) from %s' % (num_imported, num_failed, config_name))
total_imported += num_imported
total_failed += num_failed
logging.info('Imported %d articles from %d groups with %d failures' % (total_imported, len(self.groups), total_failed))
return (total_imported, total_failed)
def _load_abstracts(self, reader_name, file_name):
reader = ABSTRACT_READER_FACTORIES[reader_name](file_name)
abstracts = dict((normalize_title(title), body) \
for (title, body) in reader)
logging.info('Loaded abstracts from %s using %s' % (file_name,
reader_name))
return abstracts
| 2.046875 | 2 |
pptx_template/__init__.py | skar404/pptx-template | 1 | 12769529 | <reponame>skar404/pptx-template<gh_stars>1-10
# coding=utf-8
__version__ = '0.2.7.6'
| 0.945313 | 1 |
Multidimensional_Lists/09E_ot_kolejka_github.py | MihailMarkovski/Python-Advanced-2020 | 4 | 12769530 | <filename>Multidimensional_Lists/09E_ot_kolejka_github.py<gh_stars>1-10
(rows_count, columns_count) = map(int, input().split())
matrix = []
new_matrix = []
player_position = []
player_wins = False
for i in range(rows_count):
row = [x for x in input()]
if "P" in row:
player_position = [i, row.index("P")]
row[row.index("P")] = "."
row2 = row.copy()
matrix.append(row)
new_matrix.append(row2)
commands = list(input())
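# Each turn: move the player one cell (U/D/L/R), then expand every bomb "B"
# into its four orthogonal neighbours; landing on a bomb kills the player,
# while stepping off the grid wins.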
for command in commands:
next_player_position = []
if command == "U":
next_player_position = [player_position[0] - 1, player_position[1]]
elif command == "D":
next_player_position = [player_position[0] + 1, player_position[1]]
elif command == "L":
next_player_position = [player_position[0], player_position[1] - 1]
elif command == "R":
next_player_position = [player_position[0], player_position[1] + 1]
for r in range(rows_count):
for ch in range(columns_count):
if matrix[r][ch] == "B":
if r - 1 >= 0:
new_matrix[r - 1][ch] = "B"
if ch - 1 >= 0:
new_matrix[r][ch - 1] = "B"
if ch + 1 < columns_count:
new_matrix[r][ch + 1] = "B"
if r + 1 < rows_count:
new_matrix[r + 1][ch] = "B"
matrix = [x.copy() for x in new_matrix]
if 0 <= next_player_position[0] <= rows_count - 1 and 0 <= next_player_position[1] <= columns_count - 1:
player_position = next_player_position
if matrix[player_position[0]][player_position[1]] == "B":
break
else:
player_wins = True
break
[print("".join(x)) for x in matrix]
if player_wins:
print(f"won: {player_position[0]} {player_position[1]}")
else:
print(f"dead: {player_position[0]} {player_position[1]}")
| 2.9375 | 3 |
mute_alsa.py | bkrishnamachari/musiclifx | 1 | 12769531 | <filename>mute_alsa.py
# code to mute alsa error messages
# taken entirely from * http://stackoverflow.com/questions/36956083/how-can-the-terminal-output-of-executables-run-by-python-functions-be-silenced-i/36966379#36966379
# license file does not apply to this code
import ctypes
ERROR_HANDLER_FUNC = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_int,
ctypes.c_char_p, ctypes.c_int,
ctypes.c_char_p)
def py_error_handler(filename, line, function, err, fmt):
pass
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
try:
asound = ctypes.cdll.LoadLibrary('libasound.so.2')
asound.snd_lib_error_set_handler(c_error_handler)
except OSError:
pass
| 2.234375 | 2 |
test/fx2trt/converters/acc_op/test_unsqueeze.py | batzner/pytorch | 0 | 12769532 | <reponame>batzner/pytorch
import torch
import torch.fx
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
import torch.nn as nn
from caffe2.torch.fb.fx2trt.tests.test_utils import AccTestCase, InputTensorSpec
class TestUnsqueeze(AccTestCase):
def test_unsqueeze(self):
class Unsqueeze(nn.Module):
def forward(self, x):
return torch.unsqueeze(x, 1)
inputs = [torch.randn(1, 2, 3)]
self.run_test(Unsqueeze(), inputs, expected_ops={acc_ops.unsqueeze})
def test_unsqueeze_with_dynamic_shape(self):
class Unsqueeze(nn.Module):
def forward(self, x):
return torch.unsqueeze(x, 1)
input_specs = [
InputTensorSpec(
shape=(-1, 2, 3),
dtype=torch.float32,
shape_ranges=[((1, 2, 3), (2, 2, 3), (3, 2, 3))],
),
]
self.run_test_with_dynamic_shape(
Unsqueeze(), input_specs, expected_ops={acc_ops.unsqueeze}
)
| 2.34375 | 2 |
reinvent_scoring/scoring/enums/rocs_specific_parameters_enum.py | MolecularAI/reinvent-scoring | 0 | 12769533 | <filename>reinvent_scoring/scoring/enums/rocs_specific_parameters_enum.py
from dataclasses import dataclass
@dataclass(frozen=True)
class ROCSSpecificParametersEnum():
ROCS_INPUT = "rocs_input"
INPUT_TYPE = "input_type"
SHAPE_WEIGHT = "shape_weight"
COLOR_WEIGHT = "color_weight"
SIM_MEASURE = "similarity_measure"
MAX_CPUS = "max_num_cpus"
CUSTOM_CFF = "custom_cff"
SAVE_ROCS_OVERLAYS = "save_rocs_overlays"
ROCS_OVERLAYS_DIR = "rocs_overlays_dir"
ROCS_OVERLAYS_PREFIX = "rocs_overlays_prefix"
ENUM_STEREO = "enumerate_stereo"
MAX_STEREO = "max_stereocenters"
NEGATIVE_VOLUME = "negative_volume"
PROTEIN_NEG_VOL_FILE = "protein_neg_vol_file"
LIGAND_NEG_VOL_FILE = "ligand_neg_vol_file"
MAX_CONFS = "max_confs"
EWINDOW = "ewindow"
| 2.3125 | 2 |
paranestamol/__init__.py | appetrosyan/paranestamol | 2 | 12769534 | import os
from paranestamol.utils import Legend, cleanupFileRoot
if os.getenv('QT_API') != "PySide2":
raise RuntimeError(f'TL;DR \n\n`QT_API=PySide2 python3 -m paranestamol `\n\n The moron who\
designed `matplotlib_backend_qtquick` hard-coded a preference for\
pyqt5 for their backend, despite supporting PySide2 full well. \nAdd\
this to your .basrc file as a workaround, or better yet, inconvenience\
said moron at https://github.com/jmitrevs/matplotlib_backend_qtquick')
# This is not an unused import. This is because Python has the Zen
# statement of "There should be (preferably) one obvious way to do
# things" that roughly translates to "There should be at least two
# different ways to do one thing. The more obvious way(s) should be
# wrong, incompatible or somehow deficient but in a non-obvious
# manner. " Why can't you agree on keeping *one* backend to Qt?
from matplotlib_backend_qtquick.backend_qtquickagg import FigureCanvasQtQuickAgg as FigureCanvasQML
from matplotlib_backend_qtquick.qt_compat import QtQml, QtGui, QtWidgets, QtCore
| 2.03125 | 2 |
units.py | solomondg/ModSim_Proj2 | 0 | 12769535 | <gh_stars>0
from pint import UnitRegistry
units = UnitRegistry()
| 1.242188 | 1 |
dmarc_reporting/__init__.py | virtualtam/django-dmarc-reporting | 4 | 12769536 | """django-dmarc-reporting"""
__author__ = "VirtualTam"
__title__ = "django-dmarc-reporting"
__version__ = '0.1'
default_app_config = 'dmarc_reporting.apps.DmarcReportingConfig'
| 1.125 | 1 |
tgs/scripts/train_keras.py | zaxcie/TGS-Salt-identification-Kaggle | 0 | 12769537 | <gh_stars>0
from tgs.models.UNet import get_unet
from tgs.data.images import HEIGHT, WIDTH, Image, ImageSet
from tgs.data.split import get_train_val_ids
from tgs.utils.mlflow import find_or_create_experiment
from tgs.utils.metrics import tf_mean_iou
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, History
import mlflow
import os
import json
# TODO MLFlow
# TODO Parse arguments
if __name__ == '__main__':
EXP_NAME = "UNet"
exp = find_or_create_experiment(EXP_NAME, mlflow.tracking.list_experiments())
if isinstance(exp, list):
raise TypeError("Multiple experiment with that name where found.") # Not sure it's possible tho
with mlflow.start_run(experiment_id=exp.experiment_id):
active_run = mlflow.active_run()
# Idea Could be cool to have a wrapper for this...
# Parameters
loss = "binary_crossentropy"
mlflow.log_param("loss", loss)
optimizer = "SGD"
mlflow.log_param("optimizer", optimizer)
es_patience = 3
mlflow.log_param("es_parience", es_patience)
exp_type = "unittest"
mlflow.log_param("exp_type", exp_type)
batch_size = 32
mlflow.log_param("batch_size", batch_size)
epochs = 1000
mlflow.log_param("epochs", epochs)
# Callbacks
early_stopping = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=es_patience,
verbose=1, mode='auto')
tensorboard = TensorBoard(log_dir=active_run.info.artifact_uri, histogram_freq=0,
write_graph=True, write_images=True)
checkpoint = ModelCheckpoint(active_run.info.artifact_uri + "/model.h5",
monitor='val_loss', verbose=1, save_best_only=True,
save_weights_only=False, mode='auto', period=1)
history = History()
callbacks = [early_stopping, tensorboard, checkpoint, history]
model = get_unet((HEIGHT, WIDTH, 1))
imgs_train = list()
imgs_val = list()
img_path = "data/raw/train"
# TODO Later - Cache ImageSet object numpy save
train_ids, val_ids = get_train_val_ids(img_path + "/images/")
for img_id in os.listdir(img_path + "/images/"):
if img_id in train_ids:
imgs_train.append(Image(img_id, img_path))
else:
imgs_val.append(Image(img_id, img_path))
trainset = ImageSet(imgs_train, HEIGHT, WIDTH, 1)
valset = ImageSet(imgs_val, HEIGHT, WIDTH, 1)
X_train, y_train = trainset.get_x_y()
X_val, y_val = valset.get_x_y()
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy', tf_mean_iou])
print(model.summary())
with open(active_run.info.artifact_uri + "/network_architecture.json", "w") as f:
json.dump(model.to_json(), f)
# TODO unit testing should be done differently
if exp_type == "unittest":
n = 16
else:
n = len(X_train)
# Launch training
model.fit(x=X_train[:n], y=y_train[:n], epochs=epochs, verbose=1, callbacks=callbacks,
validation_data=(X_val, y_val), batch_size=batch_size)
for metric in history.history:
for i in range(len(history.history[metric])):
mlflow.log_metric(metric, history.history[metric][i])
mlflow.log_metric("trained_epoch", len(history.history["loss"]))
| 2.15625 | 2 |
date.py | himanshushukla254/Python_Semantics | 0 | 12769538 | <gh_stars>0
from semantic.dates import DateService
service = DateService()
date = service.extractDate("On March 3 at 12:15pm...")
print(date);
| 2.40625 | 2 |
tricolor/tricolor.py | nathanrooy/tricolor-map-legend | 1 | 12769539 | #------------------------------------------------------------------------------+
#
# <NAME>
# Create a three color triangle
# 2017-DEC
#
#------------------------------------------------------------------------------+
#--- IMPORT DEPENDENCIES ------------------------------------------------------+
from __future__ import division
from math import sin
from math import radians
#--- MAIN ---------------------------------------------------------------------+
def calc_dist(x0, y0, x1, y1):
return ((x0 - x1)**2 + (y0 - y1)**2)**0.5
def clamp(x):
return max(0, min(int(round(x,0)), 255))
def interpolate_color(color_info, x, y):
c1_dist = calc_dist(color_info['c1_x'], color_info['c1_y'], x, y)
c2_dist = calc_dist(color_info['c2_x'], color_info['c2_y'], x, y)
c3_dist = calc_dist(color_info['c3_x'], color_info['c3_y'], x, y)
dist_total = c1_dist + c2_dist + c3_dist
c1_norm = 1 - (c1_dist / color_info['max dist'])
c2_norm = 1 - (c2_dist / color_info['max dist'])
c3_norm = 1 - (c3_dist / color_info['max dist'])
new_r = (c1_norm * color_info['c1'][0]) + (c2_norm * color_info['c2'][0]) + (c3_norm * color_info['c3'][0])
new_g = (c1_norm * color_info['c1'][1]) + (c2_norm * color_info['c2'][1]) + (c3_norm * color_info['c3'][1])
new_b = (c1_norm * color_info['c1'][2]) + (c2_norm * color_info['c2'][2]) + (c3_norm * color_info['c3'][2])
new_color_hex = "#{0:02x}{1:02x}{2:02x}".format(clamp(new_r), clamp(new_g), clamp(new_b))
return new_color_hex
def svg_color_triangle(tri_rows, c1, c2, c3, file_name, edge_len=1000):
# CALCULATE GEOMETRY CONSTANTS
tri_base_len = edge_len / tri_rows
tri_height = sin(radians(60)) * edge_len
tri_row_height = tri_height / tri_rows
# CALCULATE COLOR CONSTANTS
color_info = {}
color_info['c1_x'] = edge_len / 2
color_info['c1_y'] = tri_height - (tri_row_height / 2)
color_info['c2_x'] = tri_base_len / 2
color_info['c2_y'] = tri_row_height * 0.5
color_info['c3_x'] = edge_len - (tri_base_len / 2)
color_info['c3_y'] = tri_row_height * 0.5
color_info['max dist'] = color_info['c3_x'] - color_info['c2_x']
color_info['c1'] = c1
color_info['c2'] = c2
color_info['c3'] = c3
# CREATE INITIAL SVG FILE
with open(f'{file_name}.svg','w') as out_file:
print('<?xml version="1.0" encoding="UTF-8" standalone="no"?>', file=out_file)
print(f'<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="{edge_len}px" height="{tri_height}px">', file=out_file)
# START WITH THE TOP OF THE TRIANGLE AND WORK DOWN
for row in range(0, tri_rows):
# CALCULATE ROW CONSTANTS
row_y_top = tri_height - (tri_row_height * row)
row_y_bot = tri_height - (tri_row_height * (row + 1))
row_x_top = (edge_len/2) - (row * tri_base_len * 0.5)
row_x_bot = (edge_len/2) - ((row + 1) * tri_base_len * 0.5)
# GENERATE TRIANGLES
if row >= 1:
# CYCLE THROUGH EACH ROW
for i in range(0,row):
# GENERATE TWO TRIANGLE PAIRS
t1_0_x = row_x_top + (i * tri_base_len) # top
t1_0_y = row_y_top # top
t1_1_x = row_x_bot + (i * tri_base_len) # bottom left
t1_1_y = row_y_bot # bottom left
t1_2_x = row_x_bot + ((i+1) * tri_base_len) # bottom right
t1_2_y = row_y_bot # bottom right
t1_c_x = t1_0_x # triangle centroid
t1_c_y = (row_y_bot + row_y_top) / 2 # triangle centroid
t1_color = interpolate_color(color_info, t1_c_x, t1_c_y)
t2_0_x = row_x_top + ((i+1) * tri_base_len) # top right
t2_0_y = row_y_top # top right
t2_1_x = row_x_top + (i * tri_base_len) # top left
t2_1_y = row_y_top # top left
t2_2_x = row_x_bot + ((i+1) * tri_base_len) # bottom
t2_2_y = row_y_bot # bottom
t2_c_x = row_x_bot + ((i+1) * tri_base_len) # triangle centroid
t2_c_y =(row_y_bot + row_y_top) / 2 # triangle centroid
t2_color = interpolate_color(color_info, t2_c_x, t2_c_y)
# WRITE TRIANGLES TO SVG FILE
print('<polygon points="'+str(t1_0_x)+','+str(t1_0_y)+' '+str(t1_1_x)+','+str(t1_1_y)+' '+str(t1_2_x)+','+str(t1_2_y)+'" fill="'+t1_color+'" stroke="'+t1_color+'"/>', file=out_file)
print('<polygon points="'+str(t2_0_x)+','+str(t2_0_y)+' '+str(t2_1_x)+','+str(t2_1_y)+' '+str(t2_2_x)+','+str(t2_2_y)+'" fill="'+t2_color+'" stroke="'+t2_color+'"/>', file=out_file)
# GENERATE LAST TRIANGLE IN ROW
t3_0_x = edge_len - row_x_top # top
t3_0_y = row_y_top # top
t3_1_x = edge_len - row_x_bot - tri_base_len # bottom left
t3_1_y = row_y_bot # bottom left
t3_2_x = edge_len - row_x_bot # bottom right
t3_2_y = row_y_bot # bottom right
t3_c_x = t3_0_x # triangle centroid
t3_c_y = (row_y_bot + row_y_top) / 2 # triangle centroid
t3_color = interpolate_color(color_info, t3_c_x, t3_c_y)
# WRITE TRIANGLE TO SVG FILE
print('<polygon points="'+str(t3_0_x)+','+str(t3_0_y)+' '+str(t3_1_x)+','+str(t3_1_y)+' '+str(t3_2_x)+','+str(t3_2_y)+'" fill="'+t3_color+'" stroke="'+t3_color+'"/>', file=out_file)
# FINISH SVG FILE AND CLOSE
print('</svg>', file=out_file)
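# Hypothetical invocation (argument values assumed, not from the original script):
# svg_color_triangle(tri_rows=20, c1=(255, 0, 0), c2=(0, 255, 0),
#                    c3=(0, 0, 255), file_name='legend', edge_len=1000)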
#--- END ----------------------------------------------------------------------+
| 3.765625 | 4 |
python/pythonrc.py | inky/dotfiles | 2 | 12769540 | <reponame>inky/dotfiles
# -----------------------------------------------------------------------------
# Python repl config
# -----------------------------------------------------------------------------
import datetime
import math
import os
import random
import re
import sys
from math import *
from pydoc import pager
try:
from see import see
except ImportError:
see = dir
PY = sys.version_info[0]
class Term:
RESET = "\x1b[0m"
BOLD = "\x1b[1m"
DIM = "\x1b[2m"
UNBOLD = "\x1b[21m"
UNDIM = "\x1b[22m"
RED = "\x1b[31m"
GREEN = "\x1b[32m"
YELLOW = "\x1b[33m"
BLUE = "\x1b[34m"
PINK = "\x1b[35m"
CYAN = "\x1b[36m"
@classmethod
def color(cls, string, color, uncolor=RESET):
return ''.join((color, string, uncolor))
@classmethod
def dim(cls, string):
return ''.join((cls.DIM, string, cls.UNDIM))
@classmethod
def setup_prompt(cls):
version = '.'.join(str(s) for s in sys.version_info[:2])
sys.ps1 = '(py%s)> ' % version
sys.ps2 = '%s ' % ('.' * 8)
Term.setup_prompt()
if PY < 3:
try:
import rlcompleter
if 'libedit' in rlcompleter.readline.__doc__:
rlcompleter.readline.parse_and_bind('bind ^I rl_complete')
else:
rlcompleter.readline.parse_and_bind('tab: complete')
except ImportError:
print("Install readline for tab completion.")
print('')
def take(seq, count=1):
queue = []
for item in seq:
queue.append(item)
if len(queue) == count:
yield tuple(queue)
queue = []
if queue:
yield tuple(queue)
def pairs(seq):
return take(seq, 2)
def enhex(seq):
if isinstance(seq, str):
seq = seq.encode('utf-8')
return ' '.join(hex(b)[2:].zfill(2) for b in seq).upper()
def dehex(s, utf8=True):
s = ''.join(s.lower().split())
if not all(c in '0123456789abcdef' for c in s):
raise ValueError('Not a hex string')
byteseq = bytes(int(''.join(p), 16) for p in pairs(s))
if utf8:
try:
return byteseq.decode('utf-8')
except UnicodeDecodeError:
pass
return byteseq
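# Round-trip example: enhex('hi') -> '68 69', dehex('68 69') -> 'hi'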
| 2.140625 | 2 |
styrobot/cogs/finance.py | ThatRedKite/styrobot | 1 | 12769541 | import yfinance
import discord
from discord.ext import commands
import threading
import asyncio
import traceback
import requests
import time
class Asset(object):
def __init__(self, symbol=None, name=None, price=None, url=None):
self.symbol = symbol
self.name = name
self.price = price
self.url = url
def to_embed(self):
e = discord.Embed(title=self.symbol)
e.add_field(name='Ticker', value=self.symbol, inline=False)
e.add_field(name='Name', value=self.name, inline=False)
e.add_field(name='Price (USD)', value=f'{self.price:0.4f}', inline=False)
if self.url is not None:
e.set_image(url=self.url)
return e
class Market(object):
def __init__(self, max_reqs, wait_time):
self.semaphore = threading.Semaphore(value=max_reqs)
self.wait_time = wait_time
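# Rate limiting: a request must claim one of `max_reqs` semaphore slots, and
# the slot is only released `wait_time` seconds after the request finishes,
# throttling how quickly new requests can start.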
async def release(self):
# wait, for rate limiting
await asyncio.sleep(self.wait_time)
self.semaphore.release()
async def get(self, symbol):
for i in range(20):
if self.semaphore.acquire(blocking=False):
try:
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(None, self._get, symbol)
except Exception:
traceback.print_exc()
result = None
finally:
asyncio.create_task(self.release())
return result
await asyncio.sleep(0.1)
raise TimeoutError()
class StockMarket(Market):
def _get(self, symbol):
try:
t = yfinance.Ticker(symbol)
if symbol.startswith('^'):
return Asset(symbol=symbol, name=t.info['shortName'], price=t.info['regularMarketPrice'])
else:
return Asset(symbol=symbol, name=t.info['longName'], price=t.info['currentPrice'])
except KeyError:
return None
class OSRSMarket(Market):
base_url = 'https://secure.runescape.com/m=itemdb_oldschool/api/catalogue/items.json?category=1&alpha=%23&page=1'
def find_item(self, name, page=1):
if name[0].isnumeric():
url = f'https://secure.runescape.com/m=itemdb_oldschool/api/catalogue/items.json?category=1&alpha=%23&page={page}'
else:
url = f'https://secure.runescape.com/m=itemdb_oldschool/api/catalogue/items.json?category=1&alpha={name[0].lower()}&page={page}'
j = requests.get(url).json()
if len(j['items']) == 0:
# no more items
return None
# try to find exact items
items = [(x['name'], x['current']['price'], x['icon_large']) for x in j['items']]
for (n, p, i) in items:
if n.lower() == name:
# we found it
return (n, p, i)
# we didn't find it
# try searching deeper
# rate limiting
time.sleep(1.2)
r = self.find_item(name, page=page + 1)
if r is None:
# it doesn't exist verbatim anywhere
# try seeing if a similar item exists
for (n, p, i) in items:
if name in n.lower():
# we found it, it just has a longer name
return (n, p, i)
# it really isn't here
return None
# it does exist
return r
def _get(self, symbol):
x = self.find_item(symbol.lower())
if x is None:
return None
return Asset(symbol=symbol, name=x[0], price=x[1], url=x[2])
class ForexMarket(Market):
initial_lock = threading.Lock()
initial_done = False
full_names = {}
def _get(self, symbol):
while not self.initial_done:
if self.initial_lock.acquire(timeout=0.1):
try:
if not self.initial_done:
j = requests.get('https://api.frankfurter.app/currencies').json()
for (k, v) in j.items():
self.full_names[k] = v
self.initial_done = True
finally:
self.initial_lock.release()
ticker = symbol.upper()
if ticker not in self.full_names.keys():
for x in self.full_names:
if ticker in self.full_names[x].upper():
ticker = x
break
else:
return None
j = requests.get('https://api.frankfurter.app/latest?from=USD').json()
price = 1.0 / j['rates'][ticker]
return Asset(symbol=ticker, name=self.full_names[ticker], price=price)
class CryptoMarket(Market):
initial_lock = threading.Lock()
initial_done = False
coins = []
def _get(self, symbol):
while not self.initial_done:
if self.initial_lock.acquire(timeout=0.1):
try:
if not self.initial_done:
self.coins = requests.get('https://api.coingecko.com/api/v3/coins/list').json()
self.initial_done = True
finally:
self.initial_lock.release()
ticker = symbol
for item in self.coins:
if (item['name'].upper() == ticker.upper()) or (item['symbol'].upper() == ticker.upper()):
id = item['id']
j = requests.get(f'https://api.coingecko.com/api/v3/coins/{id}').json()
return Asset(symbol=j['symbol'].upper(), name=j['name'], price=j['market_data']['current_price']['usd'])
return None
class FinanceCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.stock_market = StockMarket(5, 30)
self.osrs_market = OSRSMarket(4, 30)
self.forex_market = ForexMarket(15, 30)
self.crypto_market = CryptoMarket(4, 30)
async def get_info(self, ticker):
if len(ticker) < 2:
raise ValueError()
ticker = ticker.strip()
t = ticker[0]
a = ticker[1:]
if any(not (c.isalnum() or (c in ' \'\"')) for c in a):
raise ValueError()
if t == '$':
return await self.stock_market.get(a)
elif t == '^':
return await self.stock_market.get('^' + a)
elif t == '%':
return await self.osrs_market.get(a)
elif t == '+':
return await self.forex_market.get(a)
elif t == ':':
return await self.crypto_market.get(a)
else:
raise ValueError()
@commands.group(name='stonks')
async def stonks(self, ctx: commands.Context):
pass
@stonks.command(name='info')
async def info(self, ctx: commands.Context, *ticker):
"""
$stock
^index
"%old school runescape"
+forex
:crypto
"""
ticker = ' '.join(ticker)
async with ctx.typing():
try:
r = await self.get_info(ticker)
if r is None:
await ctx.reply('An error occurred while fetching price data.')
else:
await ctx.reply(embed=r.to_embed())
except ValueError:
await ctx.reply('The ticker supplied is invalid')
except TimeoutError:
await ctx.reply('Too many requests; try again in a minute or so.')
except:
traceback.print_exc()
@stonks.command(name='convert')
async def convert(self, ctx: commands.Context, fro, to):
async with ctx.typing():
try:
fro_asset, to_asset = await asyncio.gather(
self.get_info(fro),
self.get_info(to)
)
if (fro_asset is None) or (to_asset is None):
await ctx.reply('An error occurred while fetching at least one of the assets')
else:
e = discord.Embed(title='Asset Conversion')
e.add_field(name='From', value=fro_asset.name, inline=False)
e.add_field(name='To', value=to_asset.name, inline=False)
e.add_field(name='Rate', value=fro_asset.price / to_asset.price, inline=False)
await ctx.send(embed=e)
except ValueError:
await ctx.reply('At least one of the tickers supplied is invalid')
except TimeoutError:
await ctx.reply('Too many requests; try again in a minute or so.')
except:
traceback.print_exc()
def setup(bot):
bot.add_cog(FinanceCog(bot))
| 2.90625 | 3 |
git-code/SDNN_python-master/src/main.py | mmaximiliano/SDNN | 0 | 12769542 | <filename>git-code/SDNN_python-master/src/main.py
"""
__author__ = <NAME>
__email__ = <EMAIL>
SDNN Implementation based on Kheradpisheh, S.R., et al. 'STDP-based spiking deep neural networks
for object recognition'. arXiv:1611.01421v1 (Nov, 2016)
"""
from SDNN_cuda import SDNN
from Classifier import Classifier
import numpy as np
from os.path import dirname, realpath
from math import floor
import time
def main():
# Flags
learn_SDNN = False # This flag toggles between Learning STDP and classify features
# or just classify by loading pretrained weights for the face/motor dataset
if learn_SDNN:
set_weights = False # Loads the weights from a path (path_set_weigths) and prevents any SDNN learning
save_weights = True # Saves the weights in a path (path_save_weigths)
save_features = True # Saves the features and labels in the specified path (path_features)
else:
set_weights = True # Loads the weights from a path (path_set_weigths) and prevents any SDNN learning
save_weights = False # Saves the weights in a path (path_save_weigths)
save_features = False # Saves the features and labels in the specified path (path_features)
# ------------------------------- Learn, Train and Test paths-------------------------------#
# Image sets directories
path = dirname(dirname(realpath(__file__)))
spike_times_learn = [path + '/datasets/LearningSet/Face/', path + '/datasets/LearningSet/Motor/']
spike_times_train = [path + '/datasets/TrainingSet/Face/', path + '/datasets/TrainingSet/Motor/']
spike_times_test = [path + '/datasets/TestingSet/Face/', path + '/datasets/TestingSet/Motor/']
# Results directories
path_set_weigths = '../results/'
path_save_weigths = '../results/'
path_features = '../results/'
# ------------------------------- SDNN -------------------------------#
# SDNN_cuda parameters
DoG_params = {'img_size': (250, 160), 'DoG_size': 7, 'std1': 1., 'std2': 2.} # img_size is (col size, row size)
total_time = 15
network_params = [{'Type': 'input', 'num_filters': 1, 'pad': (0, 0), 'H_layer': DoG_params['img_size'][1],
'W_layer': DoG_params['img_size'][0]},
{'Type': 'conv', 'num_filters': 4, 'filter_size': 5, 'th': 10.},
{'Type': 'pool', 'num_filters': 4, 'filter_size': 7, 'th': 0., 'stride': 6},
{'Type': 'conv', 'num_filters': 20, 'filter_size': 17, 'th': 60.},
{'Type': 'pool', 'num_filters': 20, 'filter_size': 5, 'th': 0., 'stride': 5},
{'Type': 'conv', 'num_filters': 20, 'filter_size': 5, 'th': 2.}]
weight_params = {'mean': 0.8, 'std': 0.01}
max_learn_iter = [0, 3000, 0, 5000, 0, 6000, 0]
stdp_params = {'max_learn_iter': max_learn_iter,
'stdp_per_layer': [0, 10, 0, 4, 0, 2],
'max_iter': sum(max_learn_iter),
'a_minus': np.array([0, .003, 0, .0003, 0, .0003], dtype=np.float32),
'a_plus': np.array([0, .004, 0, .0004, 0, .0004], dtype=np.float32),
'offset_STDP': [0, floor(network_params[1]['filter_size']),
0,
floor(network_params[3]['filter_size']/8),
0,
floor(network_params[5]['filter_size'])]}
# Create network
first_net = SDNN(network_params, weight_params, stdp_params, total_time,
DoG_params=DoG_params, spike_times_learn=spike_times_learn,
spike_times_train=spike_times_train, spike_times_test=spike_times_test, device='GPU')
# Set the weights or learn STDP
if set_weights:
weight_path_list = [path_set_weigths + 'weight_' + str(i) + '.npy' for i in range(len(network_params) - 1)]
first_net.set_weights(weight_path_list)
else:
first_net.train_SDNN()
# Save the weights
if save_weights:
weights = first_net.get_weights()
for i in range(len(weights)):
np.save(path_save_weigths + 'weight_'+str(i), weights[i])
# Get features
X_train, y_train = first_net.train_features()
X_test, y_test = first_net.test_features()
# Save X_train and X_test
if save_features:
np.save(path_features + 'X_train', X_train)
np.save(path_features + 'y_train', y_train)
np.save(path_features + 'X_test', X_test)
np.save(path_features + 'y_test', y_test)
# ------------------------------- Classify -------------------------------#
classifier_params = {'C': 1.0, 'gamma': 'auto'}
train_mean = np.mean(X_train, axis=0)
train_std = np.std(X_train, axis=0)
X_train -= train_mean
X_test -= train_mean
X_train /= (train_std + 1e-5)
X_test /= (train_std + 1e-5)
svm = Classifier(X_train, y_train, X_test, y_test, classifier_params, classifier_type='SVM')
train_score, test_score = svm.run_classiffier()
print('Train Score: ' + str(train_score))
print('Test Score: ' + str(test_score))
print('DONE')
if __name__ == '__main__':
start = time.time()
main()
end = time.time()
print(end-start)
| 2.53125 | 3 |
feature_detection/nms_test/test_num_v1.py | hadleyhzy34/point_cloud | 0 | 12769543 | import numpy as np
import time
from nms.nums_py2 import py_cpu_nms # for cpu
# from nms.gpu_nms import gpu_nms # for gpu
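# Expected contract (inferred from the call below, not verified against the
# module): py_cpu_nms takes an (N, 5) float32 array of [x1, y1, x2, y2, score]
# rows plus an IoU threshold, and returns the indices of the boxes to keep.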
np.random.seed(1)  # fixed seed so the generated boxes are reproducible
num_rois = 6000
minxy = np.random.randint(50, 145, size=(num_rois, 2))
maxxy = np.random.randint(150, 200, size=(num_rois, 2))
score = 0.8 * np.random.random_sample((num_rois, 1)) + 0.2  # scores in [0.2, 1.0)
boxes_new = np.concatenate((minxy, maxxy, score), axis=1).astype(np.float32)  # (N, 5): [x1, y1, x2, y2, score]
def nms_test_time(boxes_new):
thresh = [0.7,0.8,0.9]
T = 50
for i in range(len(thresh)):
since = time.time()
for t in range(T):
keep = py_cpu_nms(boxes_new, thresh=thresh[i]) # for cpu
# keep = gpu_nms(boxes_new, thresh=thresh[i]) # for gpu
print("thresh={:.1f}, time wastes:{:.4f}".format(thresh[i], (time.time()-since)/T))
return keep
if __name__ == "__main__":
nms_test_time(boxes_new)
| 2.265625 | 2 |
examples/apps/simple_imaging_app/__init__.py | jlvahldiek/monai-deploy-app-sdk | 28 | 12769544 | <reponame>jlvahldiek/monai-deploy-app-sdk
import os
import sys
_current_dir = os.path.abspath(os.path.dirname(__file__))
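# Put this app's directory first on sys.path so sibling modules resolve when
# the app is launched from another working directory (e.g. via a deploy CLI).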
if sys.path and os.path.abspath(sys.path[0]) != _current_dir:
sys.path.insert(0, _current_dir)
del _current_dir
| 1.523438 | 2 |
tests/MPUSensor/MPURollPitch.py | murrayireland/Rover-Code | 0 | 12769545 | #!/usr/bin/env python
"""MPURollPitch.py: Receives raw data from MPU 9DOF click IMU+Magnetometer and calculates roll and pitch angles."""
import smbus
import math
# Power management registers
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
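# 0x6b/0x6c are PWR_MGMT_1/PWR_MGMT_2 in the InvenSense MPU-60x0/9150 register map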
def read_byte(adr):
return bus.read_byte_data(address, adr)
def read_word(adr):
high = bus.read_byte_data(address, adr)
low = bus.read_byte_data(address, adr+1)
val = (high << 8) + low
return val
def read_word_2c(adr):
val = read_word(adr)
    if val >= 0x8000:
return -((65535 - val) + 1)
else:
return val
def dist(a, b):
return math.sqrt((a*a) + (b*b))
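# Tilt from the gravity vector: roll about X is atan2(y, z); pitch about Y is
# -atan2(x, sqrt(y^2 + z^2)); both are converted to degrees below.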
def get_angle_roll(x, y, z):
radians = math.atan2(y, z)
return math.degrees(radians)
def get_angle_pitch(x, y, z):
radians = -math.atan2(x, dist(y, z))
return math.degrees(radians)
bus = smbus.SMBus(1) # or bus = smbus.SMBus(0) for Revision 1 boards
address = 0x69 # Address via i2cdetect
# Now wake up the IMU as it starts in sleep mode
bus.write_byte_data(address, power_mgmt_1, 0)
try:
while True:
# Get accelerometer data
accel_xout = read_word_2c(0x3b)
accel_yout = read_word_2c(0x3d)
accel_zout = read_word_2c(0x3f)
        # Scale raw readings to g (default ±2 g range: 16384 LSB/g)
accel_xout_scaled = accel_xout/16384.0
accel_yout_scaled = accel_yout/16384.0
accel_zout_scaled = accel_zout/16384.0
# Get roll and pitch angles
roll = get_angle_roll(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled)
pitch = get_angle_pitch(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled)
        # Print roll and pitch; the raw-acceleration print is kept for debugging
        print("Roll: {:.2f} deg, Pitch: {:.2f} deg".format(roll, pitch))
        # print("Acceleration - x: {:.2f}, y: {:.2f}, z: {:.2f}".format(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled))
except KeyboardInterrupt:
pass
| 3.078125 | 3 |
tests/test_utils/test_mqtt_broker.py | jordan-hamilton/petnet-feeder-service | 47 | 12769546 | def test_mqtt_broker_default_config():
from feeder.util.mqtt.broker import FeederBroker
from feeder import settings
broker = FeederBroker()
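    # The default config should expose a plain TCP listener and a TLS listener
    # bound to the ports and key material taken from feeder.settings.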
assert broker.config["listeners"]["tcp-1"] == {
"bind": f"0.0.0.0:{settings.mqtt_port}"
}
assert broker.config["listeners"]["tcp-ssl-1"] == {
"bind": f"0.0.0.0:{settings.mqtts_port}",
"ssl": True,
"cafile": settings.mqtts_public_key,
"certfile": settings.mqtts_public_key,
"keyfile": settings.mqtts_private_key,
}
def test_mqtt_broker_config_overrides():
from feeder.util.mqtt.broker import FeederBroker
overrides = {"auth": {}}
broker = FeederBroker(config_overrides=overrides)
assert broker.config["auth"] == {}
| 2.3125 | 2 |
network/layer_implementations/ConvLSTMCell.py | DesperateMaker/TrackR-CNN | 522 | 12769547 | import tensorflow as tf
from network.Util import smart_shape
RNNCell = tf.nn.rnn_cell.RNNCell
LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple
def _conv2d(x, W, strides=None):
if strides is None:
strides = [1, 1]
return tf.nn.conv2d(x, W, strides=[1] + strides + [1], padding="SAME")
def dynamic_conv_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
    # inputs: (batch, time, height, width, feature) with the default time_major=False, else (time, batch, ...)
input_shape = smart_shape(inputs)
num_units = cell.num_units()
h, final_state = tf.nn.dynamic_rnn(cell, inputs, sequence_length, initial_state, dtype, parallel_iterations,
swap_memory, time_major, scope)
h = tf.reshape(h, tf.stack([input_shape[0], input_shape[1], input_shape[2], input_shape[3], num_units]))
return h, final_state
# similar to https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# for maximal flexibility we allow to pass the weights externally
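# The cell keeps its recurrent state flattened to [batch, num_units*height*width]
# so it fits dynamic_rnn's 2-D state handling; _conv reshapes it back to NHWC
# before computing the gate convolution.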
class ConvLSTMCell(RNNCell):
def __init__(self, num_units, height, width, filter_size, forget_bias=1.0, activation=tf.tanh, W=None, b=None):
self._num_units = num_units
self._height = height
self._width = width
self._size = num_units * height * width
self._forget_bias = forget_bias
self._activation = activation
self._filter_size = list(filter_size)
if W is not None:
W_shape = W.get_shape().as_list()
assert len(W_shape) == 4
assert W_shape[:2] == self._filter_size
assert W_shape[-1] == 4 * self._num_units
self._W = W
else:
self._W = None
if b is not None:
b_shape = b.get_shape().as_list()
assert len(b_shape) == 1
assert b_shape[0] == 4 * self._num_units
self._b = b
else:
self._b = None
def __call__(self, inputs, state, scope=None):
        # inputs: tensor with shape [batch_size, height, width, channels]
        # state: LSTMStateTuple of flattened tensors, each [batch_size, num_units * height * width]
with tf.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
# Parameters of gates are concatenated into one multiply for efficiency.
c, h = state
concat = self._conv(inputs, h)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=concat)
batch = inputs.get_shape().as_list()[0]
if batch is None:
batch = tf.shape(inputs)[0]
i, j, f, o = [tf.reshape(x, [batch, -1]) for x in [i, j, f, o]]
new_c = (c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) *
self._activation(j))
new_h = self._activation(new_c) * tf.sigmoid(o)
new_state = LSTMStateTuple(new_c, new_h)
return new_h, new_state
def _conv(self, inputs, h):
batch = inputs.get_shape().as_list()[0]
if batch is None:
batch = tf.shape(inputs)[0]
n_input_features = inputs.get_shape().as_list()[-1]
#inputs = tf.reshape(inputs, [batch, self._height, self._width, n_input_features])
h = tf.reshape(h, [batch, self._height, self._width, self._num_units])
inp = tf.concat([inputs, h], axis=3)
if self._W is not None:
W = self._W
assert W.get_shape().as_list()[2] == n_input_features + self._num_units
else:
W = tf.get_variable("W", shape=(self._filter_size + [n_input_features + self._num_units, 4 * self._num_units]))
if self._b is not None:
b = self._b
else:
zero_initializer = tf.constant_initializer(0.0, dtype=inputs.dtype)
b = tf.get_variable("b", shape=(4 * self._num_units), initializer=zero_initializer)
y = _conv2d(inp, W) + b
return y
def num_units(self):
return self._num_units
@property
def state_size(self):
return LSTMStateTuple(self._size, self._size)
@property
def output_size(self):
return self._size
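# Hypothetical usage sketch (shapes assumed; not part of the original module):
#   cell = ConvLSTMCell(num_units=64, height=24, width=24, filter_size=(3, 3))
#   # inputs: (batch, time, 24, 24, channels) with the default time_major=False
#   outputs, final_state = dynamic_conv_rnn(cell, inputs, dtype=tf.float32)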
| 2.921875 | 3 |
pyserval/connection.py | umr-ds/pyserval | 1 | 12769548 | <reponame>umr-ds/pyserval
"""
pyserval.connection
~~~~~~~~~~~~~~~~~~~
"""
from pyserval.lowlevel.connection import RestfulConnection
from pyserval.exceptions import UnauthorizedError
from typing import Any
from requests.models import Response
class CheckedConnection(RestfulConnection):
"""Encapsulates HTTP-calls and throws exceptions for common error-cases
Args:
host (str): Hostname to connect to
port (int): Port to connect to
user (str): Username for HTTP basic auth
passwd (str): Password for HTTP basic auth
"""
def __init__(
self,
host: str = "localhost",
port: int = 4110,
user: str = "pyserval",
passwd: str = "<PASSWORD>",
) -> None:
RestfulConnection.__init__(self, host, port, user, passwd)
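    # Each HTTP verb below delegates to the parent implementation and converts
    # an HTTP 401 response into an UnauthorizedError.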
def get(self, path: str, **params: Any) -> Response:
"""Sends GET-request to REST-API
Args:
path (str): (relative) path to the REST-endpoint
            params (dict[str, Any]): Additional parameters to be sent with the request
Returns:
requests.models.Response: Response returned by the serval-server
Throws:
UnauthorizedError: If username/password is wrong
"""
response = RestfulConnection.get(self, path, **params)
if response.status_code == 401:
raise UnauthorizedError()
return response
def post(self, path: str, **params: Any) -> Response:
"""Sends POST-request to REST-API
Args:
path (str): (relative) path to the REST-endpoint
params (dict[str, Any]): Additional parameters to be sent with the request
Returns:
requests.models.Response: Response returned by the serval-server
Throws:
UnauthorizedError: If username/password is wrong
"""
response = RestfulConnection.post(self, path, **params)
if response.status_code == 401:
raise UnauthorizedError()
return response
def put(self, path: str, **params: Any) -> Response:
"""Sends PUT-request to REST-API
Args:
path (str): (relative) path to the REST-endpoint
params (dict[str, Any]): Additional parameters to be sent with the request
Returns:
requests.models.Response: Response returned by the serval-server
Throws:
UnauthorizedError: If username/password is wrong
"""
response = RestfulConnection.put(self, path, **params)
if response.status_code == 401:
raise UnauthorizedError()
return response
def delete(self, path: str, **params: Any) -> Response:
"""Sends DELETE-request to REST-API
Args:
path (str): (relative) path to the REST-endpoint
params (dict[str, Any]): Additional parameters to be sent with the request
Returns:
requests.models.Response: Response returned by the serval-server
Throws:
UnauthorizedError: If username/password is wrong
"""
response = RestfulConnection.delete(self, path, **params)
if response.status_code == 401:
raise UnauthorizedError()
return response
def patch(self, path: str, **params: Any) -> Response:
"""Sends PATCH-request to REST-API
Args:
path (str): (relative) path to the REST-endpoint
params (dict[str, Any]): Additional parameters to be sent with the request
Returns:
requests.models.Response: Response returned by the serval-server
Throws:
UnauthorizedError: If username/password is wrong
"""
response = RestfulConnection.patch(self, path, **params)
if response.status_code == 401:
raise UnauthorizedError()
return response
| 2.875 | 3 |
models/recipe_nutrient.py | nikgun1984/ketolife_backend | 1 | 12769549 | <gh_stars>1-10
from models import db
class RecipeNutrient(db.Model):
"""Recipe-Nutrient Model Association table"""
__tablename__ = 'recipe_has_nutrients'
id = db.Column(
db.Integer,
primary_key=True,
autoincrement=True
)
recipe_id = db.Column(
db.Integer,
db.ForeignKey("recipes.id"),
nullable=False
)
nutrient_id = db.Column(
db.Integer,
db.ForeignKey("nutrients.id"),
nullable=False
)
total_daily = db.Column(
db.Integer,
nullable=False
)
total_nutrients = db.Column(
db.Integer,
nullable=False
    )
| 2.921875 | 3 |
route.py | 1306298019/YOLOV4 | 0 | 12769550 | <gh_stars>0
import os
import glob
import imageio  # replaces scipy.misc image I/O, which was removed in SciPy >= 1.2
from scipy import ndimage
def get_image_paths(folder):
return glob.glob(os.path.join(folder, '*.png'))
def create_read_img(filename):
    # Save 90/180/270-degree rotated copies next to the original image
    im = imageio.imread(filename)
    for angle in (90, 180, 270):
        rotated = ndimage.rotate(im, angle)
        imageio.imwrite(filename[:-4] + '_{}.png'.format(angle), rotated)
    print(filename)
img_path = '/media/wxy/000F8E4B0002F751/test/'
imgs = get_image_paths(img_path)
#print (imgs)
for img_file in imgs:
    create_read_img(img_file)
| 2.625 | 3 |