from pyspark import SparkConf
from pyspark.sql import SparkSession, Window
from pyspark.sql.types import ArrayType, StructField, StructType, StringType, IntegerType, DecimalType, FloatType
from pyspark.sql.functions import udf, collect_list, struct, explode
from decimal import Decimal
import random
import pandas as pd
import numpy as np
appName = "Python Example - UDF without Apache Arrow"
master = 'local'
# Create Spark session
conf = SparkConf().setAppName(appName).setMaster(master)
spark = SparkSession.builder.config(conf=conf) \
.getOrCreate()
# Construct the data frame directly (without reading from HDFS)
cust_count = 10
txn_count = 100
data = [(i, j, Decimal(i*j*random.random()*random.choice((-1, 1)))) for j in range(txn_count)
for i in range(cust_count)]
# Create a schema for the dataframe
schema = StructType([
StructField('CustomerID', IntegerType(), False),
StructField('TransactionID', IntegerType(), False),
StructField('Amount', DecimalType(scale=2), True)
])
# Create the data frame
df = spark.createDataFrame(data, schema=schema)
# Function 1 - Scalar function - derive a new column whose value is Credit or Debit.
def calc_credit_debit_func(amount):
    return "Credit" if amount >= 0 else "Debit"
fn_credit_debit = udf(calc_credit_debit_func, returnType=StringType())
df = df.withColumn("CreditOrDebit", fn_credit_debit(df.Amount))
df.show()
# Function 2 - Group map function - calculate the difference from mean
attributes = [
StructField('TransactionID', IntegerType(), False),
StructField('Amount', DecimalType(scale=2), False),
StructField('CreditOrDebit', StringType(), False),
StructField('Diff', DecimalType(scale=2), False)
]
attribute_names = [a.name for a in attributes]
@udf(ArrayType(StructType(attributes)))
def fn_calc_diff_from_mean(txn):
dict_list = [row.asDict() for row in txn]
pdf = pd.DataFrame(dict_list)
amount = pdf.Amount
pdf = pdf.assign(Diff=amount-Decimal(amount.mean()))
return [[r[attr] if attr in r else None for attr in attribute_names] for r in pdf.to_dict(orient='records')]
df_map = df.groupby("CustomerID")\
.agg(collect_list(struct(['TransactionID', 'Amount', 'CreditOrDebit'])).alias('Transactions')) \
.withColumn("EnrichedTransactions", fn_calc_diff_from_mean("Transactions"))
df_map.show(10)
df_map_expanded = df_map.withColumn("transactions_exploded", explode("EnrichedTransactions")) \
.select("CustomerID", "transactions_exploded.*")
df_map_expanded.show(100)
# Function 3 - Group aggregate function - calculate mean only
@udf(DecimalType(scale=2))
def mean_udf(amount):
return np.mean(amount)
df_agg = df.groupby("CustomerID").agg(collect_list("Amount").alias("Amounts"))\
.withColumn("Mean", mean_udf("Amounts"))
df_agg.show()
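# For comparison only: a minimal sketch of the same scalar derivation using a
# vectorized pandas UDF (Apache Arrow). It is not part of the original script
# (which deliberately avoids Arrow) and assumes pyarrow is installed, so it is
# left commented out.
#
# from pyspark.sql.functions import pandas_udf
#
# @pandas_udf(StringType())
# def fn_credit_debit_arrow(amount: pd.Series) -> pd.Series:
#     # operate on a whole pandas Series at once instead of one value at a time
#     return amount.ge(0).map({True: "Credit", False: "Debit"})
#
# df.withColumn("CreditOrDebit", fn_credit_debit_arrow(df.Amount)).show()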
|
from ...utils import constants
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely
import pytest
from contextlib import ExitStack
from ...core.trajectorydataframe import TrajDataFrame
from ...models.gravity import Gravity
from ...models.epr import EPR, DensityEPR, SpatialEPR, Ditras
from ...models.markov_diary_generator import MarkovDiaryGenerator
from ...preprocessing import detection, clustering
atol = 1e-12
# fix a random seed
np.random.seed(2)
def all_equal(a, b):
return np.allclose(a, b, rtol=0., atol=atol)
def global_variables():
# tessellation
tess_polygons = [[[7.481, 45.184],
[7.481, 45.216],
[7.526, 45.216],
[7.526, 45.184],
[7.481, 45.184]],
[[7.481, 45.216],
[7.481, 45.247],
[7.526, 45.247],
[7.526, 45.216],
[7.481, 45.216]],
[[7.526, 45.184],
[7.526, 45.216],
[7.571, 45.216],
[7.571, 45.184],
[7.526, 45.184]],
[[7.526, 45.216],
[7.526, 45.247],
[7.571, 45.247],
[7.571, 45.216],
[7.526, 45.216]]]
geom = [shapely.geometry.Polygon(p) for p in tess_polygons]
tessellation = gpd.GeoDataFrame(geometry=geom, crs="EPSG:4326")
tessellation = tessellation.reset_index().rename(columns={"index": constants.TILE_ID})
tot_outflow = np.random.randint(10, 20, size=len(tessellation))
relevance = np.random.randint(5, 10, size=len(tessellation))
tessellation[constants.TOT_OUTFLOW] = tot_outflow
tessellation[constants.RELEVANCE] = relevance
gm = Gravity(gravity_type='singly constrained')
gmfdf = gm.generate(tessellation, out_format='probabilities')
odM = gmfdf.to_matrix()
gcgm = Gravity(gravity_type='globally constrained')
# instantiate a TrajDataFrame to fit the markov diary generator
lats_lngs = np.array([[39.978253, 116.3272755],
[40.013819, 116.306532],
[39.878987, 116.1266865],
[40.013819, 116.306532],
[39.97958, 116.313649],
[39.978696, 116.3262205],
[39.98153775, 116.31079],
[39.978161, 116.3272425],
[38.978161, 115.3272425]])
traj = pd.DataFrame(lats_lngs, columns=[constants.LATITUDE, constants.LONGITUDE])
traj[constants.DATETIME] = pd.to_datetime([
'20130101 8:34:04', '20130101 10:34:08', '20130105 10:34:08',
'20130110 12:34:15', '20130101 1:34:28', '20130101 3:34:54',
'20130101 4:34:55', '20130105 5:29:12', '20130115 00:29:12'])
traj[constants.UID] = [1 for _ in range(5)] + [2 for _ in range(3)] + [3]
tdf = TrajDataFrame(traj)
stdf = detection.stops(tdf)
cstdf = clustering.cluster(stdf)
return tessellation, gm, gmfdf, gcgm, odM, cstdf
tessellation, gm, gmfdf, gcgm, odM, cstdf = global_variables()
# generate
@pytest.mark.parametrize('epr_model_type', [EPR, DensityEPR, SpatialEPR, Ditras])
@pytest.mark.parametrize('start_date', [pd.to_datetime('2019/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2019/01/02 08:00:00')])
@pytest.mark.parametrize('gravity_singly', [{}, gm, gcgm])
@pytest.mark.parametrize('n_agents', [1, 2])
@pytest.mark.parametrize('starting_locations', [None, 'random'])
@pytest.mark.parametrize('od_matrix', [None, odM])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
def test_epr_generate(epr_model_type, start_date, end_date, gravity_singly,
n_agents, starting_locations, od_matrix, random_state, show_progress):
# TODO: check correctness of results
if starting_locations == 'random':
starting_locations = tessellation.sample(n=n_agents, replace=True)[constants.TILE_ID].values.tolist()
    # initialize model
if epr_model_type == Ditras:
# create a markov diary generator
mdg = MarkovDiaryGenerator()
mdg.fit(cstdf, n_agents, lid=constants.CLUSTER)
epr = epr_model_type(mdg)
else:
epr = epr_model_type()
# generate flows
with ExitStack() as stack:
if gravity_singly != {}:
if gravity_singly.gravity_type != 'singly constrained':
stack.enter_context(pytest.raises(AttributeError))
tdf = epr.generate(start_date, end_date,
spatial_tessellation=tessellation,
gravity_singly=gravity_singly,
n_agents=n_agents,
starting_locations=starting_locations,
od_matrix=od_matrix,
random_state=random_state,
show_progress=show_progress)
assert isinstance(tdf, TrajDataFrame)
|
"""Verbose names."""
# pylint: disable=invalid-name
from django.db import migrations, models
class Migration(migrations.Migration):
"""Verbose names."""
dependencies = [
('IoT_DataMgmt', '0091_rename_logical_data_type')
]
operations = [
migrations.AlterModelOptions(
name='equipmentdatafield',
options={'ordering': ('equipment_general_type', 'name'),
'verbose_name': 'Equipment Data Field',
'verbose_name_plural': 'Equipment Data Fields'}),
migrations.AlterModelOptions(
name='equipmentdatafieldtype',
options={'ordering': ('name',),
'verbose_name': 'Equipment Data Field Type',
'verbose_name_plural': 'Equipment Data Field Types'}),
migrations.AlterModelOptions(
name='equipmentfacility',
options={'ordering': ('name',),
'verbose_name': 'Equipment Facility',
'verbose_name_plural': 'Equipment Facilities'}),
migrations.AlterModelOptions(
name='equipmentgeneraltype',
options={'ordering': ('name',),
'verbose_name': 'Equipment General Type',
'verbose_name_plural': 'Equipment General Types'}),
migrations.AlterModelOptions(
name='equipmentinstance',
options={'ordering': ('equipment_general_type',
'equipment_unique_type',
'name'),
'verbose_name': 'Equipment Instance',
'verbose_name_plural': 'Equipment Instances'}),
migrations.AlterModelOptions(
name='equipmentsystem',
options={'ordering': ('equipment_facility', 'name', 'date'),
'verbose_name': 'Equipment System',
'verbose_name_plural': 'Equipment Systems'}),
migrations.AlterModelOptions(
name='equipmentuniquetype',
options={'ordering': ('equipment_general_type', 'name'),
'verbose_name': 'Equipment Unique Type',
'verbose_name_plural': 'Equipment Unique Types'}),
migrations.AlterModelOptions(
name='equipmentuniquetypegroup',
options={'ordering': ('equipment_general_type', 'name'),
'verbose_name': 'Equipment Unique Type Group',
'verbose_name_plural': 'Equipment Unique Type Groups'}),
migrations.AlterModelOptions(
name='equipmentuniquetypegroupdatafieldprofile',
options={
'ordering': ('equipment_unique_type_group',
'equipment_data_field',
'-to_date'),
'verbose_name':
'Equipment Unique Type Group Data Field Profile',
'verbose_name_plural':
'Equipment Unique Type Group Data Field Profiles'}),
migrations.AlterModelOptions(
name='globalconfig',
options={'ordering': ('key',),
'verbose_name': 'Global Config',
'verbose_name_plural': 'Global Configs'}),
migrations.AlterModelOptions(
name='logicaldatatype',
options={'ordering': ('name',),
'verbose_name': 'Logical Data Type',
'verbose_name_plural': 'Logical Data Types'}),
migrations.AlterModelOptions(
name='numericmeasurementunit',
options={'ordering': ('name',),
'verbose_name': 'Numeric Measurement Unit',
'verbose_name_plural': 'Numeric Measurement Units'}),
migrations.AlterField(
model_name='logicaldatatype',
name='name',
field=models.CharField(
db_index=True,
max_length=255,
unique=True,
verbose_name='Logical Data Type'))
]
|
import pandas as pd
import numpy as np
import PIL
from PIL import Image
import pickle
import random
import matplotlib.pyplot as plt
import json
from ast import literal_eval as make_tuple
from operator import itemgetter
from collections import Counter
import math
'''
1. Read each fold for each age group
2. Add counts of each age group
3. Divide by no. of folds. Name it X.
4. Distribute X images per fold
Properties in original CSV:
user_id, original_image, face_id, age, gender, fiducial_yaw_angle
New CSV properties:
Eg:
Fold 0: 30M 20F
30M: 5 3 2 10 6 2 1 1
20F: 0 1 2 8 7 1 1 0
New Distribution:
Fold 0
30M: 4 2 2 6 8 4 3 1
31F: 2 3 3 7 9 4 2 1
New Distribution:
Fold 1
28M: 3 3 2 6 8 3 2 1
24F: 1 2 2 6 8 4 1 0
Fold 1: 25M 35F
25M: 2 2 2 2 10 5 4 1
35F: 3 4 3 5 10 7 2 1
Age group 0 = 4M, 2F
Age group 1 = 2M, 2F
Age group 2 = 2M, 2F
Age group 3 = 6M, 6F
Age group 4 = 8M, 8F
Age group 5 = 4M, 4F
Age group 6 = 2M, 1F
Age group 7 = 1M, 1F
'''
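# Minimal sketch of steps 2-3 above (illustrative only; the helper name and the
# argument shapes are assumptions, not part of the original pipeline): sum the
# per-age-group counts over all folds, then divide by the number of folds to
# get the target number of images (X) to place in each new fold.
def per_fold_targets(counts_per_fold, n_folds=4):
    # counts_per_fold: one length-8 count array per fold
    totals = np.sum(counts_per_fold, axis=0)
    return np.ceil(totals / float(n_folds))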
no_of_folds = 4
def load_file(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def load_pickles(name, no_of_age_groups=8):
    # Each fold file holds one pickled chunk per age group; load and concatenate them.
    data = []
    with open(name + '.pkl', 'rb') as f:
        for _ in range(no_of_age_groups):
            data += pickle.load(f)
    return data
def get_age_range_id(age_tuple):
age_ranges = [(0,2),(4,6),(8,13),(15,20),(25,32),(38,43),(48,53),(60,100)]
diff_tuple = []
if age_tuple:
for r in age_ranges:
x = tuple(np.subtract(r,age_tuple))
x = tuple(np.absolute(x))
diff_tuple.append(x)
min_index = diff_tuple.index(min(diff_tuple, key=itemgetter(1)))
return min_index
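# Illustrative check: (25, 32) matches age_ranges[4] exactly (difference (0, 0)),
# so get_age_range_id((25, 32)) returns 4.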
def read_fold_for_each_group():
fold_names = ['0','1','2','3']
male_pickle_file_path_prefix = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/male/'
female_pickle_file_path_prefix = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/female/'
age_range_ids = [0,1,2,3,4,5,6,7]
male_age_group_counters_across_folds = np.zeros(8)
female_age_group_counters_across_folds = np.zeros(8)
for fold in fold_names:
train_ages = []
print('Trying to read training fold: %s......' % fold)
male_fold_file = load_file(male_pickle_file_path_prefix+'male_fold_'+fold+'_data')
male_ages = np.array(male_fold_file['ages'])
male_age_counter = Counter(male_ages)
male_age_counter_dict = dict(male_age_counter)
male_age_counter_dict = sorted(male_age_counter_dict.items(), key=lambda i: i[0])
        print(male_age_counter_dict)
male_age_counter_dict = dict(male_age_counter_dict)
for i in range(8):
if i in male_age_counter_dict:
male_age_group_counters_across_folds[i] += male_age_counter_dict[i]
female_fold_file = load_file(female_pickle_file_path_prefix+'female_fold_'+fold+'_data')
female_ages = np.array(female_fold_file['ages'])
female_age_counter = Counter(female_ages)
female_age_counter_dict = dict(female_age_counter)
female_age_counter_dict = sorted(female_age_counter_dict.items(), key=lambda i: i[0])
        print(female_age_counter_dict)
female_age_counter_dict = dict(female_age_counter_dict)
for i in range(8):
if i in female_age_counter_dict:
female_age_group_counters_across_folds[i] += female_age_counter_dict[i]
print ("\n")
male_age_group_counters_across_folds = np.array(male_age_group_counters_across_folds)
male_age_group_counters_across_folds = np.ceil(male_age_group_counters_across_folds/float(no_of_folds))
    print(male_age_group_counters_across_folds)
female_age_group_counters_across_folds = np.array(female_age_group_counters_across_folds)
female_age_group_counters_across_folds = np.ceil(female_age_group_counters_across_folds/float(no_of_folds))
    print(female_age_group_counters_across_folds)
return male_age_group_counters_across_folds, female_age_group_counters_across_folds
def read_new_folds():
fold_names = ['0','1','2','3']
male_pickle_file_path_prefix = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/new_distributed_data/male/'
female_pickle_file_path_prefix = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/new_distributed_data/female/'
age_range_ids = [0,1,2,3,4,5,6,7]
male_age_group_counters_across_folds = np.zeros(8)
female_age_group_counters_across_folds = np.zeros(8)
for fold in fold_names:
train_ages = []
print('Trying to read training fold: %s......' % fold)
male_data = load_pickles(male_pickle_file_path_prefix+'male_fold_'+fold)
male_df = pd.DataFrame(male_data)
male_ages = np.array(male_df['age_id'])
male_age_counter = Counter(male_ages)
male_age_counter_dict = dict(male_age_counter)
male_age_counter_dict = sorted(male_age_counter_dict.items(), key=lambda i: i[0])
        print(male_age_counter_dict)
male_age_counter_dict = dict(male_age_counter_dict)
for i in range(8):
if i in male_age_counter_dict:
male_age_group_counters_across_folds[i] += male_age_counter_dict[i]
female_data = load_pickles(female_pickle_file_path_prefix+'female_fold_'+fold)
female_df = pd.DataFrame(female_data)
female_ages = np.array(female_df['age_id'])
female_age_counter = Counter(female_ages)
female_age_counter_dict = dict(female_age_counter)
female_age_counter_dict = sorted(female_age_counter_dict.items(), key=lambda i: i[0])
        print(female_age_counter_dict)
female_age_counter_dict = dict(female_age_counter_dict)
for i in range(8):
if i in female_age_counter_dict:
female_age_group_counters_across_folds[i] += female_age_counter_dict[i]
        print(len(female_data))
print(' ')
def distribute_data(male_age_group_counters_across_folds,female_age_group_counters_across_folds):
print ("\nIn distributed data....")
fold_names = ['fold_0_data','fold_1_data','fold_2_data','fold_3_data']
male_pickle_file_path_prefix = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/male/'
female_pickle_file_path_prefix = '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_based_data/female/'
age_range_ids = [0,1,2,3,4,5,6,7]
for a in age_range_ids:
male_age_group_data = []
female_age_group_data = []
for fold in fold_names:
'''
male_fold_i's data will be in csv/fold_i
Similarly for female
'''
fold_df = pd.read_csv('/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/data/csvs/'+fold+'.csv')
#print fold+":"
for index, row in fold_df.iterrows():
#user_id, original_image, face_id, age, gender, fiducial_yaw_angle
age = row['age']
gender = row['gender']
yaw_angle = row['fiducial_yaw_angle']
if ((gender!='u') and (gender!='Nan') and (age!='None') and (gender!=' ') and (age!=' ') and (yaw_angle >= -45) and (yaw_angle <= 45)):
age_tuple = make_tuple(age)
age_id = get_age_range_id(age_tuple)
if(age_id == a):
dict = {
'user_id': row['user_id'], 'original_image': row['original_image'],
'face_id': row['face_id'], 'original_gender': gender, 'yaw_angle': yaw_angle,
'original_fold_name': fold, 'age_id': age_id
}
if(gender == 'm'):
male_age_group_data.append(dict)
else:
female_age_group_data.append(dict)
zz1 = int(math.ceil(male_age_group_counters_across_folds[a]))
        print(len(male_age_group_data))
        male_chunks = [male_age_group_data[x:x + zz1] for x in range(0, len(male_age_group_data), zz1)]
print ("No. of chunks for age: %i => %i , for male" % (a,len(male_chunks)))
print ("Every chunk is of size: %i" % len(male_chunks[0]))
'''
#Save every male chunk
'''
save_path='/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/new_distributed_data/male/'
for i in range(len(male_chunks)):
            with open(save_path + 'male_fold_' + str(i) + '.pkl', 'ab') as f:
                pickle.dump(male_chunks[i], f, pickle.HIGHEST_PROTOCOL)
        print('Saved Male Chunks')
zz2 = int(math.ceil(female_age_group_counters_across_folds[a]))
        female_chunks = [female_age_group_data[x:x + zz2] for x in range(0, len(female_age_group_data), zz2)]
print ("No. of chunks for age: %i => %i , for female" % (a,len(female_chunks)))
print ("Every chunk is of size: %i" % len(female_chunks[0]))
'''
#Save every female chunk
'''
save_path='/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/new_distributed_data/female/'
for i in range(len(female_chunks)):
            with open(save_path + 'female_fold_' + str(i) + '.pkl', 'ab') as f:
                pickle.dump(female_chunks[i], f, pickle.HIGHEST_PROTOCOL)
        print('Saved Female Chunks')
print ('\n')
def main():
#male_age_group_counters_across_folds, female_age_group_counters_across_folds = read_fold_for_each_group()
#distribute_data(male_age_group_counters_across_folds,female_age_group_counters_across_folds)
read_new_folds()
if __name__=='__main__':
main()
|
import numpy as np
import pandas as pd
def fit(x,y):
matrix_a = []
for i in range(len(x.columns)+1):
line = []
if i == 0:
for j in range(len(x.columns)+1):
if j == 0:
line.append(len(x))
else:
line.append(sum(x.iloc[:,j-1]))
else:
for j in range(len(x.columns)+1):
if j == 0:
line.append(sum(x.iloc[:,i-1]))
elif j == i:
square = sum(x.iloc[:,i-1]**2)
line.append(square)
else:
multiply = sum(x.iloc[:,i-1]*x.iloc[:,j-1])
line.append(multiply)
matrix_a.append(line)
matrix_h = []
for i in range(len(x.columns)+1):
if i == 0:
matrix_h.append(sum(y))
else:
matrix_h.append(sum(y*x.iloc[:,i-1]))
matrix = []
for i in range(len(x.columns)+1):
array = np.array(matrix_a)
array[:,i] = matrix_h
matrix.append(array)
matrix_det = []
for i in range(len(x.columns)+2):
if i == 0:
array = np.array(matrix_a)
det_a = np.linalg.det(array)
matrix_det.append(det_a)
else:
array = matrix[i-1]
det_a = np.linalg.det(array)
matrix_det.append(det_a)
coefficient = []
for i in range(len(matrix_det)):
if i == 0:
continue
else:
result = matrix_det[i]/matrix_det[0]
coefficient.append(result)
a = coefficient[0]
b = []
for i in range(1,len(coefficient)):
b.append(coefficient[i])
return a,b
def predict(x,y,test):
prediction = []
for i in range(len(test)):
slope_var = []
for j in range(len(test.columns)):
multiply = test.iloc[i][j]*fit(x,y)[1][j]
slope_var.append(multiply)
result = fit(x,y)[0] + sum(slope_var)
prediction.append(result)
return prediction
def intercept(x,y):
return fit(x,y)[0]
def slope(x,y):
return fit(x,y)[1]
def rsquare(x,y):
y_hat = []
for i in range(len(x)):
slope_var = []
for j in range(len(x.columns)):
multiply = x.iloc[i][j]*fit(x,y)[1][j]
slope_var.append(multiply)
result = fit(x,y)[0] + sum(slope_var)
y_hat.append(result)
sst,sse = [],[]
for i in range(len(y)):
mean_min = (y.tolist()[i]-sum(y)/len(y))**2
sst.append(mean_min)
for i in range(len(y)):
hat_min = (y.tolist()[i]-y_hat[i])**2
sse.append(hat_min)
r_square = 1-(sum(sse)/sum(sst))
return r_square
def r(x,y):
multiple_r = rsquare(x,y)**(1/2)
return multiple_r
def info(x,y):
print('Constant : ',fit(x,y)[0])
for i in range(len(fit(x,y)[1])):
print(f'Coefficient_{i+1} : {fit(x,y)[1][i]}')
print('Multiple R : ',r(x,y))
print('R Squared : ',rsquare(x,y))
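# Illustrative usage (the data values below are assumptions, not part of the
# original module): fit() solves the normal equations via Cramer's rule, and
# info() prints the intercept, coefficients, multiple R and R squared.
if __name__ == '__main__':
    x = pd.DataFrame({'x1': [1, 2, 3, 4, 5], 'x2': [2, 1, 4, 3, 5]})
    y = pd.Series([3.1, 3.9, 8.2, 8.8, 11.0])
    info(x, y)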
|
from thatlib import lines
print(lines(__file__))
|
"""
Collections
===========
This module contains some data structures used in :mod:`relentless` as an
alternative to Python's general purpose containers, for ease of use in constructing
potentials and performing computations during the optimization workflow.
.. autosummary::
:nosignatures:
FixedKeyDict
PairMatrix
KeyedArray
DefaultDict
.. autoclass:: FixedKeyDict
:members:
.. autoclass:: PairMatrix
:members:
.. autoclass:: KeyedArray
:members:
.. autoclass:: DefaultDict
:members:
"""
import collections
import numpy
class FixedKeyDict:
"""Dictionary with fixed keys.
Parameters
----------
keys : array_like
List of keys to be fixed.
default : scalar
Initial value to fill in the dictionary, defaults to ``None``.
Examples
--------
Create a keyed dictionary::
d = FixedKeyDict(keys=('A','B'))
Default values::
>>> print(d)
{'A': None, 'B': None}
Set default values::
d = FixedKeyDict(keys=('A','B'), default=0.0)
>>> print(d)
{'A':0.0, 'B':0.0}
Iterate as a dictionary::
for k in d:
d[k] = 1.0
Access by key::
>>> d['A']
1.0
>>> d['B']
1.0
Partially reassign/update values::
d.update({'A':0.5})
d.update(A=0.5) #equivalent statement
>>> print(d)
{'A':0.5, 'B':1.0}
Single-key dictionary still needs ``keys`` as a tuple::
FixedKeyDict(keys=('A',))
"""
def __init__(self, keys, default=None):
self._keys = tuple(keys)
self._data = {}
self._default = default
self.clear()
def _check_key(self, key):
"""Check that a type is in the dictionary.
Returns
-------
key
The type that is keyed in the dictionary.
Raises
------
KeyError
If the key is not in the dictionary.
"""
if key not in self.keys:
raise KeyError('Key {} is not in dictionary.'.format(key))
return key
def __getitem__(self, key):
key = self._check_key(key)
return self._data[key]
def __setitem__(self, key, value):
key = self._check_key(key)
self._data[key] = value
def __iter__(self):
return iter(self._data)
def __next__(self):
return next(self._data)
def __str__(self):
return str(self._data)
def clear(self):
"""Clear entries in the dictionary, resetting to default."""
for i in self.keys:
self._data[i] = self._default
def update(self, *data, **values):
"""Partially reassigns key values.
        If both the positional argument (``data``) and keyword arguments (``values``)
        are given, any keys in ``values`` take precedence over those in ``data``.
Parameters
----------
data : :class:`dict`
The keys and values to be updated/over-written, in a dictionary form.
values : kwargs
The keys and values to be updated/over-written.
Raises
------
TypeError
If more than one positional argument is given.
"""
if len(data) > 1:
raise TypeError('More than one positional argument is given')
elif len(data) == 1:
for key in data[0]:
self[key] = data[0][key]
for key in values:
self[key] = values[key]
def todict(self):
"""Convert the fixed-key dictionary to a standard dictionary.
Returns
-------
dict
A copy of the data in the dictionary.
"""
return dict(self._data)
@property
def keys(self):
"""tuple: All keys in the dictionary."""
return self._keys
class PairMatrix:
"""Generic matrix of values per-pair.
Defines a symmetric matrix of parameters corresponding to ``(i,j)`` pairs.
The matrix is essentially a dictionary of dictionaries, keyed on ``(i,j)``.
There is an equivalent virtual entry for ``(j,i)``. (The pairs that are
actually saved have ``j >= i``.) The dictionary associated with each pair
can have any number of entries in it, although a common use case is to have
the same parameter stored per-pair.
The pairs in the matrix are frozen from the list of types specified when
the object is constructed. It is an error to access pairs that cannot be
formed from ``types``.
The pair matrix emulates a dictionary, and its pairs are iterable.
Parameters
----------
types : array_like
List of types (A type must be a :class:`str`).
Raises
------
ValueError
If initialization occurs with empty ``types``.
TypeError
If ``types`` does not consist of only strings.
Examples
--------
Create a pair matrix::
m = PairMatrix(types=('A','B'))
Set a pair matrix value::
m['A','A']['energy'] = 1.0
m['A','B']['energy'] = -1.0
m['B','B']['energy'] = 1.0
Get a pair matrix value::
>>> m['A','A']['energy']
1.0
>>> m['A','B']['energy']
-1.0
>>> m['B','A']['energy']
-1.0
Iterate a pair matrix::
for pair in m:
m[pair]['mass'] = 1.0
Multiple parameters are a dictionary::
>>> m['A','B']
{'energy': -1.0, 'mass': 1.0}
Single-type matrix still needs ``types`` as a tuple::
PairMatrix(types=('A',))
"""
def __init__(self, types):
if len(types) == 0:
raise ValueError('Cannot initialize with empty types')
if not all(isinstance(t, str) for t in types):
raise TypeError('All types must be strings')
self.types = tuple(types)
# flood data with type pairs
self._data = {}
for i in self.types:
for j in self.types:
if j >= i:
self._data[i,j] = {}
def _check_key(self, key):
"""Check that a pair key is valid.
Returns
-------
tuple
The `(i,j)` pair that is keyed in the dictionary.
Raises
------
KeyError
If the key is not the right length or is not in the matrix.
"""
if len(key) != 2:
raise KeyError('Coefficient matrix requires a pair of types.')
if key[0] not in self.types:
raise KeyError('Type {} is not in coefficient matrix.'.format(key[0]))
elif key[1] not in self.types:
raise KeyError('Type {} is not in coefficient matrix.'.format(key[1]))
if key[1] >= key[0]:
return key
else:
return (key[1],key[0])
def __getitem__(self, key):
"""Get all coefficients for the `(i,j)` pair."""
i,j = self._check_key(key)
return self._data[i,j]
def __setitem__(self, key, value):
"""Set coefficients for the `(i,j)` pair."""
i,j = self._check_key(key)
self._data[i,j] = value
def __iter__(self):
return iter(self._data)
def __next__(self):
return next(self._data)
def __str__(self):
return str(self._data)
@property
def pairs(self):
"""tuple: All unique pairs in the matrix."""
return tuple(self._data.keys())
class KeyedArray(FixedKeyDict):
"""Numerical array with fixed keys.
Can be used to perform arithmetic operations between two arrays (element-wise)
or between an array and a scalar, as well as vector algebraic operations
(norm, dot product).
Parameters
----------
keys : array_like
List of keys to be fixed.
default : scalar
Initial value to fill in the dictionary, defaults to ``None``.
Examples
--------
Create a keyed array::
k1 = KeyedArray(keys=('A','B'))
k2 = KeyedArray(keys=('A','B'))
Set values through update::
k1.update({'A':2.0, 'B':3.0})
k2.update({'A':3.0, 'B':4.0})
Perform array-array arithmetic operations::
>>> print(k1 + k2)
{'A':5.0, 'B':7.0}
>>> print(k1 - k2)
{'A':-1.0, 'B':-1.0}
>>> print(k1*k2)
{'A':6.0, 'B':12.0}
>>> print(k1/k2)
{'A':0.6666666666666666, 'B':0.75}
>>> print(k1**k2)
{'A':8.0, 'B':81.0}
Perform array-scalar arithmetic operations::
>>> print(k1 + 3)
{'A':5.0, 'B':6.0}
>>> print(3 - k1)
{'A':1.0, 'B':0.0}
>>> print(3*k1)
{'A':6.0, 'B':9.0}
>>> print(k1/10)
{'A':0.2, 'B':0.3}
>>> print(k1**2)
{'A':4.0, 'B':9.0}
>>> print(-k1)
{'A':-2.0, 'B':-3.0}
Compute vector dot product::
>>> print(k1.dot(k2))
18.0
Compute vector norm::
>>> print(k2.norm())
5.0
"""
def __init__(self, keys, default=None):
super().__init__(keys, default)
def _assert_same_keys(self, val):
if (self.keys != val.keys):
raise KeyError('Both KeyedArrays must have identical keys to perform mathematical operations.')
def __add__(self, val):
"""Element-wise addition of two arrays, or of an array and a scalar."""
k = KeyedArray(keys=self.keys)
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
k.update({x: self[x] + val[x] for x in self})
elif numpy.isscalar(val):
k.update({x: self[x] + val for x in self})
else:
raise TypeError('A KeyedArray can only add a scalar or a KeyedArray.')
return k
def __radd__(self, val):
"""Element-wise addition of a scalar and an array."""
k = KeyedArray(keys=self.keys)
if numpy.isscalar(val):
k.update({x: val + self[x] for x in self})
else:
raise TypeError('A KeyedArray can only add a scalar or a KeyedArray.')
return k
def __iadd__(self, val):
"""In-place element-wise addition of two arrays, or of an array or scalar."""
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
for x in self:
self[x] += val[x]
elif numpy.isscalar(val):
for x in self:
self[x] += val
else:
raise TypeError('A KeyedArray can only add a scalar or a KeyedArray.')
return self
def __sub__(self, val):
"""Element-wise subtraction of two arrays, or of an array and a scalar."""
k = KeyedArray(keys=self.keys)
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
k.update({x: self[x] - val[x] for x in self})
elif numpy.isscalar(val):
k.update({x: self[x] - val for x in self})
else:
raise TypeError('A KeyedArray can only subtract a scalar or a KeyedArray.')
return k
def __rsub__(self, val):
"""Element-wise subtraction of a scalar and an array."""
k = KeyedArray(keys=self.keys)
if numpy.isscalar(val):
k.update({x: val - self[x] for x in self})
else:
raise TypeError('A KeyedArray can only subtract a scalar or a KeyedArray.')
return k
def __isub__(self, val):
"""In-place element-wise subtraction of two arrays, or of an array and a scalar."""
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
for x in self:
self[x] -= val[x]
elif numpy.isscalar(val):
for x in self:
self[x] -= val
else:
raise TypeError('A KeyedArray can only subtract a scalar or a KeyedArray.')
return self
def __mul__(self, val):
"""Element-wise multiplication of two arrays, or of an array and a scalar."""
k = KeyedArray(keys=self.keys)
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
k.update({x: self[x]*val[x] for x in self})
elif numpy.isscalar(val):
k.update({x: self[x]*val for x in self})
else:
raise TypeError('A KeyedArray can only multiply a scalar or a KeyedArray.')
return k
def __rmul__(self, val):
"""Element-wise multiplication of a scalar by an array."""
k = KeyedArray(keys=self.keys)
if numpy.isscalar(val):
k.update({x: val*self[x] for x in self})
else:
raise TypeError('A KeyedArray can only multiply a scalar or a KeyedArray.')
return k
def __imul__(self, val):
"""In-place element-wise multiplication of two arrays, or of an array by a scalar."""
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
for x in self:
self[x] *= val[x]
elif numpy.isscalar(val):
for x in self:
self[x] *= val
else:
raise TypeError('A KeyedArray can only multiply a scalar or a KeyedArray.')
return self
def __truediv__(self, val):
"""Element-wise division of two arrays, or of an array by a scalar."""
k = KeyedArray(keys=self.keys)
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
k.update({x: self[x]/val[x] for x in self})
elif numpy.isscalar(val):
k.update({x: self[x]/val for x in self})
else:
raise TypeError('A KeyedArray can only divide a scalar or a KeyedArray.')
return k
def __rtruediv__(self, val):
"""Element-wise division of a scalar by an array."""
k = KeyedArray(keys=self.keys)
if numpy.isscalar(val):
k.update({x: val/self[x] for x in self})
else:
raise TypeError('A KeyedArray can only divide a scalar or a KeyedArray.')
return k
def __itruediv__(self, val):
"""In-place element-wise division of two arrays, or of an array by a scalar."""
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
for x in self:
self[x] /= val[x]
elif numpy.isscalar(val):
for x in self:
self[x] /= val
else:
raise TypeError('A KeyedArray can only divide a scalar or a KeyedArray.')
return self
def __pow__(self, val):
"""Element-wise exponentiation of an array by a scalar or by an array."""
k = KeyedArray(keys=self.keys)
if isinstance(val, KeyedArray):
self._assert_same_keys(val)
k.update({x: self[x]**val[x] for x in self})
elif numpy.isscalar(val):
k.update({x: self[x]**val for x in self})
else:
raise TypeError('A KeyedArray can only be exponentiated by a scalar or by a KeyedArray.')
return k
def __neg__(self):
"""Element-wise negation of an array."""
k = KeyedArray(keys=self.keys)
k.update({x: -self[x] for x in self})
return k
def norm(self):
r"""Vector :math:`\ell^2`-norm.
For a vector :math:`\mathbf{x}=\left[x_1,\ldots,x_n\right]`, the
Euclidean 2-norm :math:`\lVert\mathbf{x}\rVert` is computed as:
.. math::
\lVert\mathbf{x}\rVert = \sqrt{\sum_{k=1}^{n} {x_k}^2}
Returns
-------
float
The vector norm.
"""
return numpy.linalg.norm(list(self.todict().values()))
def dot(self, val):
r"""Vector dot product.
For two vectors :math:`\mathbf{x}=\left[x_1,\ldots,x_n\right]` and
:math:`\mathbf{y}=\left[y_1,\ldots,y_n\right]`, the vector dot product
:math:`\mathbf{x}\cdot\mathbf{y}` is computed as:
.. math::
\mathbf{x}\cdot\mathbf{y} = \sum_{k=1}^{n} {x_k y_k}
Parameters
----------
val : :class:`KeyedArray`
One of the arrays used to compute the dot product.
Returns
-------
float
The vector dot product.
"""
self._assert_same_keys(val)
return numpy.sum([self[x]*val[x] for x in self])
class DefaultDict(collections.abc.MutableMapping):
"""Dictionary which supports a default value.
Parameters
----------
default : float
The default value.
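    Examples
    --------
    Create a dictionary with a default value::
        d = DefaultDict(default=1.0)
    Unset keys fall back to the default::
        >>> d['missing']
        1.0
    Assigned keys behave as usual::
        d['A'] = 2.0
        >>> d['A']
        2.0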
"""
def __init__(self, default):
self._data = {}
self.default = default
def __delitem__(self, key):
del self._data[key]
def __getitem__(self, key):
"""Get keyed item or default value if key is invalid."""
try:
return self._data[key]
except KeyError:
return self.default
def __iter__(self):
return iter(self._data)
def __setitem__(self, key, value):
"""Set value of keyed item."""
if key is None:
raise KeyError('A DefaultDict key cannot be None.')
self._data[key] = value
def __len__(self):
return len(self._data)
@property
def default(self):
"""float or dict: The default value."""
return self._default
@default.setter
def default(self, value):
self._default = value
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.homeRecipes, name='home'),
path('recipes/', views.recipes, name='recipes'),
path('about/', views.about, name='about'),
]
|
class Table(object):
"""Base class for all Table classes."""
def __init__(self, connection, classname):
"""
:param connection: a connection instance.
        :param classname: the name of the class (collection) for which this table functions.
:return:
"""
self.classname = classname
self.connection = connection
self._connection = self.connection.db.get_collection(self.classname)
def __getitem__(self, key):
"""
Syntactic sugar.
:param key: The _id of the current class.
        :return: A single record, or ``None`` if no record matches.
"""
return self._connection.find_one({"_id": key})
def retrieve(self, query, filt=()):
"""
Retrieves items from the connection.
:param query: The query to run.
:param filt: The filter.
:return: A cursor with the filter.
"""
if filt:
return self._connection.find(query, filt)
else:
return self._connection.find(query)
def retrieve_one(self, query, filt=()):
"""
Retrieve a single item from the connection.
:param query: The query to run.
:param filt: The filter.
:return: A single item.
"""
if filt:
return self._connection.find_one(query, filt)
else:
return self._connection.find_one(query)
def bunch(self, listofids, filt=(), orq=True):
"""
Return a bunch of items based on primary keys.
        :param listofids: a list of IDs to retrieve.
        :param filt: the filter (projection) to apply to the returned documents.
        :param orq: whether to use an OR query or an IN query.
        :return: a cursor to the specified items.
"""
if orq:
return self.retrieve({"$or": [{"_id": i} for i in listofids]}, filt)
else:
return self.retrieve({"_id": {"$in": listofids}}, filt)
class String(Table):
"""Connection to the String collection"""
def __init__(self, connection):
"""
:param connection: an instance of a Connection class.
:return:
"""
super(String, self).__init__(connection, "string")
def surface(self, listofids, lower=True):
"""
Retrieve the surface form of a list of string ids.
:param listofids: A list of ids (SUI in UMLS terminology)
:param lower: whether to return the lower-cased version or the original version.
:return: a list of surface forms.
"""
if lower:
return [s["lower"] for s in self.bunch(listofids)]
else:
return [s["string"] for s in self.bunch(listofids)]
def concept_id(self, surface):
"""
Retrieves all concept ids associated with a given surface form.
:param surface: The surface form for which to retrieve all concept ids.
:return:
"""
string = self.retrieve_one({"string": surface}, {"_id": 0, "concept": 1})
if string:
return string["concept"]
else:
return []
class Concept(Table):
"""Connection to the Concept collection"""
def __init__(self, connection):
super(Concept, self).__init__(connection, "concept")
    def all_definitions(self):
        """
        Returns all definitions.
        :return: A dictionary where the key is the Concept ID and the value is a list of definitions.
        """
        return {x["_id"]: x["definition"] for x in self.retrieve({"definition": {"$exists": True}}, {"definition": 1})}
def bunch_definitions(self, cids):
"""
Returns the definitions for a bunch of concept ids.
:param cids: A list of concept ids (CUI)
:return: A dictionary where the key is the concept ID and the value is a list of definitions.
"""
return {c["_id"]: c["definition"] for c in self.bunch(cids, {"definition": 1}, orq=True)}
def one_definition(self, cid):
"""
Return all definitions for a single concept.
:param cid: A single cid.
:return: A list of descriptions.
"""
return self[cid]["description"]
def get_preferred(self, cid):
"""
Gets the preferred term associated with a single concept id.
:param cid: a concept id.
:return: the TID of the preferred term.
"""
return self[cid]["preferred"]
def get_synonym(self, cid):
"""
Gets the cids of the concepts which are synonyms of the given cid.
:param cid: the cid.
        :return: A list of concepts that are synonyms of the given cid.
"""
return self[cid]["rel"]["synonym"]
def get_words(self, cid):
"""
Gets all words which are associated with a concept ID.
:param cid: The concept ID
:return: A list of words.
"""
return self[cid]["string"]
class Term(Table):
"""Connection to the Term collection"""
def __init__(self, connection):
super(Term, self).__init__(connection, "term")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [("django_pesapal", "0002_transaction_payment_status")]
operations = [
migrations.AddField(
model_name="transaction",
name="payment_method",
field=models.CharField(max_length=16, null=True),
preserve_default=True,
)
]
|
import argparse
import os
from utils import get_logger, make_date_dir
from data_utils import load_data_from_csv, batch_loader
import numpy as np
from time import time
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("-m", "--model", type=str, default="LSTNet",
required=True, choices=["LSTNet", "MANN"],
help="Model selected in the list: LSTNet, MANN")
# Optional parameters
args = parser.parse_args()
if args.model == "LSTNet":
from LSTNet.config import Config
from LSTNet.model import Model
config = Config()
else:
from MANN.config import Config
from MANN.model import Model
config = Config()
logger = get_logger(os.path.join(config.model, "logs/"))
logger.info("=======Model Configuration======")
logger.info(config.desc)
logger.info("================================")
try:
train_x, dev_x, test_x, train_y, dev_y, test_y = load_data_from_csv(data_path=config.data_path,
x_len=config.x_len,
y_len=config.y_len,
foresight=config.foresight,
dev_ratio=config.dev_ratio,
test_ratio=config.test_ratio,
seed=config.seed)
logger.info("train_x shape: {}, dev_x shape: {}, test_x shape: {}"
.format(train_x.shape, dev_x.shape, test_x.shape))
logger.info("train_y shape: {}, dev_y shape: {}, test_y shape: {}"
.format(train_y.shape, dev_y.shape, test_y.shape))
model = Model(config)
train_data = list(zip(train_x, train_y))
no_improv = 0
best_loss = 100
model_dir = make_date_dir(os.path.join(config.model, 'model_save/'))
result_dir = make_date_dir(os.path.join(config.model, 'results/'))
start_time = time()
for i in range(config.num_epochs):
train_batches = batch_loader(train_data, config.batch_size)
epoch = i+1
for batch in train_batches:
batch_x, batch_y = zip(*batch)
loss, rse, mape, mae, step = model.train(batch_x, batch_y)
if step % 100 == 0:
logger.info("epoch: {ep}, step: {st}, loss: {lo:.4f}, rse: {rs:.4f}, mape: {map:.4f}, mae: {ma:.4f}".format(ep=epoch, st=step, lo=loss, rs=rse, map=mape, ma=mae))
# dev score for each epoch (no mini batch)
_, dev_loss, dev_rse, dev_mape, dev_mae = model.eval(dev_x, dev_y)
if dev_loss < best_loss:
best_loss = dev_loss
no_improv = 0
logger.info("New score! : dev_loss: {lo:.4f}, dev_rse: {rs:.4f}, dev_mape: {map:.4f}, dev_mae: {ma:.4f}".format(lo=dev_loss, rs=dev_rse, map=dev_mape, ma=dev_mae)
logger.info("Saving model at {}".format(model_dir))
model.save_session(os.path.join(model_dir, config.model))
else:
no_improv += 1
if no_improv == config.nepoch_no_improv:
logger.info("No improvement for %d epochs" % no_improv)
break
elapsed = time()-start_time
# generating results (no mini batch)
model.restore_session(model_dir)
pred, test_loss, test_rse, test_mape, test_mae = model.eval(test_x, test_y)
logger.info("test_loss: {lo:.4f}, test_rse: {rs:.4f}, test_mape: {map:.4f}, test_mae: {ma:.4f}".format(lo=test_loss, rs=test_rse, map=test_mape, ma=test_mae))
# save results
np.save(os.path.join(result_dir, 'pred.npy'), pred)
np.save(os.path.join(result_dir, 'test_y.npy'), test_y)
logger.info("Saving results at {}".format(result_dir))
logger.info("Elapsed training time {0:0.4f}".format(elapsed))
logger.info("Training finished, exit program")
except:
logger.exception("ERROR")
if __name__ == "__main__":
main()
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
from scipy.sparse import csgraph
def test_weak_connections():
Xde = np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)
for X in Xsp, Xde:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='weak')
assert_equal(n_components, 2)
assert_array_almost_equal(labels, [0, 0, 1])
def test_strong_connections():
X1de = np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
X2de = X1de + X1de.T
X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)
for X in X1sp, X1de:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_equal(n_components, 3)
labels.sort()
assert_array_almost_equal(labels, [0, 1, 2])
for X in X2sp, X2de:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_equal(n_components, 2)
labels.sort()
assert_array_almost_equal(labels, [0, 0, 1])
def test_strong_connections2():
X = np.array([[0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0]])
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_equal(n_components, 5)
labels.sort()
assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])
def test_weak_connections2():
X = np.array([[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0]])
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='weak')
assert_equal(n_components, 2)
labels.sort()
assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])
def test_ticket1876():
# Regression test: this failed in the original implementation
# There should be two strongly-connected components; previously gave one
g = np.array([[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 1, 0]])
n_components, labels = csgraph.connected_components(g, connection='strong')
assert_equal(n_components, 2)
assert_equal(labels[0], labels[1])
assert_equal(labels[2], labels[3])
def test_fully_connected_graph():
# Fully connected dense matrices raised an exception.
# https://github.com/scipy/scipy/issues/3818
g = np.ones((4, 4))
n_components, labels = csgraph.connected_components(g)
assert_equal(n_components, 1)
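# Quick intuition for the cases above: in a directed graph, a *weak* component
# ignores edge direction (an edge 0->1 joins nodes 0 and 1), while a *strong*
# component requires a directed path in both directions between every pair of
# its nodes.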
|
name = "opt"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test functionality of ci/build_matrix.py"""
import dataclasses
import importlib.util
import json
import string
import sys
from pathlib import Path
# Load the build_matrix.py file using importlib.
# See https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly for reference.
# @learnitall: mypy has a hard time understanding types in these next couple lines, add ignores as needed
_bm_source_path = Path(__file__).parent.parent.parent.parent.joinpath("ci", "build_matrix.py").resolve()
_bm_spec = importlib.util.spec_from_file_location("build_matrix", _bm_source_path)
build_matrix = importlib.util.module_from_spec(_bm_spec)
sys.modules["build_matrix"] = build_matrix
_bm_spec.loader.exec_module(build_matrix) # type: ignore
with open(Path(__file__).parent.joinpath("git_diff_test.txt"), encoding="utf8") as git_diff_test:
EXAMPLE_GIT_DIFF = git_diff_test.read()
with open(Path(__file__).parent.joinpath("find_df_test.txt"), encoding="utf8") as find_df_test:
EXAMPLE_DF_LIST = find_df_test.read()
EXAMPLE_MATRIX_BUILDER_KWARGS_DICT = {
"archs": ("arch1", "arch2"),
"tags": ("latest", "my-sha"),
"bones": ("bone1", "bone2"),
"upstream_branch": "my_upstream_branch",
"dockerfile_set": {"dockerfile1", "dockerfile2"},
"changed_set": {"file1.py", "path/to/file2.py"},
}
def test_parse_git_diff_returns_set_with_right_number_of_files():
"""Test that the parse_git_diff function properly parses given git-diff output."""
result = build_matrix.parse_git_diff(EXAMPLE_GIT_DIFF)
assert isinstance(result, set)
assert len(result) == len(EXAMPLE_GIT_DIFF.strip().split("\n"))
for file_path in result:
assert all(char not in string.whitespace for char in file_path)
def test_parse_dockerfile_list_returns_set_with_right_number_of_files(monkeypatch):
"""Test that the parse_dockerfile_list function properly parses given output."""
# Reset ignores for now
monkeypatch.setattr(build_matrix, "IGNORES", tuple())
result = build_matrix.parse_dockerfile_list(EXAMPLE_DF_LIST)
assert isinstance(result, set)
assert len(result) == len(EXAMPLE_DF_LIST.strip().split("\n"))
for file_path in result:
assert all(char not in string.whitespace for char in file_path)
def test_parse_dockerfile_list_returns_set_with_files_ignored(monkeypatch):
"""Test that the parse_dockerfile_list function properly ignores target dockerfiles."""
monkeypatch.setattr(build_matrix, "IGNORES", (r"Dockerfile\.ppc64le$",))
result = build_matrix.parse_dockerfile_list(EXAMPLE_DF_LIST)
# The test file has 8 ppc64le dockerfiles
assert len(result) == len(EXAMPLE_DF_LIST.strip().split("\n")) - 8
def test_matrix_entry_new_parses_file_path_correctly():
"""Test that the new method of MatrixEntry class correctly parses file paths."""
dockerfile = "my/path/to/benchmark/Dockerfile"
changed = False
archs = ["arch"]
tags = ["latest"]
as_dict = {
"dockerfile": dockerfile,
"image_name": "benchmark",
"benchmark": "benchmark",
"env_var": "BENCHMARK_IMAGE",
"archs": archs,
"changed": changed,
"tags": tags,
}
entry = build_matrix.MatrixEntry.new(dockerfile=dockerfile, changed=changed, archs=archs, tags=tags)
assert dataclasses.asdict(entry) == as_dict
def test_matrix_entry_new_parses_benchmark_name_correctly():
"""Test that the new method of MatrixEntry class correctly parses the benchmark name."""
dockerfiles = [
("my/path/to/benchmark/Dockerfile", "benchmark"),
("my_benchmark_wrapper/Dockerfile", "my_benchmark"),
("benchmark_wrapper/Dockerfile", "benchmark"),
]
for dockerfile, benchmark_name in dockerfiles:
entry = build_matrix.MatrixEntry.new(
dockerfile=dockerfile, changed=True, archs=["myarch"], tags=["latest"]
)
assert entry.benchmark == benchmark_name
def test_matrix_entry_json_methods_correctly_creates_expected_json_dict():
"""Test that the json methods of the MatrixEntry class correctly creates the expected JSON dicts."""
dockerfile = "dockerfile"
changed = True
image_name = "bimage"
benchmark = "benchmark"
env_var = "BENCHMARK_IMAGE"
input_tags = ["0", "1", "2"]
input_archs = ["1", "2", "3"]
entry = build_matrix.MatrixEntry(
dockerfile=dockerfile,
changed=changed,
archs=input_archs,
tags=input_tags,
image_name=image_name,
benchmark=benchmark,
env_var=env_var,
)
for index, json_dict in enumerate(entry.build_json()):
arch = str(index + 1)
tags = " ".join([f"{str(tag)}-{arch}" for tag in input_tags])
assert json_dict["dockerfile"] == dockerfile
assert json_dict["changed"] == changed
assert json_dict["image_name"] == image_name
assert json_dict["benchmark"] == benchmark
assert json_dict["env_var"] == env_var
assert json_dict["arch"] == arch
assert json_dict["tags"] == tags
assert json_dict["tag_suffix"] == f"-{arch}"
json.dumps(json_dict)
for index, json_dict in enumerate(entry.manifest_json()):
tag = str(index)
tag_suffixes = [f"-{arch}" for arch in input_archs]
assert json_dict["dockerfile"] == dockerfile
assert json_dict["changed"] == changed
assert json_dict["image_name"] == image_name
assert json_dict["benchmark"] == benchmark
assert json_dict["archs"] == " ".join(input_archs)
assert json_dict["tag"] == tag
assert json_dict["tag_suffixes"] == " ".join(tag_suffixes)
json.dumps(json_dict)
def test_matrix_builder_can_instantiate_correctly():
"""Test that the MatrixBuilder instantiates correctly with given args and creates empty build matrix."""
builder = build_matrix.MatrixBuilder(**EXAMPLE_MATRIX_BUILDER_KWARGS_DICT)
assert builder.archs == EXAMPLE_MATRIX_BUILDER_KWARGS_DICT["archs"]
assert builder.bones == EXAMPLE_MATRIX_BUILDER_KWARGS_DICT["bones"]
assert builder.dockerfile_set == EXAMPLE_MATRIX_BUILDER_KWARGS_DICT["dockerfile_set"]
assert builder.changed_set == EXAMPLE_MATRIX_BUILDER_KWARGS_DICT["changed_set"]
assert builder.build_matrix == {"include": []}
assert builder.manifest_matrix == {"include": []}
def test_matrix_builder_reset_method_correctly_clears_matrix():
"""Test that the MatrixBuilder.reset method will correctly clear out the matrix."""
builder = build_matrix.MatrixBuilder(**EXAMPLE_MATRIX_BUILDER_KWARGS_DICT)
builder.build_matrix = builder.manifest_matrix = "this is a matrix"
builder.reset()
for matrix in (builder.build_matrix, builder.manifest_matrix):
assert isinstance(matrix, dict)
assert matrix == {"include": []}
def test_matrix_builder_bones_changed_method_correctly_identifies_changed_bones():
"""Test that the MatrixBuilder.bones_changed method will identify if bones have changed."""
builder = build_matrix.MatrixBuilder(**EXAMPLE_MATRIX_BUILDER_KWARGS_DICT)
builder.bones = (r"b.*1", r"b.*2")
builder.changed_set = {"bone1"}
assert builder.bones_changed()
builder.changed_set = {"bone2"}
assert builder.bones_changed()
builder.changed_set = {"bone3"}
assert not builder.bones_changed()
def test_matrix_builder_benchmark_changed_method_correctly_identifies_if_benchmark_changed():
"""Test that the MatrixBuilder.benchmark_changed method will identify if a benchmark has changed."""
changed = [
"snafu/dns_perf_wrapper/Dockerfile",
"snafu/benchmarks/uperf/Dockerfile",
"uperf-wrapper/Dockerfile",
]
not_changed = ["snafu/my_unchanged_benchmark/Dockerfile"]
builder = build_matrix.MatrixBuilder(**EXAMPLE_MATRIX_BUILDER_KWARGS_DICT)
builder.changed_set = build_matrix.parse_git_diff(EXAMPLE_GIT_DIFF)
builder.dockerfile_set = build_matrix.parse_dockerfile_list(EXAMPLE_DF_LIST)
for changed_benchmark in changed:
assert builder.benchmark_changed(changed_benchmark)
for not_changed_benchmark in not_changed:
assert not builder.benchmark_changed(not_changed_benchmark)
def test_matrix_builder_build_method_changed_only_param_works_as_expected():
"""Test that the MatrixBuilder.build method will only output changed dockerfiles with changed_only."""
changed = [
"snafu/dns_perf_wrapper/Dockerfile",
"snafu/benchmarks/uperf/Dockerfile",
"uperf-wrapper/Dockerfile",
]
not_changed = ["snafu/my_unchanged_benchmark/Dockerfile"]
builder = build_matrix.MatrixBuilder(**EXAMPLE_MATRIX_BUILDER_KWARGS_DICT)
builder.changed_set = build_matrix.parse_git_diff(EXAMPLE_GIT_DIFF)
builder.dockerfile_set = build_matrix.parse_dockerfile_list(EXAMPLE_DF_LIST)
reduce_to_dockerfiles = lambda matrix: list(map(lambda entry: entry["dockerfile"], matrix["include"]))
builder.build(changed_only=False)
all_build_dockerfiles = reduce_to_dockerfiles(builder.build_matrix)
all_manifest_dockerfiles = reduce_to_dockerfiles(builder.manifest_matrix)
builder.reset()
builder.build(changed_only=True)
changed_build_dockerfiles = reduce_to_dockerfiles(builder.build_matrix)
changed_manifest_dockerfiles = reduce_to_dockerfiles(builder.manifest_matrix)
assert all(unchanged_df not in changed_build_dockerfiles for unchanged_df in not_changed)
assert all(changed_df in changed_build_dockerfiles for changed_df in changed)
assert all(unchanged_df in all_build_dockerfiles for unchanged_df in not_changed)
assert all(changed_df in all_build_dockerfiles for changed_df in changed)
assert all(unchanged_df not in changed_manifest_dockerfiles for unchanged_df in not_changed)
assert all(changed_df in changed_manifest_dockerfiles for changed_df in changed)
assert all(unchanged_df in all_manifest_dockerfiles for unchanged_df in not_changed)
assert all(changed_df in all_manifest_dockerfiles for changed_df in changed)
|
import os
import shutil
import tempfile
import pytest
from django.core.exceptions import ValidationError
from paper_uploads import validators
from .dummy import make_dummy_file, make_dummy_image
class TestExtensionValidator:
def test_format_extension_list(self):
validator = validators.ExtensionValidator(
allowed=['jpg', 'Gif', 'jpeg', 'JPEG', 'PNG', 'gif', '.png', '.Jpg']
)
assert validator.allowed == ('jpg', 'gif', 'jpeg', 'png')
def test_case_insensitive(self):
validator = validators.ExtensionValidator(allowed=['Pdf'])
with make_dummy_file('something.PDF') as fp:
validator(fp)
def test_fail(self):
validator = validators.ExtensionValidator(allowed=['pdf'])
with make_dummy_file('something.avi') as fp:
with pytest.raises(ValidationError) as exc:
validator(fp)
assert (
exc.value.messages[0] == "File `something.avi` has an invalid extension. "
"Valid extension(s): pdf"
)
def test_custom_message(self):
validator = validators.ExtensionValidator(allowed=['mp3'], message='invalid extension: %(ext)s')
with pytest.raises(ValidationError) as exc:
with make_dummy_file('something.pdf') as fp:
validator(fp)
assert (
exc.value.messages[0] == "invalid extension: pdf"
)
def test_help_text(self):
validator = validators.ExtensionValidator(allowed=['pdf', 'mp3'])
assert str(validator.get_help_text()) == 'Allowed extensions: pdf, mp3'
class TestMimetypeValidator:
def test_allowed_mimetypes(self):
validator = validators.MimeTypeValidator(
allowed=['image/*', 'video/mp4', 'video/ogg', 'image/jpg', 'Video/MP4']
)
assert validator.allowed == ('image/*', 'video/mp4', 'video/ogg', 'image/jpg')
def test_case_insensitive(self):
validator = validators.MimeTypeValidator(allowed=['iMaGe/Jpeg'])
# dummy file with JPEG signature
with make_dummy_file(content=b'\xff\xd8\xff') as fp:
validator(fp)
def test_asterisk(self):
validator = validators.MimeTypeValidator(allowed=['image/*'])
# dummy file with JPEG signature
with make_dummy_file(content=b'\xff\xd8\xff') as fp:
validator(fp)
def test_fail(self):
validator = validators.MimeTypeValidator(allowed=['image/*'])
with make_dummy_file(content=b'Hello') as fp:
with pytest.raises(ValidationError) as exc:
validator(fp)
assert (
exc.value.messages[0]
== "File `something.txt` has an invalid mimetype 'text/plain'"
)
def test_custom_message(self):
validator = validators.MimeTypeValidator(allowed=['image/*'], message='invalid mimetype: %(mimetype)s')
with pytest.raises(ValidationError) as exc:
with make_dummy_file(content=b'Hello') as fp:
validator(fp)
assert (
exc.value.messages[0] == "invalid mimetype: text/plain"
)
def test_help_text(self):
validator = validators.MimeTypeValidator(allowed=['video/mp4', 'video/ogg', 'image/*'])
assert str(validator.get_help_text()) == 'Allowed types: video/mp4, video/ogg, image/*'
class TestSizeValidator:
def test_valid(self):
validator = validators.SizeValidator(limit_value=8)
for size in range(1, 9):
with make_dummy_file(content=b'1234567890'[:size]) as fp:
validator(fp)
def test_fail(self):
validator = validators.SizeValidator(limit_value=8)
with pytest.raises(ValidationError) as exc:
with make_dummy_file(content=b'123456789') as fp:
validator(fp)
assert (
exc.value.messages[0]
== "File `something.txt` is too large. Maximum file size is 8\xa0bytes."
)
def test_custom_message(self):
validator = validators.SizeValidator(limit_value=2, message='invalid size: %(size)s')
with pytest.raises(ValidationError) as exc:
with make_dummy_file(content=b'Hello' * 1024) as fp:
validator(fp)
assert (
exc.value.messages[0] == "invalid size: 5120"
)
def test_help_text(self):
validator = validators.SizeValidator(limit_value=1024*1024)
assert str(validator.get_help_text()) == 'Maximum file size: 1.0 MB'
class TestImageMinSizeValidator:
def test_valid(self):
validator = validators.ImageMinSizeValidator(40, 60)
with make_dummy_image(width=40, height=60) as fp:
validator(fp)
with make_dummy_image(width=41, height=60) as fp:
validator(fp)
with make_dummy_image(width=40, height=61) as fp:
validator(fp)
def test_invalid_image(self):
validator = validators.ImageMinSizeValidator(40, 60)
with pytest.raises(ValidationError) as exc:
with make_dummy_file(content=b'Hello') as fp:
validator(fp)
assert exc.value.messages[0] == "File `something.txt` is not an image"
def test_closed_image(self):
tfile = tempfile.NamedTemporaryFile(delete=False)
shutil.copyfileobj(make_dummy_image(width=40, height=60), tfile)
tfile.close()
assert tfile.closed is True
validator = validators.ImageMinSizeValidator(0, 0)
with pytest.raises(ValidationError, match='is closed'):
validator(tfile)
os.unlink(tfile.name)
def test_fail(self):
validator = validators.ImageMinSizeValidator(40, 60)
with pytest.raises(ValidationError) as exc:
with make_dummy_image(width=39, height=60) as fp:
validator(fp)
assert (
exc.value.messages[0]
== "Image `something.jpg` is not wide enough. The minimum width is 40 pixels."
)
with pytest.raises(ValidationError) as exc:
with make_dummy_image(width=40, height=59) as fp:
validator(fp)
assert (
exc.value.messages[0]
== "Image `something.jpg` is not tall enough. The minimum height is 60 pixels."
)
with pytest.raises(ValidationError) as exc:
with make_dummy_image(width=39, height=59) as fp:
validator(fp)
assert (
exc.value.messages[0]
== "Image `something.jpg` is too small. Image should be at least 40x60 pixels."
)
def test_help_text(self):
validator = validators.ImageMinSizeValidator(640, 480)
assert str(validator.get_help_text()) == 'Minimum dimensions: 640x480 pixels'
validator = validators.ImageMinSizeValidator(640, 0)
assert str(validator.get_help_text()) == 'Minimum image width: 640 pixels'
validator = validators.ImageMinSizeValidator(0, 480)
assert str(validator.get_help_text()) == 'Minimum image height: 480 pixels'
class TestImageMaxSizeValidator:
def test_valid(self):
validator = validators.ImageMaxSizeValidator(40, 60)
with make_dummy_image(width=40, height=60) as fp:
validator(fp)
with make_dummy_image(width=39, height=60) as fp:
validator(fp)
with make_dummy_image(width=40, height=59) as fp:
validator(fp)
def test_invalid_image(self):
validator = validators.ImageMaxSizeValidator(16, 24)
with pytest.raises(ValidationError) as exc:
with make_dummy_file(content=b'Hello') as fp:
validator(fp)
assert exc.value.messages[0] == "File `something.txt` is not an image"
def test_closed_image(self):
tfile = tempfile.NamedTemporaryFile(delete=False)
shutil.copyfileobj(make_dummy_image(width=40, height=60), tfile)
tfile.close()
assert tfile.closed is True
validator = validators.ImageMaxSizeValidator(0, 0)
with pytest.raises(ValidationError, match='is closed'):
validator(tfile)
os.unlink(tfile.name)
def test_fail(self):
validator = validators.ImageMaxSizeValidator(40, 60)
with pytest.raises(ValidationError) as exc:
with make_dummy_image(width=40, height=61) as fp:
validator(fp)
assert (
exc.value.messages[0]
== "Image `something.jpg` is too tall. The maximum height is 60 pixels."
)
with pytest.raises(ValidationError) as exc:
with make_dummy_image(width=41, height=60) as fp:
validator(fp)
assert (
exc.value.messages[0]
== "Image `something.jpg` is too wide. The maximum width is 40 pixels."
)
with pytest.raises(ValidationError) as exc:
with make_dummy_image(width=41, height=61) as fp:
validator(fp)
assert (
exc.value.messages[0]
== "Image `something.jpg` is too big. Image should be at most 40x60 pixels."
)
def test_help_text(self):
validator = validators.ImageMaxSizeValidator(640, 480)
assert str(validator.get_help_text()) == 'Maximum dimensions: 640x480 pixels'
validator = validators.ImageMaxSizeValidator(640, 0)
assert str(validator.get_help_text()) == 'Maximum image width: 640 pixels'
validator = validators.ImageMaxSizeValidator(0, 480)
assert str(validator.get_help_text()) == 'Maximum image height: 480 pixels'
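# Combined-usage sketch (illustrative only, not part of the test suite; the allowed values
# are hypothetical): each validator is an independent callable, so a caller can run a list
# of them against one file and let the first failing check raise ValidationError.
def _example_combined_checks():
    checks = [
        validators.ExtensionValidator(allowed=['jpg', 'jpeg', 'png']),
        validators.SizeValidator(limit_value=1024 * 1024),
        validators.MimeTypeValidator(allowed=['image/*']),
    ]
    with make_dummy_image(width=64, height=64) as fp:
        for check in checks:
            fp.seek(0)  # rewind defensively; the mimetype check reads from the stream
            check(fp)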
|
from webdav.exceptions import *
from webdav.urn import Urn
from os.path import exists
class ConnectionSettings:
def is_valid(self):
pass
def valid(self):
try:
self.is_valid()
except OptionNotValid:
return False
else:
return True
class WebDAVSettings(ConnectionSettings):
ns = "webdav:"
prefix = "webdav_"
keys = {'hostname', 'login', 'password', 'token', 'root', 'cert_path', 'key_path', 'recv_speed', 'send_speed', 'verbose', 'conn_timeout'}
def __init__(self, options):
self.options = dict()
for key in self.keys:
value = options.get(key, '')
self.options[key] = value
self.__dict__[key] = value
self.root = Urn(self.root).quote() if self.root else ''
self.root = self.root.rstrip(Urn.separate)
def is_valid(self):
if not self.hostname:
raise OptionNotValid(name="hostname", value=self.hostname, ns=self.ns)
if self.cert_path and not exists(self.cert_path):
raise OptionNotValid(name="cert_path", value=self.cert_path, ns=self.ns)
if self.key_path and not exists(self.key_path):
raise OptionNotValid(name="key_path", value=self.key_path, ns=self.ns)
if self.key_path and not self.cert_path:
raise OptionNotValid(name="cert_path", value=self.cert_path, ns=self.ns)
if self.password and not self.login:
raise OptionNotValid(name="login", value=self.login, ns=self.ns)
if not self.token and not self.login:
raise OptionNotValid(name="login", value=self.login, ns=self.ns)
class ProxySettings(ConnectionSettings):
ns = "proxy:"
prefix = "proxy_"
keys = {'hostname', 'login', 'password'}
def __init__(self, options):
self.options = dict()
for key in self.keys:
value = options.get(key, '')
self.options[key] = value
self.__dict__[key] = value
def is_valid(self):
if self.password and not self.login:
raise OptionNotValid(name="login", value=self.login, ns=self.ns)
if self.login or self.password:
if not self.hostname:
raise OptionNotValid(name="hostname", value=self.hostname, ns=self.ns)
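# Usage sketch (hostname and credentials are placeholders): settings objects are built from
# a plain dict of option values; is_valid() raises OptionNotValid on the first bad option,
# while valid() folds that outcome into a True/False answer.
if __name__ == '__main__':
    demo_options = {
        'hostname': 'https://webdav.example.com',
        'login': 'demo',
        'password': 'secret',
    }
    settings = WebDAVSettings(demo_options)
    settings.is_valid()          # raises OptionNotValid if a required option is missing
    print('valid:', settings.valid())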
|
# Copyright 2012 Jeffrey R. Spies
# License: Apache License, Version 2.0
# Website: http://jspi.es/benchmark
from . import __VERSION__, __URL__
from .Benchmark import Benchmark
import time
import platform
import os
import sys
class BenchmarkProgram(object):
def __init__(self, module="__main__", **kwargs):
        if isinstance(module, str):
            self.module = __import__(module)
        else:
            # module was passed as an already-imported module object
            self.module = module
        benchmarks = self.loadFromModule(self.module)
totalRuns = 0
objects = []
for obj in benchmarks:
obj = obj(**kwargs)
obj.run()
objects.append(obj)
totalRuns += obj.getTotalRuns()
title = 'Benchmark Report'
        info = 'Each of the above %s runs was run in random, non-consecutive order by' % str(totalRuns)
info += os.linesep
info += '`benchmark` v' + __VERSION__ + ' (' + __URL__ + ') with Python ' + platform.python_version()
info += os.linesep
info += '%s-%s-%s on %s' % (platform.system(), platform.release(), platform.machine(), time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())) + '.'
sys.stdout.write(self.printMarkdown(objects, title, info, **kwargs))
def printMarkdown(self, benchmarks, title, info, **kwargs):
lines = ''
lines += os.linesep
lines += title
lines += os.linesep + '='*len(title)
lines += os.linesep*2
for obj in benchmarks:
if obj.label:
title = obj.label
else:
title = obj.__class__.__name__
title = title.replace('_', ' ')
labelLength = len(title) if len(title) > 5 else 5
lines += title
lines += os.linesep
lines += '-'*labelLength
lines += os.linesep*2
lines += obj.getTable(**kwargs)
lines += os.linesep*2
lines += info
lines += os.linesep*2
return lines
def loadFromModule(self, module):
benchmarks = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, Benchmark):
benchmarks.append(obj)
return benchmarks
main = BenchmarkProgram
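# Usage sketch (assumption: the timed-method conventions live in .Benchmark and are not
# shown in this file, so the subclass body is left abstract). A benchmark script is
# expected to end the same way a unittest script does:
#
#     import benchmark
#
#     class MyBenchmark(benchmark.Benchmark):
#         ...  # timed methods as defined by the Benchmark base class
#
#     if __name__ == '__main__':
#         benchmark.main()   # i.e. BenchmarkProgram('__main__'): discover, run, print the report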
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from argparse import RawTextHelpFormatter
from jdcloud_cli.cement.ext.ext_argparse import expose
from jdcloud_cli.controllers.base_controller import BaseController
from jdcloud_cli.client_factory import ClientFactory
from jdcloud_cli.parameter_builder import collect_user_args, collect_user_headers
from jdcloud_cli.printer import Printer
from jdcloud_cli.skeleton import Skeleton
class VpcController(BaseController):
class Meta:
label = 'vpc'
help = '京东云VPC'
description = '''
vpc cli 子命令,VPC相关API。
OpenAPI文档地址为:https://docs.jdcloud.com/cn/virtual-private-cloud/api/overview
'''
stacked_on = 'base'
stacked_type = 'nested'
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--page-number'], dict(help="""(int) 页码, 默认为1, 取值范围:[1,∞), 页码超过总页数时, 显示最后一页 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 分页大小,默认为20,取值范围:[10,100] """, dest='pageSize', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) elasticIpIds - elasticip id数组条件,支持多个; elasticIpAddress - eip的IP地址,支持单个; chargeStatus - eip的费用支付状态,normal(正常状态) or overdue(预付费已到期) or arrear(欠费状态),支持单个; """, dest='filters', required=False)),
(['--tags'], dict(help="""(array: tagFilter) Tag筛选条件 """, dest='tags', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询弹性ip列表 ''',
description='''
查询弹性ip列表。
示例: jdc vpc describe-elastic-ips
''',
)
def describe_elastic_ips(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeElasticIpsRequest import DescribeElasticIpsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeElasticIpsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
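    # Invocation sketch (region and parameter values below are placeholders): every
    # subcommand in this controller follows the same generated pattern -- collect CLI args
    # and headers, build the matching SDK request, send it, and print the JSON response.
    # For example:
    #
    #   jdc vpc describe-elastic-ips --region-id cn-north-1 --page-size 20
    #   jdc vpc describe-elastic-ips --input-json file:///tmp/params.json
    #
    # Complex parameters (filters, specs, rule lists) are usually easiest to supply through
    # --input-json rather than as inline JSON strings.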
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--max-count'], dict(help="""(int) 购买弹性ip数量;取值范围:[1,100] """, dest='maxCount', type=int, required=True)),
(['--elastic-ip-address'], dict(help="""(string) 指定弹性ip地址进行创建,当申请创建多个弹性ip时,必须为空 """, dest='elasticIpAddress', required=False)),
(['--elastic-ip-spec'], dict(help="""(elasticIpSpec) 弹性ip规格 """, dest='elasticIpSpec', required=True)),
(['--user-tags'], dict(help="""(array: tag) 用户标签 """, dest='userTags', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 创建一个或者多个弹性Ip ''',
description='''
创建一个或者多个弹性Ip。
示例: jdc vpc create-elastic-ips --max-count 0 --elastic-ip-spec '{"":""}'
''',
)
def create_elastic_ips(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.CreateElasticIpsRequest import CreateElasticIpsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateElasticIpsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--elastic-ip-id'], dict(help="""(string) ElasticIp ID """, dest='elasticIpId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' ElasticIp资源信息详情 ''',
description='''
ElasticIp资源信息详情。
示例: jdc vpc describe-elastic-ip --elastic-ip-id xxx
''',
)
def describe_elastic_ip(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeElasticIpRequest import DescribeElasticIpRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeElasticIpRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--elastic-ip-id'], dict(help="""(string) ElasticIp ID """, dest='elasticIpId', required=True)),
(['--bandwidth-mbps'], dict(help="""(int) 弹性公网IP的限速(单位:Mbps),取值范围为[1-200] """, dest='bandwidthMbps', type=int, required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改弹性IP ''',
description='''
修改弹性IP。
示例: jdc vpc modify-elastic-ip --elastic-ip-id xxx --bandwidth-mbps 0
''',
)
def modify_elastic_ip(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyElasticIpRequest import ModifyElasticIpRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyElasticIpRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--elastic-ip-id'], dict(help="""(string) ElasticIp ID """, dest='elasticIpId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除弹性Ip ''',
description='''
删除弹性Ip。
示例: jdc vpc delete-elastic-ip --elastic-ip-id xxx
''',
)
def delete_elastic_ip(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DeleteElasticIpRequest import DeleteElasticIpRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteElasticIpRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--page-number'], dict(help="""(int) 页码, 默认为1, 取值范围:[1,∞), 页码超过总页数时, 显示最后一页 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 分页大小,默认为20,取值范围:[10,100] """, dest='pageSize', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) NA """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询Acl列表 ''',
description='''
查询Acl列表。
示例: jdc vpc describe-network-acls
''',
)
def describe_network_acls(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeNetworkAclsRequest import DescribeNetworkAclsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeNetworkAclsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-id'], dict(help="""(string) 私有网络id """, dest='vpcId', required=True)),
(['--network-acl-name'], dict(help="""(string) networkAcl名称 """, dest='networkAclName', required=True)),
(['--description'], dict(help="""(string) 描述,允许输入UTF-8编码下的全部字符,不超过256字符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 创建networkAcl接口 ''',
description='''
创建networkAcl接口。
示例: jdc vpc create-network-acl --vpc-id xxx --network-acl-name xxx
''',
)
def create_network_acl(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.CreateNetworkAclRequest import CreateNetworkAclRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateNetworkAclRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-acl-id'], dict(help="""(string) networkAclId ID """, dest='networkAclId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询networkAcl资源详情 ''',
description='''
查询networkAcl资源详情。
示例: jdc vpc describe-network-acl --network-acl-id xxx
''',
)
def describe_network_acl(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeNetworkAclRequest import DescribeNetworkAclRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeNetworkAclRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-acl-id'], dict(help="""(string) networkAclId ID """, dest='networkAclId', required=True)),
(['--network-acl-name'], dict(help="""(string) networkAcl名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符 """, dest='networkAclName', required=False)),
(['--description'], dict(help="""(string) 描述,允许输入UTF-8编码下的全部字符,不超过256字符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改networkAcl接口 ''',
description='''
修改networkAcl接口。
示例: jdc vpc modify-network-acl --network-acl-id xxx
''',
)
def modify_network_acl(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyNetworkAclRequest import ModifyNetworkAclRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyNetworkAclRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-acl-id'], dict(help="""(string) networkAclId ID """, dest='networkAclId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除networkAcl接口 ''',
description='''
删除networkAcl接口。
示例: jdc vpc delete-network-acl --network-acl-id xxx
''',
)
def delete_network_acl(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DeleteNetworkAclRequest import DeleteNetworkAclRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteNetworkAclRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-acl-id'], dict(help="""(string) networkAclId ID """, dest='networkAclId', required=True)),
(['--subnet-ids'], dict(help="""(array: string) networkAcl要绑定的子网ID列表, subnet已被其他networkAcl绑定时,自动解绑 """, dest='subnetIds', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 给子网绑定networkAcl接口 ''',
description='''
给子网绑定networkAcl接口。
示例: jdc vpc associate-network-acl --network-acl-id xxx
''',
)
def associate_network_acl(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.AssociateNetworkAclRequest import AssociateNetworkAclRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AssociateNetworkAclRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-acl-id'], dict(help="""(string) networkAclId ID """, dest='networkAclId', required=True)),
(['--subnet-id'], dict(help="""(string) networkAcl要解绑的子网ID """, dest='subnetId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 给子网解绑NetworkAcl接口 ''',
description='''
给子网解绑NetworkAcl接口。
示例: jdc vpc disassociate-network-acl --network-acl-id xxx --subnet-id xxx
''',
)
def disassociate_network_acl(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DisassociateNetworkAclRequest import DisassociateNetworkAclRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DisassociateNetworkAclRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-acl-id'], dict(help="""(string) networkAclId ID """, dest='networkAclId', required=True)),
(['--network-acl-rule-specs'], dict(help="""(array: addNetworkAclRuleSpec) networkAcl规则列表 """, dest='networkAclRuleSpecs', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 添加networkAcl规则接口 ''',
description='''
添加networkAcl规则接口。
示例: jdc vpc add-network-acl-rules --network-acl-id xxx --network-acl-rule-specs ['{"":""}']
''',
)
def add_network_acl_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.AddNetworkAclRulesRequest import AddNetworkAclRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AddNetworkAclRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-acl-id'], dict(help="""(string) networkAclId ID """, dest='networkAclId', required=True)),
(['--rule-ids'], dict(help="""(array: string) networkAcl规则ID列表 """, dest='ruleIds', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 移除networkAcl规则 ''',
description='''
移除networkAcl规则。
示例: jdc vpc remove-network-acl-rules --network-acl-id xxx
''',
)
def remove_network_acl_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.RemoveNetworkAclRulesRequest import RemoveNetworkAclRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = RemoveNetworkAclRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-acl-id'], dict(help="""(string) networkAclId ID """, dest='networkAclId', required=True)),
(['--modify-network-acl-rule-specs'], dict(help="""(array: modifyNetworkAclRuleSpec) networkAcl规则列表 """, dest='modifyNetworkAclRuleSpecs', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改networkAcl接口 ''',
description='''
修改networkAcl接口。
示例: jdc vpc modify-network-acl-rules --network-acl-id xxx --modify-network-acl-rule-specs ['{"":""}']
''',
)
def modify_network_acl_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyNetworkAclRulesRequest import ModifyNetworkAclRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyNetworkAclRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--page-number'], dict(help="""(int) 页码, 默认为1, 取值范围:[1,∞), 页码超过总页数时, 显示最后一页 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 分页大小,默认为20,取值范围:[10,100] """, dest='pageSize', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) networkInterfaceIds - 弹性网卡ID列表,支持多个; networkInterfaceNames - 弹性网卡名称列表,支持多个; vpcId - 弹性网卡所属vpc Id,支持单个; subnetId - 弹性网卡所属子网Id,支持单个; role - 网卡角色,取值范围:Primary(主网卡)、Secondary(辅助网卡)、Managed (受管网卡),支持单个; """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询弹性网卡列表 ''',
description='''
查询弹性网卡列表。
示例: jdc vpc describe-network-interfaces
''',
)
def describe_network_interfaces(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeNetworkInterfacesRequest import DescribeNetworkInterfacesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeNetworkInterfacesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--subnet-id'], dict(help="""(string) 子网ID """, dest='subnetId', required=True)),
(['--az'], dict(help="""(string) 可用区,用户的默认可用区,该参数无效,不建议使用 """, dest='az', required=False)),
(['--network-interface-name'], dict(help="""(string) 网卡名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符。 """, dest='networkInterfaceName', required=False)),
(['--primary-ip-address'], dict(help="""(string) 网卡主IP,如果不指定,会自动从子网中分配 """, dest='primaryIpAddress', required=False)),
(['--secondary-ip-addresses'], dict(help="""(array: string) SecondaryIp列表 """, dest='secondaryIpAddresses', required=False)),
(['--secondary-ip-count'], dict(help="""(int) 自动分配的SecondaryIp数量 """, dest='secondaryIpCount', type=int, required=False)),
(['--security-groups'], dict(help="""(array: string) 要绑定的安全组ID列表,最多指定5个安全组 """, dest='securityGroups', required=False)),
(['--sanity-check'], dict(help="""(int) 源和目标IP地址校验,取值为0或者1,默认为1 """, dest='sanityCheck', type=int, required=False)),
(['--description'], dict(help="""(string) 描述, 允许输入UTF-8编码下的全部字符,不超过256字符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 创建网卡接口,只能创建辅助网卡 ''',
description='''
创建网卡接口,只能创建辅助网卡。
示例: jdc vpc create-network-interface --subnet-id xxx
''',
)
def create_network_interface(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.CreateNetworkInterfaceRequest import CreateNetworkInterfaceRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateNetworkInterfaceRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-interface-id'], dict(help="""(string) networkInterface ID """, dest='networkInterfaceId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询弹性网卡信息详情 ''',
description='''
查询弹性网卡信息详情。
示例: jdc vpc describe-network-interface --network-interface-id xxx
''',
)
def describe_network_interface(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeNetworkInterfaceRequest import DescribeNetworkInterfaceRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeNetworkInterfaceRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-interface-id'], dict(help="""(string) networkInterface ID """, dest='networkInterfaceId', required=True)),
(['--network-interface-name'], dict(help="""(string) 弹性网卡名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符 """, dest='networkInterfaceName', required=False)),
(['--description'], dict(help="""(string) 描述,允许输入UTF-8编码下的全部字符,不超过256字符 """, dest='description', required=False)),
(['--security-groups'], dict(help="""(array: string) 以覆盖原有安全组的方式更新的安全组。如果更新安全组ID列表,最多5个安全组 """, dest='securityGroups', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改弹性网卡接口 ''',
description='''
修改弹性网卡接口。
示例: jdc vpc modify-network-interface --network-interface-id xxx
''',
)
def modify_network_interface(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyNetworkInterfaceRequest import ModifyNetworkInterfaceRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyNetworkInterfaceRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-interface-id'], dict(help="""(string) networkInterface ID """, dest='networkInterfaceId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除弹性网卡接口 ''',
description='''
删除弹性网卡接口。
示例: jdc vpc delete-network-interface --network-interface-id xxx
''',
)
def delete_network_interface(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DeleteNetworkInterfaceRequest import DeleteNetworkInterfaceRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteNetworkInterfaceRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-interface-id'], dict(help="""(string) networkInterface ID """, dest='networkInterfaceId', required=True)),
(['--elastic-ip-id'], dict(help="""(string) 绑定的弹性Ip Id """, dest='elasticIpId', required=False)),
(['--private-ip-address'], dict(help="""(string) 绑定弹性Ip到指定的privateIp """, dest='privateIpAddress', required=False)),
(['--elastic-ip-address'], dict(help="""(string) 绑定的弹性Ip地址 """, dest='elasticIpAddress', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 给网卡绑定弹性Ip接口 ''',
description='''
给网卡绑定弹性Ip接口。
示例: jdc vpc associate-elastic-ip --network-interface-id xxx
''',
)
def associate_elastic_ip(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.AssociateElasticIpRequest import AssociateElasticIpRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AssociateElasticIpRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-interface-id'], dict(help="""(string) networkInterface ID """, dest='networkInterfaceId', required=True)),
(['--elastic-ip-id'], dict(help="""(string) 指定解绑的弹性Ip Id """, dest='elasticIpId', required=False)),
(['--elastic-ip-address'], dict(help="""(string) 指定解绑的弹性Ip地址 """, dest='elasticIpAddress', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 给网卡解绑弹性Ip接口 ''',
description='''
给网卡解绑弹性Ip接口。
示例: jdc vpc disassociate-elastic-ip --network-interface-id xxx
''',
)
def disassociate_elastic_ip(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DisassociateElasticIpRequest import DisassociateElasticIpRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DisassociateElasticIpRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-interface-id'], dict(help="""(string) networkInterface ID """, dest='networkInterfaceId', required=True)),
(['--force'], dict(help="""(bool) secondary ip被其他接口占用时,是否抢占。false:非抢占重分配,true:抢占重分配,默认抢占重分配。默认值:true """, dest='force', required=False)),
(['--secondary-ips'], dict(help="""(array: string) 指定分配的secondaryIp地址 """, dest='secondaryIps', required=False)),
(['--secondary-ip-count'], dict(help="""(int) 指定自动分配的secondaryIp个数 """, dest='secondaryIpCount', type=int, required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 给网卡分配secondaryIp接口 ''',
description='''
给网卡分配secondaryIp接口。
示例: jdc vpc assign-secondary-ips --network-interface-id xxx
''',
)
def assign_secondary_ips(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.AssignSecondaryIpsRequest import AssignSecondaryIpsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AssignSecondaryIpsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-interface-id'], dict(help="""(string) networkInterface ID """, dest='networkInterfaceId', required=True)),
(['--secondary-ips'], dict(help="""(array: string) 指定删除的secondaryIp地址 """, dest='secondaryIps', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 给网卡删除secondaryIp接口 ''',
description='''
给网卡删除secondaryIp接口。
示例: jdc vpc unassign-secondary-ips --network-interface-id xxx
''',
)
def unassign_secondary_ips(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.UnassignSecondaryIpsRequest import UnassignSecondaryIpsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = UnassignSecondaryIpsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--page-number'], dict(help="""(int) 页码, 默认为1, 取值范围:[1,∞), 页码超过总页数时, 显示最后一页 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 分页大小,默认为20,取值范围:[10,100] """, dest='pageSize', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) networkSecurityGroupIds - 安全组ID列表,支持多个; networkSecurityGroupNames - 安全组名称列表,支持多个; vpcId - 安全组所属vpc Id,支持单个; """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询安全组列表 ''',
description='''
查询安全组列表。
示例: jdc vpc describe-network-security-groups
''',
)
def describe_network_security_groups(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeNetworkSecurityGroupsRequest import DescribeNetworkSecurityGroupsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeNetworkSecurityGroupsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-id'], dict(help="""(string) 私有网络ID """, dest='vpcId', required=True)),
(['--network-security-group-name'], dict(help="""(string) 安全组名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符。 """, dest='networkSecurityGroupName', required=True)),
(['--description'], dict(help="""(string) 描述, 允许输入UTF-8编码下的全部字符,不超过256字符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 创建安全组 ''',
description='''
创建安全组。
示例: jdc vpc create-network-security-group --vpc-id xxx --network-security-group-name xxx
''',
)
def create_network_security_group(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.CreateNetworkSecurityGroupRequest import CreateNetworkSecurityGroupRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateNetworkSecurityGroupRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-security-group-id'], dict(help="""(string) NetworkSecurityGroup ID """, dest='networkSecurityGroupId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询安全组信息详情 ''',
description='''
查询安全组信息详情。
示例: jdc vpc describe-network-security-group --network-security-group-id xxx
''',
)
def describe_network_security_group(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeNetworkSecurityGroupRequest import DescribeNetworkSecurityGroupRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeNetworkSecurityGroupRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-security-group-id'], dict(help="""(string) NetworkSecurityGroup ID """, dest='networkSecurityGroupId', required=True)),
(['--network-security-group-name'], dict(help="""(string) 安全组的名字。名称取值范围:1-32个中文、英文大小写的字母、数字和下划线分隔符 """, dest='networkSecurityGroupName', required=False)),
(['--description'], dict(help="""(string) 安全组的描述,取值范围:0-256个UTF-8编码下的全部字符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改安全组属性 ''',
description='''
修改安全组属性。
示例: jdc vpc modify-network-security-group --network-security-group-id xxx
''',
)
def modify_network_security_group(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyNetworkSecurityGroupRequest import ModifyNetworkSecurityGroupRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyNetworkSecurityGroupRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-security-group-id'], dict(help="""(string) NetworkSecurityGroup ID """, dest='networkSecurityGroupId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除安全组 ''',
description='''
删除安全组。
示例: jdc vpc delete-network-security-group --network-security-group-id xxx
''',
)
def delete_network_security_group(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DeleteNetworkSecurityGroupRequest import DeleteNetworkSecurityGroupRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteNetworkSecurityGroupRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-security-group-id'], dict(help="""(string) NetworkSecurityGroup ID """, dest='networkSecurityGroupId', required=True)),
(['--network-security-group-rule-specs'], dict(help="""(array: addSecurityGroupRules) 安全组规则信息 """, dest='networkSecurityGroupRuleSpecs', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 添加安全组规则 ''',
description='''
添加安全组规则。
示例: jdc vpc add-network-security-group-rules --network-security-group-id xxx --network-security-group-rule-specs ['{"":""}']
''',
)
def add_network_security_group_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.AddNetworkSecurityGroupRulesRequest import AddNetworkSecurityGroupRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AddNetworkSecurityGroupRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-security-group-id'], dict(help="""(string) NetworkSecurityGroup ID """, dest='networkSecurityGroupId', required=True)),
(['--rule-ids'], dict(help="""(array: string) 安全组规则Id列表 """, dest='ruleIds', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 移除安全组规则 ''',
description='''
移除安全组规则。
示例: jdc vpc remove-network-security-group-rules --network-security-group-id xxx
''',
)
def remove_network_security_group_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.RemoveNetworkSecurityGroupRulesRequest import RemoveNetworkSecurityGroupRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = RemoveNetworkSecurityGroupRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--network-security-group-id'], dict(help="""(string) NetworkSecurityGroup ID """, dest='networkSecurityGroupId', required=True)),
(['--modify-security-group-rule-specs'], dict(help="""(array: modifySecurityGroupRules) 安全组规则信息 """, dest='modifySecurityGroupRuleSpecs', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改安全组规则 ''',
description='''
修改安全组规则。
示例: jdc vpc modify-network-security-group-rules --network-security-group-id xxx --modify-security-group-rule-specs ['{"":""}']
''',
)
def modify_network_security_group_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyNetworkSecurityGroupRulesRequest import ModifyNetworkSecurityGroupRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyNetworkSecurityGroupRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--type'], dict(help="""(string) 资源类型,取值范围:vpc、elastic_ip、subnet、security_group、vpcpeering、network_interface(配额只统计辅助网卡)、acl、aclRule、routeTable、staticRoute、propagatedRoute、securityGroupRule """, dest='type', required=True)),
(['--parent-resource-id'], dict(help="""(string) type为vpc、elastic_ip、network_interface不设置, type为subnet、security_group、vpcpeering、acl、routeTable设置为vpcId, type为aclRule设置为aclId, type为staticRoute、propagatedRoute设置为routeTableId, type为securityGroupRule为securityGroupId """, dest='parentResourceId', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询配额信息 ''',
description='''
查询配额信息。
示例: jdc vpc describe-quota --type xxx
''',
)
def describe_quota(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeQuotaRequest import DescribeQuotaRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeQuotaRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--page-number'], dict(help="""(int) 页码, 默认为1, 取值范围:[1,∞), 页码超过总页数时, 显示最后一页 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 分页大小,默认为20,取值范围:[10,100] """, dest='pageSize', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) routeTableIds - 路由表ID列表,支持多个; routeTableNames - 路由表名称列表,支持多个; vpcId - 路由表所属vpc Id,支持单个; """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询路由表列表 ''',
description='''
查询路由表列表。
示例: jdc vpc describe-route-tables
''',
)
def describe_route_tables(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeRouteTablesRequest import DescribeRouteTablesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeRouteTablesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-id'], dict(help="""(string) 路由表所属的私有网络ID """, dest='vpcId', required=True)),
(['--route-table-name'], dict(help="""(string) 路由表名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符。 """, dest='routeTableName', required=True)),
(['--description'], dict(help="""(string) 描述, 允许输入UTF-8编码下的全部字符,不超过256字符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 创建路由表 ''',
description='''
创建路由表。
示例: jdc vpc create-route-table --vpc-id xxx --route-table-name xxx
''',
)
def create_route_table(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.CreateRouteTableRequest import CreateRouteTableRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateRouteTableRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--route-table-id'], dict(help="""(string) RouteTable ID """, dest='routeTableId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询路由表信息详情 ''',
description='''
查询路由表信息详情。
示例: jdc vpc describe-route-table --route-table-id xxx
''',
)
def describe_route_table(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeRouteTableRequest import DescribeRouteTableRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeRouteTableRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--route-table-id'], dict(help="""(string) RouteTable ID """, dest='routeTableId', required=True)),
(['--route-table-name'], dict(help="""(string) 路由表的名字。名称取值范围:1-32个中文、英文大小写的字母、数字和下划线分隔符 """, dest='routeTableName', required=False)),
(['--description'], dict(help="""(string) 路由表的描述,取值范围:0-256个UTF-8编码下的全部字符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改路由表属性 ''',
description='''
修改路由表属性。
示例: jdc vpc modify-route-table --route-table-id xxx
''',
)
def modify_route_table(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyRouteTableRequest import ModifyRouteTableRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyRouteTableRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--route-table-id'], dict(help="""(string) RouteTable ID """, dest='routeTableId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除路由表 ''',
description='''
删除路由表。
示例: jdc vpc delete-route-table --route-table-id xxx
''',
)
def delete_route_table(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DeleteRouteTableRequest import DeleteRouteTableRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteRouteTableRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--route-table-id'], dict(help="""(string) RouteTable ID """, dest='routeTableId', required=True)),
(['--route-table-rule-specs'], dict(help="""(array: addRouteTableRules) 路由表规则信息 """, dest='routeTableRuleSpecs', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 添加路由表规则 ''',
description='''
添加路由表规则。
示例: jdc vpc add-route-table-rules --route-table-id xxx --route-table-rule-specs ['{"":""}']
''',
)
def add_route_table_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.AddRouteTableRulesRequest import AddRouteTableRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AddRouteTableRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--route-table-id'], dict(help="""(string) RouteTable ID """, dest='routeTableId', required=True)),
(['--rule-ids'], dict(help="""(array: string) 路由表规则Id列表 """, dest='ruleIds', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 移除路由表规则 ''',
description='''
移除路由表规则。
示例: jdc vpc remove-route-table-rules --route-table-id xxx
''',
)
def remove_route_table_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.RemoveRouteTableRulesRequest import RemoveRouteTableRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = RemoveRouteTableRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--route-table-id'], dict(help="""(string) RouteTable ID """, dest='routeTableId', required=True)),
(['--modify-route-table-rule-specs'], dict(help="""(array: modifyRouteTableRules) 路由表规则信息 """, dest='modifyRouteTableRuleSpecs', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改路由表规则 ''',
description='''
修改路由表规则。
示例: jdc vpc modify-route-table-rules --route-table-id xxx --modify-route-table-rule-specs ['{"":""}']
''',
)
def modify_route_table_rules(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyRouteTableRulesRequest import ModifyRouteTableRulesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyRouteTableRulesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--route-table-id'], dict(help="""(string) RouteTable ID """, dest='routeTableId', required=True)),
(['--subnet-ids'], dict(help="""(array: string) 路由表要绑定的子网ID列表, subnet已被其他路由表绑定时,自动解绑。 """, dest='subnetIds', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 路由表绑定子网接口 ''',
description='''
路由表绑定子网接口。
示例: jdc vpc associate-route-table --route-table-id xxx
''',
)
def associate_route_table(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.AssociateRouteTableRequest import AssociateRouteTableRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AssociateRouteTableRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--route-table-id'], dict(help="""(string) RouteTable ID """, dest='routeTableId', required=True)),
(['--subnet-id'], dict(help="""(string) 路由表要解绑的子网ID,解绑后子网绑定默认路由表 """, dest='subnetId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 给路由表解绑子网接口 ''',
description='''
给路由表解绑子网接口。
示例: jdc vpc disassociate-route-table --route-table-id xxx --subnet-id xxx
''',
)
def disassociate_route_table(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DisassociateRouteTableRequest import DisassociateRouteTableRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DisassociateRouteTableRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--page-number'], dict(help="""(int) 页码, 默认为1, 取值范围:[1,∞), 页码超过总页数时, 显示最后一页 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 分页大小,默认为20,取值范围:[10,100] """, dest='pageSize', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) subnetIds - subnet ID列表,支持多个; subnetNames - subnet名称列表,支持多个; routeTableId - 子网关联路由表Id,支持单个; aclId - 子网关联acl Id,支持单个; vpcId - 子网所属VPC Id,支持单个; """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询子网列表 ''',
description='''
查询子网列表。
示例: jdc vpc describe-subnets
''',
)
def describe_subnets(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeSubnetsRequest import DescribeSubnetsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeSubnetsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-id'], dict(help="""(string) 子网所属vpc的Id """, dest='vpcId', required=True)),
(['--subnet-name'], dict(help="""(string) 子网名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符。 """, dest='subnetName', required=True)),
(['--address-prefix'], dict(help="""(string) 子网网段,vpc内子网网段不能重叠,cidr的取值范围:10.0.0.0/8、172.16.0.0/12和192.168.0.0/16及它们包含的子网,且子网掩码长度为16-28之间,如果vpc含有cidr,则必须为vpc所在cidr的子网 """, dest='addressPrefix', required=True)),
(['--route-table-id'], dict(help="""(string) 子网关联的路由表Id, 默认为vpc的默认路由表 """, dest='routeTableId', required=False)),
(['--description'], dict(help="""(string) 子网描述信息,允许输入UTF-8编码下的全部字符,不超过256字符。 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 创建子网 ''',
description='''
创建子网。
示例: jdc vpc create-subnet --vpc-id xxx --subnet-name xxx --address-prefix xxx
''',
)
def create_subnet(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.CreateSubnetRequest import CreateSubnetRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateSubnetRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--subnet-id'], dict(help="""(string) Subnet ID """, dest='subnetId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询子网信息详情 ''',
description='''
查询子网信息详情。
示例: jdc vpc describe-subnet --subnet-id xxx
''',
)
def describe_subnet(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeSubnetRequest import DescribeSubnetRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeSubnetRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--subnet-id'], dict(help="""(string) Subnet ID """, dest='subnetId', required=True)),
(['--subnet-name'], dict(help="""(string) 子网名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符。 """, dest='subnetName', required=False)),
(['--description'], dict(help="""(string) 子网描述信息,允许输入UTF-8编码下的全部字符,不超过256字符。 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改子网接口 ''',
description='''
修改子网接口。
示例: jdc vpc modify-subnet --subnet-id xxx
''',
)
def modify_subnet(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifySubnetRequest import ModifySubnetRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifySubnetRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--subnet-id'], dict(help="""(string) Subnet ID """, dest='subnetId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除子网 ''',
description='''
删除子网。
示例: jdc vpc delete-subnet --subnet-id xxx
''',
)
def delete_subnet(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DeleteSubnetRequest import DeleteSubnetRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteSubnetRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--page-number'], dict(help="""(int) 页码, 默认为1, 取值范围:[1,∞), 页码超过总页数时, 显示最后一页 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 分页大小,默认为20,取值范围:[10,100] """, dest='pageSize', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) vpcIds - vpc ID列表,支持多个; vpcNames - vpc名称列表,支持多个; """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询私有网络列表 ''',
description='''
查询私有网络列表。
示例: jdc vpc describe-vpcs
''',
)
def describe_vpcs(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeVpcsRequest import DescribeVpcsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeVpcsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-name'], dict(help="""(string) 私有网络名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符。 """, dest='vpcName', required=True)),
(['--address-prefix'], dict(help="""(string) 如果为空,则不限制网段,如果不为空,10.0.0.0/8、172.16.0.0/12和192.168.0.0/16及它们包含的子网,且子网掩码长度为16-28之间 """, dest='addressPrefix', required=False)),
(['--description'], dict(help="""(string) vpc描述,允许输入UTF-8编码下的全部字符,不超过256字符。 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 创建私有网络 ''',
description='''
创建私有网络。
示例: jdc vpc create-vpc --vpc-name xxx
''',
)
def create_vpc(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.CreateVpcRequest import CreateVpcRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateVpcRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-id'], dict(help="""(string) Vpc ID """, dest='vpcId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询Vpc信息详情 ''',
description='''
查询Vpc信息详情。
示例: jdc vpc describe-vpc --vpc-id xxx
''',
)
def describe_vpc(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeVpcRequest import DescribeVpcRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeVpcRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-id'], dict(help="""(string) Vpc ID """, dest='vpcId', required=True)),
(['--vpc-name'], dict(help="""(string) 私有网络名称,只允许输入中文、数字、大小写字母、英文下划线“_”及中划线“-”,不允许为空且不超过32字符。 """, dest='vpcName', required=False)),
(['--description'], dict(help="""(string) vpc描述,允许输入UTF-8编码下的全部字符,不超过256字符。 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改私有网络接口 ''',
description='''
修改私有网络接口。
示例: jdc vpc modify-vpc --vpc-id xxx
''',
)
def modify_vpc(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyVpcRequest import ModifyVpcRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyVpcRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-id'], dict(help="""(string) Vpc ID """, dest='vpcId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除私有网络 ''',
description='''
删除私有网络。
示例: jdc vpc delete-vpc --vpc-id xxx
''',
)
def delete_vpc(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DeleteVpcRequest import DeleteVpcRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteVpcRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--page-number'], dict(help="""(int) 页码, 默认为1, 取值范围:[1,∞), 页码超过总页数时, 显示最后一页 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 分页大小,默认为20,取值范围:[10,100] """, dest='pageSize', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) vpcPeeringIds - vpcPeering ID,支持多个; vpcPeeringNames - vpcPeering名称列表,支持多个; vpcId - vpcPeering本端Vpc Id,支持单个; remoteVpcId - vpcPeering对端Vpc Id,支持单个; """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询VpcPeering资源列表 ''',
description='''
查询VpcPeering资源列表。
示例: jdc vpc describe-vpc-peerings
''',
)
def describe_vpc_peerings(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeVpcPeeringsRequest import DescribeVpcPeeringsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeVpcPeeringsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-peering-name'], dict(help="""(string) VpcPeering的名字,不为空。名称取值范围:1-32个中文、英文大小写的字母、数字和下划线分隔符 """, dest='vpcPeeringName', required=True)),
(['--vpc-id'], dict(help="""(string) VpcPeering本端Vpc的Id """, dest='vpcId', required=True)),
(['--remote-vpc-id'], dict(help="""(string) VpcPeering对端Vpc的Id """, dest='remoteVpcId', required=True)),
(['--description'], dict(help="""(string) VpcPeering 描述,取值范围:0-256个中文、英文大小写的字母、数字和下划线分隔符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 创建VpcPeering接口 ''',
description='''
创建VpcPeering接口。
示例: jdc vpc create-vpc-peering --vpc-peering-name xxx --vpc-id xxx --remote-vpc-id xxx
''',
)
def create_vpc_peering(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.CreateVpcPeeringRequest import CreateVpcPeeringRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateVpcPeeringRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-peering-id'], dict(help="""(string) vpcPeeringId ID """, dest='vpcPeeringId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询VpcPeering资源详情 ''',
description='''
查询VpcPeering资源详情。
示例: jdc vpc describe-vpc-peering --vpc-peering-id xxx
''',
)
def describe_vpc_peering(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DescribeVpcPeeringRequest import DescribeVpcPeeringRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeVpcPeeringRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-peering-id'], dict(help="""(string) vpcPeeringId ID """, dest='vpcPeeringId', required=True)),
(['--vpc-peering-name'], dict(help="""(string) VpcPeering的名字,不为空。名称取值范围:1-32个中文、英文大小写的字母、数字和下划线分隔符 """, dest='vpcPeeringName', required=False)),
(['--description'], dict(help="""(string) VpcPeering 描述,取值范围:0-256个中文、英文大小写的字母、数字和下划线分隔符 """, dest='description', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 修改VpcPeering接口 ''',
description='''
修改VpcPeering接口。
示例: jdc vpc modify-vpc-peering --vpc-peering-id xxx
''',
)
def modify_vpc_peering(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.ModifyVpcPeeringRequest import ModifyVpcPeeringRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifyVpcPeeringRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID """, dest='regionId', required=False)),
(['--vpc-peering-id'], dict(help="""(string) vpcPeeringId ID """, dest='vpcPeeringId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除VpcPeering接口 ''',
description='''
删除VpcPeering接口。
示例: jdc vpc delete-vpc-peering --vpc-peering-id xxx
''',
)
def delete_vpc_peering(self):
client_factory = ClientFactory('vpc')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.vpc.apis.DeleteVpcPeeringRequest import DeleteVpcPeeringRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteVpcPeeringRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--api'], dict(help="""(string) api name """, choices=['describe-elastic-ips','create-elastic-ips','describe-elastic-ip','modify-elastic-ip','delete-elastic-ip','describe-network-acls','create-network-acl','describe-network-acl','modify-network-acl','delete-network-acl','associate-network-acl','disassociate-network-acl','add-network-acl-rules','remove-network-acl-rules','modify-network-acl-rules','describe-network-interfaces','create-network-interface','describe-network-interface','modify-network-interface','delete-network-interface','associate-elastic-ip','disassociate-elastic-ip','assign-secondary-ips','unassign-secondary-ips','describe-network-security-groups','create-network-security-group','describe-network-security-group','modify-network-security-group','delete-network-security-group','add-network-security-group-rules','remove-network-security-group-rules','modify-network-security-group-rules','describe-quota','describe-route-tables','create-route-table','describe-route-table','modify-route-table','delete-route-table','add-route-table-rules','remove-route-table-rules','modify-route-table-rules','associate-route-table','disassociate-route-table','describe-subnets','create-subnet','describe-subnet','modify-subnet','delete-subnet','describe-vpcs','create-vpc','describe-vpc','modify-vpc','delete-vpc','describe-vpc-peerings','create-vpc-peering','describe-vpc-peering','modify-vpc-peering','delete-vpc-peering',], required=True)),
],
formatter_class=RawTextHelpFormatter,
help=''' 生成单个API接口的json骨架空字符串 ''',
description='''
生成单个API接口的json骨架空字符串。
            示例: jdc vpc generate-skeleton --api describe-vpc ''',
)
def generate_skeleton(self):
skeleton = Skeleton('vpc', self.app.pargs.api)
skeleton.show()
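# Usage sketch (a hypothetical workflow, IDs elided): generate a parameter skeleton for an
# API, fill it in, and feed it back through --input-json, which every command above accepts:
#   jdc vpc generate-skeleton --api create-subnet > /tmp/create-subnet.json
#   jdc vpc create-subnet --input-json file:///tmp/create-subnet.json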
|
import pymongo
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.test
collection = db.students
# Cursor.count() was deprecated and later removed from PyMongo; count_documents() is the
# supported way to count matching documents.
count = collection.count_documents({})
print(count)
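# A filtered count works the same way (hypothetical field name shown for illustration):
# collection.count_documents({"age": {"$gt": 20}})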
|
import django_tables2 as tables
from django_tables2.utils import A # alias for Accessor
from website.apps.core.models import Source, Culture, Language
# Note, due to the current version of django_tables2 not merging in Meta classes
# https://github.com/bradleyayers/django-tables2/issues/85
# The work around is to inherit class Meta in the subclasses e.g.
# class Child(DataTable):
# class Meta(DataTable.Meta):
# pass
#
# another annoyance is that you can't override anything in Meta - it'll cause a
# NameError to be raised. The work around is this:
#
# class Child(DataTable):
# class Meta(DataTable.Meta):
# pass
# Meta.var = X
#
# Ugly, but it works.
class DataTable(tables.Table):
"""Parent class for Datatables"""
class Meta:
orderable = True
default = u''
attrs = {'class': "table table-bordered table-condensed", 'summary': ''}
class SourceIndexTable(DataTable):
"""Source Listing"""
author = tables.LinkColumn('source-detail', args=[A('slug')])
reference = tables.LinkColumn('source-detail', args=[A('slug')])
count = tables.LinkColumn('source-detail', args=[A('slug')])
class Meta(DataTable.Meta):
model = Source
exclude = ('id', 'editor', 'added', 'slug', 'comment', 'bibtex')
Meta.attrs['summary'] = 'Table of Sources'
class CultureIndexTable(DataTable):
"""Culture Listing"""
culture = tables.LinkColumn('culture-detail', args=[A('slug')])
class Meta(DataTable.Meta):
model = Culture
order_by = ('culture',)
sequence = ('culture',)
exclude = ('id', 'editor', 'added', 'slug', 'notes', 'languages')
Meta.attrs['summary'] = 'Table of Cultures'
class LanguageIndexTable(DataTable):
"""Language Listing"""
language = tables.LinkColumn('language-detail', args=[A('isocode')])
isocode = tables.LinkColumn('language-detail', args=[A('isocode')])
class Meta(DataTable.Meta):
model = Language
order_by = ('language',)
sequence = ('isocode', 'language', 'classification', 'abvdcode',)
exclude = ('id', 'editor', 'added', 'slug', 'notes',)
Meta.attrs['summary'] = 'Table of Languages'
|
# -*- coding: utf-8 -*-
import fs
from fs.base import FS
from malibu.text import parse_uri
from peewee import (
AutoField,
TextField,
)
from tubedlapi.model import BaseModel
from tubedlapi.model.fields import EncryptedBlobField
class Destination(BaseModel):
id = AutoField(primary_key=True)
name = TextField(unique=True)
url = EncryptedBlobField()
@property
def as_fs(self) -> FS:
''' Returns a filesystem implementation derived from fs.base.FS
'''
return fs.open_fs(self.url)
@property
def sanitized_url(self) -> str:
''' Returns a sanitized version of the underlying storage URL.
'''
uri = parse_uri(self.url)
password_value = uri.get('password')
if not password_value:
return self.url
sanitized = self.url.replace(password_value, '****')
return sanitized
def to_dict(self):
''' Dictionary representation of a Destination object
'''
return {
'id': self.id,
'name': self.name,
'url': self.sanitized_url,
}
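# Illustration (not part of the original module, hypothetical URL): for a Destination whose
# decrypted url is "ftp://user:secret@host/videos", sanitized_url yields
# "ftp://user:****@host/videos", while as_fs hands the full credentialed URL to fs.open_fs().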
|
# some training parameters
EPOCHS = 200
BATCH_SIZE = 32
NUM_CLASSES = 2
image_height = 256
image_width = 192
channels = 1
save_model_dir = "saved_model_static/model"
dataset_dir = "dataset_parametric/"
train_dir = dataset_dir + "train"
valid_dir = dataset_dir + "valid"
test_dir = dataset_dir + "test/"
# choose a network
# model = "resnet18"
# model = "resnet34"
model = "resnet50"
# model = "resnet101"
# model = "resnet152"
|
import torch
import torch.nn as nn
import torch.jit as jit
from torch.nn import Parameter
from torch.nn import functional as F
from parametrization import Parametrization
import math
import time
class MomentumLSTM(nn.Module):
def __init__(self, input_size, hidden_size, mu, epsilon, bias=True):
super(MomentumLSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(4 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.Tensor(4 * hidden_size, hidden_size))
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = nn.Parameter(torch.Tensor(4 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
# for momentumnet
self.mu = mu
self.epsilon = epsilon
self.reset_parameters(hidden_size)
def reset_parameters(self, hidden_size):
nn.init.orthogonal_(self.weight_ih)
nn.init.eye_(self.weight_hh)
nn.init.zeros_(self.bias_ih)
self.bias_ih.data[hidden_size:(2 * hidden_size)].fill_(1.0)
nn.init.zeros_(self.bias_hh)
self.bias_hh.data[hidden_size:(2 * hidden_size)].fill_(1.0)
def lstmcell(self, x, hidden, v):
hx, cx = hidden
hx = hx.squeeze() if hx.shape[1] > 1 else hx[0]
cx = cx.squeeze() if cx.shape[1] > 1 else cx[0]
x = x.view(-1, x.size(1))
v = v.squeeze() if v.shape[1] > 1 else v[0]
vy = self.mu * v + self.epsilon * (torch.mm(x, self.weight_ih.t()) + self.bias_ih)
gates = vy + (torch.mm(hx, self.weight_hh.t()) + self.bias_hh)
# gates = gates.squeeze()
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = torch.mul(cx, forgetgate) + torch.mul(ingate, cellgate)
hy = torch.mul(outgate, F.tanh(cy))
cy = cy.unsqueeze(0)
hy = hy.unsqueeze(0)
vy = vy.unsqueeze(0)
return hy, (hy, cy), vy
def forward(self, input_, hidden=None, v=None):
        # input_ is of dimensionality (#batch, time, input_size, ...)
outputs = []
for x in torch.unbind(input_, dim=0):
out_rnn, hidden, v = self.lstmcell(x, hidden, v)
if out_rnn.shape[1] > 1:
outputs.append(out_rnn.squeeze())
else:
outputs.append(out_rnn[0])
return torch.stack(outputs, dim=0), hidden, v
class LSTM(nn.Module):
def __init__(self, input_size, hidden_size, bias=True):
super(LSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(4 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.Tensor(4 * hidden_size, hidden_size))
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = nn.Parameter(torch.Tensor(4 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters(hidden_size)
def reset_parameters(self, hidden_size):
nn.init.orthogonal_(self.weight_ih)
nn.init.eye_(self.weight_hh)
nn.init.zeros_(self.bias_ih)
self.bias_ih.data[hidden_size:(2 * hidden_size)].fill_(1.0)
nn.init.zeros_(self.bias_hh)
self.bias_hh.data[hidden_size:(2 * hidden_size)].fill_(1.0)
def lstmcell(self, x, hidden):
hx, cx = hidden
hx = hx.squeeze() if hx.shape[1] > 1 else hx[0]
cx = cx.squeeze() if cx.shape[1] > 1 else cx[0]
x = x.view(-1, x.size(1))
gates = torch.mm(x, self.weight_ih.t()) + torch.mm(hx, self.weight_hh.t()) + self.bias_ih + self.bias_hh
# gates = gates.squeeze()
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = torch.mul(cx, forgetgate) + torch.mul(ingate, cellgate)
hy = torch.mul(outgate, F.tanh(cy))
cy = cy.unsqueeze(0)
hy = hy.unsqueeze(0)
return hy, (hy, cy)
def forward(self, input_, hidden=None):
        # input_ is of dimensionality (#batch, time, input_size, ...)
outputs = []
for x in torch.unbind(input_, dim=0):
out_rnn, hidden = self.lstmcell(x, hidden)
if out_rnn.shape[1] > 1:
outputs.append(out_rnn.squeeze())
else:
outputs.append(out_rnn[0])
return torch.stack(outputs, dim=0), hidden
class GRU(nn.Module):
def __init__(self, input_size, hidden_size, bias=True):
super(GRU, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size, hidden_size))
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters(hidden_size)
def reset_parameters(self, hidden_size):
nn.init.orthogonal_(self.weight_ih)
nn.init.eye_(self.weight_hh)
nn.init.zeros_(self.bias_ih)
self.bias_ih.data[0:hidden_size].fill_(1.0)
nn.init.zeros_(self.bias_hh)
self.bias_hh.data[0:hidden_size].fill_(1.0)
def grucell(self, x, hidden):
hidden = hidden.squeeze() if hidden.shape[1] > 1 else hidden[0]
x = x.view(-1, x.size(1))
gi = torch.mm(x, self.weight_ih.t()) + self.bias_ih
gh = torch.mm(hidden, self.weight_hh.t()) + self.bias_hh
i_r, i_i, i_n = gi.chunk(3, 1)
h_r, h_i, h_n = gh.chunk(3, 1)
resetgate = torch.sigmoid(i_r + h_r)
inputgate = torch.sigmoid(i_i + h_i)
newgate = torch.tanh(i_n + resetgate * h_n)
hy = newgate + inputgate * (hidden - newgate)
hy = hy.unsqueeze(0)
return hy, hy
def forward(self, input_, hidden=None):
        # input_ is of dimensionality (#batch, time, input_size, ...)
outputs = []
for x in torch.unbind(input_, dim=0):
out_rnn, hidden = self.grucell(x, hidden)
if out_rnn.shape[1] > 1:
outputs.append(out_rnn.squeeze())
else:
outputs.append(out_rnn[0])
return torch.stack(outputs, dim=0), hidden
class AdamLSTM(nn.Module):
def __init__(self, input_size, hidden_size, mu, epsilon, mus, bias=True):
super(AdamLSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(4 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.Tensor(4 * hidden_size, hidden_size))
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = nn.Parameter(torch.Tensor(4 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
# for momentumnet
self.mu = mu
self.epsilon = epsilon
self.mus = mus
self.reset_parameters(hidden_size)
def reset_parameters(self, hidden_size):
nn.init.orthogonal_(self.weight_ih)
nn.init.eye_(self.weight_hh)
nn.init.zeros_(self.bias_ih)
self.bias_ih.data[hidden_size:(2 * hidden_size)].fill_(1.0)
nn.init.zeros_(self.bias_hh)
self.bias_hh.data[hidden_size:(2 * hidden_size)].fill_(1.0)
def lstmcell(self, x, hidden, v, s):
hx, cx = hidden
hx = hx.squeeze() if hx.shape[1] > 1 else hx[0]
cx = cx.squeeze() if cx.shape[1] > 1 else cx[0]
x = x.view(-1, x.size(1))
v = v.squeeze() if v.shape[1] > 1 else v[0]
s = s.squeeze() if s.shape[1] > 1 else s[0]
grad_val = torch.mm(x, self.weight_ih.t()) + self.bias_ih
vy = self.mu * v + self.epsilon * grad_val
sy = self.mus * s + (1.0 - self.mus) * (grad_val * grad_val)
gates = vy/torch.sqrt(sy + 1e-16) + (torch.mm(hx, self.weight_hh.t()) + self.bias_hh)
# gates = gates.squeeze()
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = torch.mul(cx, forgetgate) + torch.mul(ingate, cellgate)
hy = torch.mul(outgate, F.tanh(cy))
cy = cy.unsqueeze(0)
hy = hy.unsqueeze(0)
vy = vy.unsqueeze(0)
sy = sy.unsqueeze(0)
return hy, (hy, cy), vy, sy
def forward(self, input_, hidden=None, v=None, s=None):
        # input_ is of dimensionality (#batch, time, input_size, ...)
outputs = []
for x in torch.unbind(input_, dim=0):
out_rnn, hidden, v, s = self.lstmcell(x, hidden, v, s)
if out_rnn.shape[1] > 1:
outputs.append(out_rnn.squeeze())
else:
outputs.append(out_rnn[0])
return torch.stack(outputs, dim=0), hidden, v, s
class NesterovLSTM(nn.Module):
def __init__(self, input_size, hidden_size, epsilon, restart, bias=True):
super(NesterovLSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(4 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.Tensor(4 * hidden_size, hidden_size))
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = nn.Parameter(torch.Tensor(4 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
# for momentumnet
self.epsilon = epsilon
self.restart = restart
self.reset_parameters(hidden_size)
def reset_parameters(self, hidden_size):
nn.init.orthogonal_(self.weight_ih)
nn.init.eye_(self.weight_hh)
nn.init.zeros_(self.bias_ih)
self.bias_ih.data[hidden_size:(2 * hidden_size)].fill_(1.0)
nn.init.zeros_(self.bias_hh)
self.bias_hh.data[hidden_size:(2 * hidden_size)].fill_(1.0)
def lstmcell(self, x, hidden, v, k):
hx, cx = hidden
hx = hx.squeeze() if hx.shape[1] > 1 else hx[0]
cx = cx.squeeze() if cx.shape[1] > 1 else cx[0]
x = x.view(-1, x.size(1))
v = v.squeeze() if v.shape[1] > 1 else v[0]
vy = (k-1.0)/(k+2.0) * v + self.epsilon * (torch.mm(x, self.weight_ih.t()) + self.bias_ih)
gates = vy + (torch.mm(hx, self.weight_hh.t()) + self.bias_hh)
# gates = gates.squeeze()
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = torch.mul(cx, forgetgate) + torch.mul(ingate, cellgate)
hy = torch.mul(outgate, F.tanh(cy))
cy = cy.unsqueeze(0)
hy = hy.unsqueeze(0)
vy = vy.unsqueeze(0)
return hy, (hy, cy), vy
def forward(self, input_, hidden=None, v=None):
        # input_ is of dimensionality (#batch, time, input_size, ...)
outputs = []
iter_indx = 0
for x in torch.unbind(input_, dim=0):
iter_indx = iter_indx + 1
out_rnn, hidden, v = self.lstmcell(x, hidden, v, k=iter_indx)
if self.restart > 0 and not (iter_indx % self.restart):
iter_indx = 0
if out_rnn.shape[1] > 1:
outputs.append(out_rnn.squeeze())
else:
outputs.append(out_rnn[0])
return torch.stack(outputs, dim=0), hidden, v
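# Minimal smoke-test sketch (an assumption, not part of the original file): the forward
# loops unbind dim 0 of the input and carry state across it, so we feed a
# (seq_len, batch, input_size) tensor and zero-initialise the hidden, cell and velocity
# states with the leading singleton dimension that lstmcell() expects.
if __name__ == "__main__":
    seq_len, batch, input_size, hidden_size = 5, 3, 10, 20
    cell = MomentumLSTM(input_size, hidden_size, mu=0.6, epsilon=0.6)
    x = torch.randn(seq_len, batch, input_size)
    h0 = torch.zeros(1, batch, hidden_size)
    c0 = torch.zeros(1, batch, hidden_size)
    v0 = torch.zeros(1, batch, 4 * hidden_size)
    out, (hn, cn), vn = cell(x, (h0, c0), v0)
    print(out.shape)  # expected: torch.Size([5, 3, 20])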
|
import flask
import data.db_session as db_session
from data.source import Location, Query, DataView, Subtype, LocationType, RequestMethod
from services.select_services import get_objects, search_object
from services.save_services import save_object
from flask_login import login_required
from decorators.admin import is_admin
blueprint = flask.Blueprint('process_edit', __name__, template_folder = '../../templates/process')
@blueprint.before_request
@login_required
@is_admin
def before_request():
""" Protect all of the admin endpoints. """
pass
type_variables = {
"location_template": "edit_location.html"
, "location_class": Location
, "query_template": "edit_query.html"
, "query_class": Query
, "view_template": "edit_view.html"
, "view_class": DataView
}
@blueprint.route('/<item_type>', methods=['POST', 'GET'])
def edit(item_type: str):
id = flask.request.args.get('id', default = None, type = int)
if flask.request.method == "GET":
        session = db_session.create_session()
        # Pre-initialise the template context so the render below cannot raise a
        # NameError if any of the lookups fails.
        locations = subtypes = request_methods = location_types = []
        data_obj = None
try:
locations = get_objects(Location, session)
subtypes = get_objects(Subtype, session)
request_methods = get_objects(RequestMethod, session)
location_types = get_objects(LocationType, session)
data_obj = search_object(id, type_variables[item_type+"_class"], session)
except Exception as error:
print(str(error))
finally:
session.close()
return flask.render_template(
type_variables[item_type+"_template"]
, item_type = item_type
, back_link = flask.request.referrer
, locations = locations
, subtypes = subtypes
, request_methods = request_methods
, location_types = location_types
, data_obj = data_obj
)
if flask.request.method == "POST":
data = flask.request.form
session = db_session.create_session()
try:
save_object(item_type, id, data, session)
session.commit()
except Exception as error:
print(str(error))
finally:
session.close()
return flask.redirect('/process')
|
"""
I’ll be using the Zippopotam.us REST API.
This API takes a country code and a zip code and returns
location data associated with that country and zip code.
For example, a GET request to http://api.zippopotam.us/us/90210
returns location data for the US zip code 90210 (Beverly Hills, CA).
"""
from fastapi import FastAPI
import requests
from fastapi.testclient import TestClient
app = FastAPI()
client = TestClient(app)
@app.get("/")
def get_location(country_code: str, zip_code: int):
response = requests.get(f"http://api.zippopotam.us/{country_code}/{zip_code}")
response = response.json()
return {"data": response}
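# Minimal test sketch (an assumption, not part of the original file) using the TestClient
# instantiated above; the route proxies the public Zippopotam.us API, so running this
# assumes network access.
def test_get_location_returns_data():
    resp = client.get("/", params={"country_code": "us", "zip_code": 90210})
    assert resp.status_code == 200
    assert "data" in resp.json()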
|
data = open("input.txt", "r").readlines()
polymer = data[0]
pair_insertion = {}
for line in data[2:]:
[token, replacement] = line.strip().split(" -> ")
pair_insertion[token] = replacement
result = [i for i in polymer.strip()]
for step in range(0, 10):
    expanded = []  # avoid shadowing the builtin next()
    for i, si in enumerate(result):
        if i < len(result) - 1:
            expanded.append(si)
            expanded.append(pair_insertion[result[i] + result[i + 1]])
        else:
            expanded.append(si)
    result = expanded
# Count every element actually present in the polymer, not just the rule outputs, so an
# element that never appears as an insertion value is still included in the min/max.
counts = [result.count(a) for a in set(result)]
print("The answer of part 1 is", max(counts) - min(counts))
|
import arcade
import json
from typing import List, Dict
with open("leaderboard.json", "r") as f:
prim_data = json.load(f)
def msort(l: List[int]) -> List[int]:
# base case
if len(l) < 2:
return l
left_side = msort(l[:len(l) // 2])
right_side = msort(l[len(l) // 2:])
sorted_list = []
left_marker = 0
right_marker = 0
while left_marker < len(left_side) and right_marker < len(right_side):
if left_side[left_marker] < right_side[right_marker]:
sorted_list.append(left_side[left_marker])
left_marker += 1
else:
sorted_list.append(right_side[right_marker])
right_marker += 1
if left_marker == len(left_side):
sorted_list.extend(right_side[right_marker:])
if right_marker == len(right_side):
sorted_list.extend(left_side[left_marker:])
return sorted_list
def output(prim_data: Dict) -> Dict:
""" Sort the dictionary so that key (score) in descending order, value (time) in ascending order
Args:
prim_data: The original data where key reps score, value reps time
Returns:
a "sorted" dictionary that has score in descending order and time in ascending order
{30: [6, 5], 40: [8, 2]} -> {40: [2, 8], 30: [5, 6]}
"""
final_d = {}
for score in msort([int(s) for s in prim_data.keys()])[::-1]:
for v in msort(prim_data[str(score)]):
if score not in final_d:
final_d[score] = []
final_d[score].append(v)
return final_d
data = output(prim_data)
print(data)
def leaderboard(data: Dict) -> List[List[str]]:
leader_list = []
for score in data.keys():
for time in data[score]:
if len(leader_list) < 4:
leader_list.append([str(score), "{0}:{1}".format(time // 60, time % 60)])
else:
break
return leader_list
m = leaderboard(data)
print("m",m)
# input_final = [["50", "3:15"],["50", "4:15"], ["40", "3:15"], ["30", "3:15"]]
# input = ["50", "3:15"]
# Score = "6580"
# Time = "3:15"
# do one row first
def slice_digit(s):
return [d if d != ":" else "hg" for d in list(s)]
def final_output():
scores = []
times = []
for i in m:
scores.append(slice_digit(i[0]))
d = i[1].find(":")
if len(i[1][d+1:]) <= 1:
i[1] = i[1][:d+1]+"0"+i[1][d+1:]
times.append(slice_digit(i[1]))
return scores, times
print(final_output()[0])
print(final_output()[1])
scores = final_output()[0]
times = final_output()[1]
SCREEN_WIDTH = 500
SCREEN_HEIGHT = 500
SCREEN_TITLE = "Drawing Text Example"
class MyGame(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
arcade.set_background_color(arcade.color.WHITE)
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
for i in range(len(scores)):
for j in range(len(scores[i])):
arcade.draw_texture_rectangle(100 + j * 20, 400 - i*40, 20, 24,
arcade.load_texture("number/n_" + str(scores[i][j]) + ".png"))
for i in range(len(times)):
for j in range(len(times[i])):
arcade.draw_texture_rectangle(300 + j * 20, 400 - i * 40, 20, 24,
arcade.load_texture("number/n_" + str(times[i][j]) + ".png"))
def main():
MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
arcade.run()
if __name__ == "__main__":
main()
|
import argparse
import unittest
from ffwd.ffwd_send import tag_type
class TestFFWDSend(unittest.TestCase):
def test_tag_type(self):
        self.assertEqual(('hello', 'world'), tag_type("hello:world"))
        self.assertEqual(('hello', 'world:two'), tag_type("hello:world:two"))
with self.assertRaises(argparse.ArgumentTypeError):
tag_type('hello')
|
import collections
import json
import logging
import argparse
from .. import init_logging
def main(args_):
parser = argparse.ArgumentParser()
parser.add_argument("files",
type=str,
nargs='*',
help="JSON files to summarize.")
args = parser.parse_args(args_)
summarize(args.files)
def summarize(files):
init_logging()
n_files = len(files)
if n_files < 1:
raise ValueError(f"'summarize' requires at least 1 file to summarize")
logging.info(f'{len(files)} files to summarize')
for file in files:
logging.info(f'Summary for {file}:')
with open(file, 'r') as f:
data = json.load(f)
logging.info(f"config: {data['config']}")
for bm in data['benchmarks']:
if bm['failed']:
logging.info(f"benchmark failed: {bm['name']}")
elif not bm.get('times'):
logging.info(f"benchmark has no times but is not marked failed: {bm['name']}")
if bm.get('timed_out'):
logging.info(f"benchmark timed out: {bm['name']}")
|
import logging
from importlib import import_module
from pyrite import settings
log = logging.getLogger(__name__)
DEFAULT_THEME = 'dark'
def __getattr__(name):
"""Intercept lookups of theme attributes and load from the currently
active theme based on user settings.
Client code can reference theme attributes with:
>>> from pyrite import theme
>>> t = theme.attribute # Will transparently use the active theme
"""
try:
theme = import_module(name=f".{settings['theme']}", package='pyrite.theme')
except ModuleNotFoundError:
log.error(f"Invalid theme: '{settings['theme']}', falling back to '{DEFAULT_THEME}'")
theme = import_module(name=f'.{DEFAULT_THEME}', package='pyrite.theme')
return getattr(theme, name)
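# For illustration only: a theme module such as pyrite/theme/dark.py (the default above)
# simply defines attributes at module level; the attribute names here are hypothetical:
#
#     background = "#1e1e1e"
#     foreground = "#d4d4d4"
#
# so that `theme.background` resolves through __getattr__ above to the active theme's value.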
|
'''
Created on May 23, 2016
@author: John
'''
name = input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
counts = dict()
for line in handle:
words = line.split()
if len(words) < 5 : continue
if words[0] != "From" : continue
when = words[5]
tics = when.split(":")
if len(tics) != 3 : continue
hour = tics[0]
counts[hour] = counts.get(hour,0) + 1
lst = sorted(counts.items())
for key, val in lst:
    print(key, val)
|
# Copyright 2015 cybojenix <anthonydking@slimroms.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class App(object):
def __init__(self, data):
if data is None:
data = {}
assert hasattr(data, "get")
self.id = data.get("id", "")
self.app_name = data.get("app_name", "")
self.description = data.get("description", "")
self.app_url = data.get("app_url", "")
self.icon_image = data.get("icon_image", "")
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
import math
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from module import FeedForward, ConvAttention, PreNorm
import numpy as np
class Transformer(nn.Module):
def __init__(self, depth, dim, heads, dim_head, scale, dropout):
super(Transformer, self).__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList([
PreNorm(dim, ConvAttention(dim, heads=heads, dim_head=dim_head, dropout=dropout)),
PreNorm(dim, FeedForward(dim, dim*scale, dropout=dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class SerialBlock(nn.Module):
def __init__(self, feature_size, in_channels, out_channels, depth=2, nheads=8, scale=8,
conv_kernel=7, stride=2, dropout=0.):
super(SerialBlock, self).__init__()
self.cls_embed = nn.Linear(in_channels, out_channels)
padding = (conv_kernel -1)//2
self.conv_embed = nn.Sequential(
nn.Conv2d(in_channels, out_channels, conv_kernel, stride, padding),
Rearrange('b c h w -> b (h w) c', h = feature_size, w = feature_size),
nn.LayerNorm(out_channels)
)
self.transformer = Transformer(depth=depth, dim=out_channels, heads=nheads, dim_head=out_channels//nheads,
scale=scale, dropout=dropout)
def forward(self, x, cls_tokens):
        '''
        :param x: feature map of shape [B, C_in, H, W]
        :param cls_tokens: class token of shape [B, 1, C_in]
        :return: token sequence of shape [B, 1 + H'*W', C_out] (class token prepended)
        '''
x = self.conv_embed(x)
cls_tokens = self.cls_embed(cls_tokens)
x = torch.cat((cls_tokens, x), dim=1)
x = self.transformer(x)
return x
class ParallelBlock(nn.Module):
def __init__(self, in_channels, nheads=8, dropout=0.):
super(ParallelBlock, self).__init__()
self.p1 = PreNorm(in_channels, ConvAttention(in_channels,
heads=nheads,
dim_head=in_channels//nheads,
dropout=dropout))
self.p2 = PreNorm(in_channels, ConvAttention(in_channels,
heads=nheads,
dim_head=in_channels // nheads,
dropout=dropout))
self.p3 = PreNorm(in_channels, ConvAttention(in_channels,
heads=nheads,
dim_head=in_channels // nheads,
dropout=dropout))
def forward(self, x1, x2, x3):
        '''
        :param x1, x2, x3: token sequences of shape [B, N_i, C]
        :return: the three attended sequences, with unchanged shapes
        '''
return self.p1(x1), self.p2(x2), self.p3(x3)
class CoaT(nn.Module):
def __init__(self, in_channels, image_size, num_classes, out_channels=[64, 128, 256, 320], depths=[2, 2, 2, 2],
heads=8, scales=[8, 8, 4, 4], downscales=[4, 2, 2, 2], kernels=[7, 3, 3, 3], use_parallel=False,
parallel_depth = 6, parallel_channels=152, dropout=0.):
super(CoaT, self).__init__()
assert len(out_channels) == len(depths) == len(scales) == len(downscales) == len(kernels)
feature_size = image_size
self.cls_token = nn.Parameter(torch.randn(1, 1, in_channels))
self.serial_layers = nn.ModuleList([])
for out_channel, depth, scale, downscale, kernel in zip(out_channels, depths, scales, downscales, kernels):
feature_size = feature_size // downscale
self.serial_layers.append(
SerialBlock(feature_size, in_channels, out_channel, depth, heads, scale, kernel, downscale, dropout)
)
in_channels = out_channel
self.use_parallel = use_parallel
if use_parallel:
self.parallel_conv_attn = nn.ModuleList([])
self.parallel_ffn = nn.ModuleList([])
for _ in range(parallel_depth):
self.parallel_conv_attn.append(ParallelBlock(parallel_channels, heads, dropout)
)
self.parallel_ffn.append(
PreNorm(parallel_channels, FeedForward(parallel_channels, parallel_channels * 4, dropout=dropout))
)
self.parallel_mlp_head = nn.Sequential(
nn.LayerNorm(in_channels*3),
nn.Linear(in_channels*3, num_classes)
)
self.serial_mlp_head = nn.Sequential(
nn.LayerNorm(in_channels),
nn.Linear(in_channels, num_classes)
)
def forward(self, x):
b, c, _, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b)
serial_outputs = []
for serial_block in self.serial_layers:
x = serial_block(x, cls_tokens)
serial_outputs.append(x)
cls_tokens = x[:, :1]
l = w = int(math.sqrt(x[:, 1:].shape[1]))
x = rearrange(x[:, 1:], 'b (l w) c -> b c l w', l=l, w=w)
s2 = serial_outputs[1]
s3 = serial_outputs[2]
s4 = serial_outputs[3]
if self.use_parallel:
for attn, ffn in zip(self.parallel_conv_attn, self.parallel_ffn):
s2, s3, s4 = attn(s2, s3, s4)
cls_s2 = s2[:, :1]
cls_s3 = s3[:, :1]
cls_s4 = s4[:, :1]
s2 = rearrange(s2[:,1:], 'b (l w) d -> b d l w', l=28, w=28)
s3 = rearrange(s3[:, 1:], 'b (l w) d -> b d l w', l=14, w=14)
s4 = rearrange(s4[:, 1:], 'b (l w) d -> b d l w', l=7, w=7)
s2 = s2 + F.interpolate(s3, (28, 28), mode='bilinear') + F.interpolate(s4, (28, 28), mode='bilinear')
s3 = s3 + F.interpolate(s2, (14, 14), mode='bilinear') + F.interpolate(s4, (14, 14), mode='bilinear')
s4 = s4 + F.interpolate(s2, (7, 7), mode='bilinear') + F.interpolate(s3, (7, 7), mode='bilinear')
s2 = rearrange(s2, 'b d l w -> b (l w) d')
s3 = rearrange(s3, 'b d l w -> b (l w) d')
s4 = rearrange(s4, 'b d l w -> b (l w) d')
s2 = ffn(torch.cat([cls_s2, s2], dim=1))
s3 = ffn(torch.cat([cls_s3, s3], dim=1))
s4 = ffn(torch.cat([cls_s4, s4], dim=1))
cls_tokens = torch.cat([s2[:,0], s3[:,0], s4[:,0]], dim=1)
return self.parallel_mlp_head(cls_tokens)
else:
return self.serial_mlp_head(cls_tokens.squeeze(1))
if __name__ == "__main__":
img = torch.ones([1, 3, 224, 224])
model = CoaT(3, 224, 1000, out_channels=[152, 152, 152, 152], scales=[4, 4, 4, 4], use_parallel=True)
out = model(img)
print("Shape of out :", out.shape) # [B, num_classes]
parameters = filter(lambda p: p.requires_grad, model.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
print('Trainable Parameters: %.3fM' % parameters)
|
import os
import json
import database
from exceptions import *
import datetime
class TimeReportHandler(object):
"""
Main class to handle information about projects and Employees ("using" projects)
"""
def __init__(self):
self.db = get_db_connection()
self.active_employee = None
self._get_lists()
def _get_lists(self):
# Employee
employee_data = self.db.get_data('Employee', '*')
self.employee_nr_list = employee_data['employee_nr']
self.employee_name_list = []
self.employee_name_to_nr = {}
for fi, la, nr in zip(employee_data['first_name'], employee_data['last_name'], self.employee_nr_list):
name = f'{fi} {la}'
self.employee_name_list.append(name)
self.employee_name_to_nr[name] = nr
# Projects
project_data = self.db.get_data('Project', '*')
self.project_nr_list = project_data['project_nr']
self.project_name_list = project_data['project_name']
        self.project_name_to_nr = dict(zip(self.project_name_list, self.project_nr_list))
def create_database(self):
self.db.create_database()
def add_employees_from_file(self):
file_path = os.path.join(os.path.dirname(__file__), 'input', 'employees.txt')
if not os.path.exists(file_path):
raise FileNotFoundError('No employees.txt file in input')
data = load_data_from_file(file_path)
for item in data:
self.add_employee(**item)
def add_projects_from_file(self):
file_path = os.path.join(os.path.dirname(__file__), 'input', 'projects.txt')
if not os.path.exists(file_path):
raise FileNotFoundError('No projects.txt file in input')
data = load_data_from_file(file_path)
for item in data:
self.add_project(**item)
def add_employee(self, **kwargs):
"""
Adds an employee to the database
:param kwargs:
:return:
"""
self.db.add_record_to_table('Employee', **kwargs)
def add_project(self, **kwargs):
"""
        Adds a project to the database
:param kwargs:
:return:
"""
self.db.add_record_to_table('Project', **kwargs)
def get_report_time_attributes(self):
return self.db.get_attributes_for_table('TimeReport')
def get_project_attributes(self):
return self.db.get_attributes_for_table('Project')
def get_employee_attributes(self):
return self.db.get_attributes_for_table('Employee')
def get_staffing_attributes(self):
return self.db.get_attributes_for_table('Staffing')
def get_employee(self, employee_nr):
if employee_nr not in self.employee_nr_list:
raise EmployeeDoesNotExist(employee_nr)
return Employee(employee_nr)
def _check_valid_employee(self, employee):
self.db.get_data()
class Employee(object):
def __init__(self, employee_nr):
self.employee_nr = employee_nr
self.db = get_db_connection()
def __str__(self):
return f'Employee: {self.employee_nr}'
def __repr__(self):
        return f'{self.__class__.__name__}({self.employee_nr})'
def get_report_time_attributes(self):
return self.db.get_attributes_for_table('TimeReport')
def report_time(self, **kwargs):
kwargs['employee_nr'] = self.employee_nr
date = kwargs.get('date')
if date:
if len(date) != 8:
raise InvalidTimeFormat(f'{date}, should be in format %Y%m%d')
else:
date = datetime.datetime.now().strftime('%Y%m%d')
kwargs['date'] = date
self.db.add_record_to_table('TimeReport', **kwargs)
def get_hours_reported(self, project=None):
if project:
data = self.db.get_data(tables='TimeReport', columns=['hours_reported'],
employee_nr=self.employee_nr, project_nr=project)
else:
data = self.db.get_data(tables='TimeReport', columns=['hours_reported'],
employee_nr=self.employee_nr)
return sum(data['hours_reported'])
def load_data_from_file(file_path):
data = []
with open(file_path) as fid:
for row, line in enumerate(fid):
line = line.strip('\n\r')
if not line:
continue
split_line = [item.strip() for item in line.split('\t')]
if not split_line[0]:
# Primary key not present
continue
if row == 0:
header = split_line
else:
data.append(dict(zip(header, split_line)))
return data
def get_db_connection():
db_file_path = os.path.join(os.path.dirname(__file__), 'time_report.db')
db = database.TimeReportDatabaseSqlite3(db_file_path)
return db
if __name__ == "__main__":
handler = TimeReportHandler()
if 1:
db_file_path = os.path.join(os.path.dirname(__file__), 'time_report.db')
if os.path.exists(db_file_path):
os.remove(db_file_path)
handler.create_database()
handler.add_employees_from_file()
handler.add_projects_from_file()
emp = handler.get_employee(1)
emp.report_time(project_nr=192007,
employee_nr=1,
hours_reported=3)
# data = handler.get_data(tables=['Employee', 'TimeReport', 'Staffing'], columns='*', employee_nr=1)
# data = handler.db.get_test_data()
# print(data)
|
from __future__ import division
##############################################################################
#
# Copyright (c) 2009-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
__copyright__="""Copyright (c) 2009-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
from esys.escript import *
from esys.escript.linearPDEs import LinearPDE
from esys.escript.models import FaultSystem
try:
from esys.finley import Rectangle
HAVE_FINLEY = True
except ImportError:
HAVE_FINLEY = False
from esys.weipa import saveVTK
from esys.escript.unitsSI import DEG
if not HAVE_FINLEY:
print("Finley module not available")
else:
#... set some parameters ...
lam=1.
mu=1
slip_max=1.
    mydomain = Rectangle(l0=1.,l1=1.,n0=16, n1=16) # n1 needs to be a multiple of 4!
# .. create the fault system
fs=FaultSystem(dim=2)
fs.addFault(V0=[0.5,0.25], strikes=90*DEG, ls=0.5, tag=1)
# ... create a slip distribution on the fault:
p, m=fs.getParametrization(mydomain.getX(),tag=1)
p0,p1= fs.getW0Range(tag=1)
s=m*(p-p0)*(p1-p)/((p1-p0)/2)**2*slip_max*[0.,1.]
# ... calculate stress according to slip:
D=symmetric(grad(s))
chi, d=fs.getSideAndDistance(D.getFunctionSpace().getX(),tag=1)
sigma_s=(mu*D+lam*trace(D)*kronecker(mydomain))*chi
#... open symmetric PDE ...
mypde=LinearPDE(mydomain)
mypde.setSymmetryOn()
#... set coefficients ...
C=Tensor4(0.,Function(mydomain))
for i in range(mydomain.getDim()):
for j in range(mydomain.getDim()):
C[i,i,j,j]+=lam
C[j,i,j,i]+=mu
C[j,i,i,j]+=mu
# ... fix displacement in normal direction
x=mydomain.getX()
msk=whereZero(x[0])*[1.,0.] + whereZero(x[0]-1.)*[1.,0.] \
+whereZero(x[1])*[0.,1.] + whereZero(x[1]-1.)*[0.,1.]
mypde.setValue(A=C,X=-0.5*sigma_s,q=msk)
#... solve pde ...
mypde.getSolverOptions().setVerbosityOn()
v=mypde.getSolution()
# .. write the displacement to file:
D=symmetric(grad(v))
sigma=(mu*D+lam*trace(D)*kronecker(mydomain))+0.5*sigma_s
saveVTK("slip.vtu",disp=v+0.5*chi*s, stress= sigma)
|
#!/usr/bin/python
import shutil
template_directory = "originals/template-card"
ID_FROM = 1
ID_TO = 50
brightsign_ids = map(lambda x:"%02d"%(x,), range(ID_FROM,ID_TO+1))
for bsid in brightsign_ids:
shutil.copytree(template_directory, bsid)
current_sync_filename = bsid+"/current-sync.xml"
with open(current_sync_filename, 'r+') as f:
text = f.read()
text = text.replace("$BSID", bsid)
text = text[0:-2] # Get rid of extra newline characters
f.seek(0)
f.write(text)
f.truncate()
|
# -*- coding: utf-8 -*-
"""
This module provides a variety of transforms that transform the AST
into a final form ready for code generation.
Below follows an explanation and justification of the design of the main
compilation stages in numba.
We start with a Python AST, compiled from source code or decompiled from
bytecode using meta. We run the following transformations:
1) Type inference
Infer types of all expressions, and fix the types of all local
variables. Local variable types are promoted (for instance float
to double), but cannot change (e.g. string cannot be assigned to
float).
When the type inferencer cannot determine a type, such as when it
calls a Python function or method that is not a Numba function, it
assumes type object. Object variables may be coerced to and from
most native types.
The type inferencer inserts CoercionNode nodes that perform such
coercions, as well as coercions between promotable native types.
It also resolves the return type of many math functions called
in the numpy, math and cmath modules.
Each AST expression node has a Variable that holds the type of
the expression, as well as any meta-data such as constant values
that have been determined.
2) Transform for loops
Provides some transformations of for loops over arrays to loops
over a range. Iteration over range/xrange is resolved at
compilation time.
What I would like to see is the generation of a RangeNode holding
a ast.Compare and an iteration variable incrementing ast.BinOp.
3) Low level specializations (LateSpecializer)
This stage performs low-level specializations. For instance it
resolves coercions to and from object into calls such as
PyFloat_FromDouble, with a fallback to Py_BuildValue/PyArg_ParseTuple.
This specializer also has the responsibility to ensure that new
references are accounted for by refcounting ObjectTempNode nodes.
This node absorbs the references and lets parent nodes borrow the
reference. At function cleanup, it decrefs its value. In loops,
it also decrefs any previous value, if set. Hence, these temporaries
must be initialized to NULL.
An object temporary is specific to one specific sub-expression, and
they are not reused (like in Cython).
It also rewrites object attribute access and method calls into
PyObject_GetAttrString etc.
4) Code generation
Generate LLVM code from the transformed AST.
This should be as minimal as possible, and should *not* contain
blobs of code performing complex operations. Instead, complex
operations should be broken down by AST transformers into
fundamental operations that are already supported by the AST.
This way we maximize code reuse, and make potential future additions
of different code generation backends easier. This can be taken
only so far, since low-level transformations must also tailor to
limitations of the code generation backend, such as intrinsic LLVM
calls or calls into libc. However, code reuse is especially convenient
in the face of type coercions, which LLVM does not provide any
leniency for.
"""
from __future__ import print_function, division, absolute_import
import sys
import ast
import ctypes
import logging
import warnings
from functools import partial, reduce
if __debug__:
import pprint
import numba
from numba import *
from numba import error
from .minivect import codegen
from numba import macros, utils, typesystem
from numba import visitors, nodes
from numba import function_util
from numba.typesystem import is_obj, promote_to_native
from numba.type_inference.modules import mathmodule
from numba.nodes import constnodes
from numba.external import utility
from numba.utils import dump
import llvm.core
import numpy as np
logger = logging.getLogger(__name__)
from numba.external import pyapi
# ______________________________________________________________________
def get_funcname(py_func):
if py_func in (abs, np.abs):
return 'abs'
elif py_func is np.round:
return 'round'
return mathmodule.ufunc2math.get(py_func.__name__, py_func.__name__)
def resolve_pow(env, restype, args):
promote = env.crnt.typesystem.promote
if restype.is_numeric:
type = reduce(promote, [double, restype] + [a.type for a in args])
signature = type(*[type] * len(args))
result = nodes.MathCallNode(signature, args, None, name='pow')
else:
result = nodes.call_pyfunc(pow, args)
return nodes.CoercionNode(result, restype)
def math_call(env, name, args, dst_type):
signature = dst_type(*[a.type for a in args])
return nodes.MathCallNode(signature, args, None, name=name)
def math_call2(env, name, call_node):
return math_call(env, name, [call_node.args[0]], call_node.type)
# ______________________________________________________________________
class BuiltinResolver(object):
"""
Perform final low-level transformations such as abs(value) -> fabs(value)
"""
def __init__(self, env):
self.env = env
self.external_call = partial(function_util.external_call,
self.env.context,
self.env.crnt.llvm_module)
def resolve_builtin_call(self, node, func):
"""
Resolve an ast.Call() of a built-in function.
Returns None if no specific transformation is applied.
"""
resolver = getattr(self, '_resolve_' + func.__name__, None)
if resolver is not None:
# Pass in the first argument type
argtype = None
if len(node.args) >= 1:
argtype = node.args[0].variable.type
return resolver(func, node, argtype)
return None
def resolve_builtin_call_or_object(self, node, func):
"""
Resolve an ast.Call() of a built-in function, or call the built-in
through the object layer otherwise.
"""
result = self.resolve_builtin_call(node, func)
if result is None:
result = nodes.call_pyfunc(func, node.args)
return nodes.CoercionNode(result, node.type)
def _resolve_abs(self, func, node, argtype):
if argtype.is_int and not argtype.signed:
# abs() on unsigned integral value
return node.args[0]
elif not node.type.is_numeric:
result = nodes.call_pyfunc(func, node.args)
else:
return math_call2(self.env, 'abs', node)
def _resolve_round(self, func, node, argtype):
return nodes.call_pyfunc(round, node.args)
def _resolve_pow(self, func, node, argtype):
return resolve_pow(self.env, node.type, node.args)
def _resolve_int_number(self, func, node, argtype, dst_type, ext_name):
assert len(node.args) == 2
arg1, arg2 = node.args
if arg1.variable.type.is_string:
return nodes.CoercionNode(
nodes.ObjectTempNode(
self.external_call(ext_name, args=[arg1, nodes.NULL, arg2])),
dst_type=dst_type)
def _resolve_int(self, func, node, argtype, dst_type=int_):
if PY3:
return self._resolve_int_number(func, node, argtype, long_, 'PyLong_FromString')
return self._resolve_int_number(func, node, argtype, int_, 'PyInt_FromString')
def _resolve_long(self, func, node, argtype, dst_type=int_):
return self._resolve_int_number(func, node, argtype, long_, 'PyLong_FromString')
def _resolve_len(self, func, node, argtype):
if argtype.is_string:
call = self.external_call('strlen', node.args)
return call # nodes.CoercionNode(call, Py_ssize_t)
class ResolveCoercions(visitors.NumbaTransformer):
def visit_CoercionNode(self, node):
if not isinstance(node, nodes.CoercionNode):
# CoercionNode.__new__ returns the node to be coerced if it doesn't
# need coercion
return node
node_type = node.node.type
dst_type = node.dst_type
if __debug__ and self.env and self.env.debug_coercions:
logger.debug('coercion: %s --> %s\n%s',
node_type, dst_type, utils.pformat_ast(node))
# TODO: the below is a problem due to implicit string <-> int coercions!
if (node_type.is_string and dst_type.is_numeric and not
(node_type.is_pointer or node_type.is_null)):
if dst_type.typename in ('char', 'uchar'):
raise error.NumbaError(
node, "Conversion from string to (u)char not yet supported")
result = self.str_to_int(dst_type, node)
elif self.nopython and (is_obj(node_type) ^ is_obj(dst_type)):
raise error.NumbaError(node, "Cannot coerce to or from object in "
"nopython context")
elif is_obj(node.dst_type) and not is_obj(node_type):
node = nodes.ObjectTempNode(nodes.CoerceToObject(
node.node, node.dst_type, name=node.name))
result = self.visit(node)
elif is_obj(node_type) and not is_obj(node.dst_type):
node = nodes.CoerceToNative(node.node, node.dst_type,
name=node.name)
result = self.visit(node)
elif node_type.is_null:
if not dst_type.is_pointer:
raise error.NumbaError(node.node,
"NULL must be cast or implicitly "
"coerced to a pointer type")
result = self.visit(nodes.NULL.coerce(dst_type))
elif node_type.is_numeric and dst_type.is_bool:
to_bool = ast.Compare(node.node, [ast.NotEq()],
[nodes.const(0, node_type)])
to_bool = nodes.typednode(to_bool, bool_)
result = self.visit(to_bool)
else:
self.generic_visit(node)
if dst_type == node.node.type:
result = node.node
else:
result = node
if __debug__ and self.env and self.env.debug_coercions:
logger.debug('result = %s', utils.pformat_ast(result))
return result
def str_to_int(self, dst_type, node):
# TODO: int <-> string conversions are explicit, this should not
# TODO: be a coercion
if self.nopython:
node = nodes.CoercionNode(
function_util.external_call(
self.context,
self.llvm_module,
('atol' if dst_type.is_int else 'atof'),
args=[node.node]),
dst_type, name=node.name, )
else:
if dst_type.is_int:
cvtobj = function_util.external_call(
self.context,
self.llvm_module,
'PyInt_FromString' if not PY3 else 'PyLong_FromString',
args=[node.node, nodes.NULL,
nodes.const(10, int_)])
else:
cvtobj = function_util.external_call(
self.context,
self.llvm_module,
'PyFloat_FromString',
args=[node.node,
nodes.const(0, Py_ssize_t)])
node = nodes.CoerceToNative(nodes.ObjectTempNode(cvtobj),
dst_type, name=node.name)
result = self.visit(node)
return result
def convert_int_to_object(self, arg):
funcs = ["__Numba_PyInt_FromLongLong",
"__Numba_PyInt_FromUnsignedLongLong"]
func = funcs[arg.type.signed]
return function_util.utility_call(self.context, self.llvm_module,
func, [arg])
def visit_CoerceToObject(self, node):
new_node = node
node_type = node.node.type
if node_type.is_bool:
new_node = function_util.external_call(self.context,
self.llvm_module,
"PyBool_FromLong",
args=[node.node])
elif node_type.is_numeric and node_type.typename not in ('char', 'uchar'):
cls = None
args = node.node,
if node_type.is_int:
new_node = self.convert_int_to_object(node.node)
elif node_type.is_float:
cls = pyapi.PyFloat_FromDouble
elif node_type.is_complex:
cls = pyapi.PyComplex_FromDoubles
complex_value = nodes.CloneableNode(node.node)
args = [
nodes.ComplexAttributeNode(complex_value, "real"),
nodes.ComplexAttributeNode(complex_value.clone, "imag")
]
elif node_type.is_numpy_datetime:
datetime_value = nodes.CloneableNode(node.node)
args = [
nodes.DateTimeAttributeNode(datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(datetime_value.clone, 'units'),
nodes.ConstNode(np.datetime64(), object_),
]
new_node = function_util.utility_call(
self.context, self.llvm_module,
"create_numpy_datetime", args=args)
elif node_type.is_datetime:
datetime_value = nodes.CloneableNode(node.node)
args = [
nodes.DateTimeAttributeNode(datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(datetime_value.clone, 'units'),
]
new_node = function_util.utility_call(
self.context, self.llvm_module,
"create_python_datetime", args=args)
elif node_type.is_timedelta:
timedelta_value = nodes.CloneableNode(node.node)
args = [
nodes.TimeDeltaAttributeNode(timedelta_value, 'diff'),
nodes.TimeDeltaAttributeNode(timedelta_value.clone, 'units'),
nodes.ConstNode(np.timedelta64(), object_),
]
new_node = function_util.utility_call(
self.context, self.llvm_module,
"create_numpy_timedelta", args=args)
else:
raise error.NumbaError(
node, "Don't know how to coerce type %r to PyObject" %
node_type)
if cls:
new_node = function_util.external_call(self.context,
self.llvm_module,
cls.__name__,
args=args)
elif node_type.is_pointer and not node_type in (char.pointer(), string_):
# Create ctypes pointer object
ctypes_pointer_type = node_type.to_ctypes()
args = [nodes.CoercionNode(node.node, int64),
nodes.ObjectInjectNode(ctypes_pointer_type, object_)]
new_node = nodes.call_pyfunc(ctypes.cast, args)
self.generic_visit(new_node)
return new_node
def object_to_int(self, node, dst_type):
"""
Return node that converts the given node to the dst_type.
This also performs overflow/underflow checking, and conversion to
a Python int or long if necessary.
PyLong_AsLong and friends do not do this (overflow/underflow checking
is only for longs, and conversion to int|long depends on the Python
version).
"""
dst_type = promote_to_native(dst_type)
assert dst_type in utility.object_to_numeric, (dst_type, utility.object_to_numeric)
utility_func = utility.object_to_numeric[dst_type]
result = function_util.external_call_func(self.context,
self.llvm_module,
utility_func,
args=[node])
return result
def coerce_to_function_pointer(self, node, jit_func_type, func_pointer_type):
jit_func = jit_func_type.jit_func
if jit_func.signature != func_pointer_type.base_type:
raise error.NumbaError(node,
"Cannot coerce jit funcion %s to function of type %s" % (
jit_func, func_pointer_type))
pointer = self.env.llvm_context.get_pointer_to_function(jit_func.lfunc)
new_node = nodes.const(pointer, func_pointer_type)
return new_node
def visit_CoerceToNative(self, node):
"""
Try to perform fast coercion using e.g. PyLong_AsLong(), with a
fallback to PyArg_ParseTuple().
"""
new_node = None
from_type = node.node.type
node_type = node.type
if node_type.is_numeric:
cls = None
if node_type == size_t:
node_type = ulonglong
if node_type.is_int: # and not
new_node = self.object_to_int(node.node, node_type)
elif node_type.is_float:
cls = pyapi.PyFloat_AsDouble
elif node_type.is_complex:
# FIXME: This conversion has to be pretty slow. We
# need to move towards being ABI-savvy enough to just
# call PyComplex_AsCComplex().
cloneable = nodes.CloneableNode(node.node)
new_node = nodes.ComplexNode(
real=function_util.external_call(
self.context, self.llvm_module,
"PyComplex_RealAsDouble", args=[cloneable]),
imag=function_util.external_call(
self.context, self.llvm_module,
"PyComplex_ImagAsDouble", args=[cloneable.clone]))
elif node_type.is_numpy_datetime:
timestamp_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_numpy_datetime_to_timestamp", args=[node.node])
units_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_numpy_datetime_to_units", args=[node.node])
new_node = nodes.DateTimeNode(timestamp_func, units_func)
elif node_type.is_datetime:
timestamp_func = function_util.utility_call(
self.context, self.llvm_module,
"pydatetime2timestamp", args=[node.node])
units_func = function_util.utility_call(
self.context, self.llvm_module,
"pydatetime2units", args=[node.node])
new_node = nodes.DateTimeNode(timestamp_func, units_func)
elif node_type.is_numpy_timedelta:
diff_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_numpy_timedelta_to_diff", args=[node.node])
units_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_numpy_timedelta_to_units", args=[node.node])
new_node = nodes.DateTimeNode(diff_func, units_func)
elif node_type.is_timedelta:
raise NotImplementedError
else:
raise error.NumbaError(
node, "Don't know how to coerce a Python object to a %r" %
node_type)
if cls:
# TODO: error checking!
new_node = function_util.external_call(self.context,
self.llvm_module,
cls.__name__,
args=[node.node])
elif node_type.is_pointer and not node_type.is_string:
if from_type.is_jit_function and node_type.base_type.is_function:
new_node = self.coerce_to_function_pointer(
node, from_type, node_type)
else:
raise error.NumbaError(node, "Obtaining pointers from objects "
"is not yet supported (%s)" % node_type)
elif node_type.is_void:
raise error.NumbaError(node, "Cannot coerce %s to void" %
(from_type,))
if new_node is None:
# Create a tuple for PyArg_ParseTuple
new_node = node
new_node.node = ast.Tuple(elts=[node.node], ctx=ast.Load())
self.generic_visit(node)
return node
if new_node.type != node.type:
# Fast native coercion. E.g. coercing an object to an int_
# will use PyLong_AsLong, but that will return a long_. We
# need to coerce the long_ to an int_
new_node = nodes.CoercionNode(new_node, node.type)
# Specialize replacement node
new_node = self.visit(new_node)
return new_node
class LateSpecializer(ResolveCoercions,
visitors.NoPythonContextMixin):
def visit_FunctionDef(self, node):
self.builtin_resolver = BuiltinResolver(self.env)
node.decorator_list = self.visitlist(node.decorator_list)
# Make sure to visit the entry block (not part of the CFG) and the
# first actual code block which may have synthetically
# inserted promotions
self.visit_ControlBlock(node.flow.blocks[0])
self.visit_ControlBlock(node.flow.blocks[1])
node.body = self.visitlist(node.body)
ret_type = self.func_signature.return_type
self.verify_context(ret_type)
self.setup_error_return(node, ret_type)
return node
def verify_context(self, ret_type):
if ret_type.is_object or ret_type.is_array:
# This will require some increfs, but allow it if people
# use 'with python' later on. If 'with python' isn't used, a
# return will issue the error
#if self.nopython:
# raise error.NumbaError(
# node, "Function cannot return object in "
# "nopython context")
pass
def setup_error_return(self, node, ret_type):
"""
Set FunctionDef.error_return to the AST statement that returns a
"bad value" that can be used as error indicator.
"""
value = nodes.badval(ret_type)
if value is not None:
value = nodes.CoercionNode(value, dst_type=ret_type).cloneable
error_return = ast.Return(value=value)
if self.nopython and is_obj(self.func_signature.return_type):
error_return = nodes.WithPythonNode(body=[error_return])
error_return = self.visit(error_return)
node.error_return = error_return
def visit_ControlBlock(self, node):
# print node
self.visitchildren(node)
return node
def visit_While(self, node):
self.generic_visit(node)
return node
def check_context(self, node):
if self.nopython:
raise error.NumbaError(node, "Cannot construct object in "
"nopython context")
def _print_nopython(self, value, dest=None):
if dest is not None:
raise error.NumbaError(dest, "No file may be given in nopython mode")
# stdin, stdout, stderr = stdio_util.get_stdio_streams()
# stdout = stdio_util.get_stream_as_node(stdout)
format = codegen.get_printf_specifier(value.type)
if format is None:
raise error.NumbaError(
value, "Printing values of type '%s' is not supported "
"in nopython mode" % (value.type,))
return function_util.external_call(
self.context,
self.llvm_module,
'printf',
args=[nodes.const(format, c_string_type),
value])
def _print(self, value, dest=None):
signature, lfunc = self.context.external_library.declare(
self.llvm_module,
'PyObject_CallMethod')
if dest is None:
dest = nodes.ObjectInjectNode(sys.stdout)
value = function_util.external_call(self.context,
self.llvm_module,
"PyObject_Str",
args=[value])
args = [dest, nodes.ConstNode("write"), nodes.ConstNode("O"), value]
return nodes.NativeCallNode(signature, args, lfunc)
def visit_Print(self, node):
if self.nopython:
printfunc = self._print_nopython
dst_type = string_
else:
printfunc = self._print
dst_type = object_
result = []
if node.values:
print_space = printfunc(nodes.const(" ", dst_type), node.dest)
for value in node.values:
result.append(printfunc(value, node.dest))
result.append(print_space)
if node.nl:
result.pop() # pop last space
if node.nl:
result.append(printfunc(nodes.const("\n", dst_type), node.dest))
return ast.Suite(body=self.visitlist(result))
def visit_Tuple(self, node):
self.check_context(node)
sig, lfunc = self.context.external_library.declare(self.llvm_module,
'PyTuple_Pack')
objs = self.visitlist(nodes.CoercionNode.coerce(node.elts, object_))
n = nodes.ConstNode(len(node.elts), Py_ssize_t)
args = [n] + objs
new_node = nodes.NativeCallNode(sig, args, lfunc, name='tuple')
# TODO: determine element type of node.elts
new_node.type = typesystem.tuple_(object_, size=len(node.elts))
return nodes.ObjectTempNode(new_node)
def visit_List(self, node):
self.check_context(node)
self.generic_visit(node)
return nodes.ObjectTempNode(node)
def visit_Dict(self, node):
self.check_context(node)
self.generic_visit(node)
return nodes.ObjectTempNode(node)
def visit_ObjectCallNode(self, node):
# self.generic_visit(node)
assert node.function
if self.nopython:
meth_name = node.name and ' (%r)' % node.name
raise error.NumbaError(node, "Cannot use object call in "
"nopython context" + meth_name)
node.function = self.visit(node.function)
node.args_tuple = self.visit(node.args_tuple)
node.kwargs_dict = self.visit(node.kwargs_dict)
return nodes.ObjectTempNode(node)
def visit_Call(self, node):
func_type = node.func.type
if self.query(node, "is_math") and node.type.is_numeric:
assert node.func.type.is_known_value
name = get_funcname(node.func.type.value)
result = math_call(self.env, name, node.args, node.type)
elif func_type.is_builtin:
result = self.builtin_resolver.resolve_builtin_call_or_object(
node, func_type.func)
else:
result = nodes.call_obj(node)
return self.visit(result)
def _c_string_slice(self, node):
ret_val = node
logger.debug(node.slice)
node_slice = node.slice
if isinstance(node_slice, nodes.ObjectInjectNode):
node_slice = node.slice.object
lower, upper, step = (
value if value is None else nodes.const(value, size_t)
for value in (node_slice.start, node_slice.stop,
node_slice.step))
else:
lower, upper, step = (node_slice.lower, node_slice.upper,
node_slice.step)
if step is None:
node_value = self.visit(node.value)
if lower is None:
lower = nodes.const(0, size_t)
if upper is None:
ret_val = nodes.LLMacroNode(
macros.c_string_slice_1.__signature__,
macros.c_string_slice_1, self.visit(node.value),
self.visit(lower))
else:
ret_val = nodes.LLMacroNode(
macros.c_string_slice_2.__signature__,
macros.c_string_slice_2, self.visit(node.value),
self.visit(lower), self.visit(upper))
logger.debug(ret_val)
else:
raise NotImplementedError('String slices where step != None.')
return ret_val
def visit_Subscript(self, node):
if isinstance(node.value, nodes.ArrayAttributeNode):
if node.value.is_read_only and isinstance(node.ctx, ast.Store):
raise error.NumbaError("Attempt to load read-only attribute")
# Short-circuit visiting a Slice child if this is a nopython
# string slice.
if (self.nopython and node.value.type.is_string and
node.type.is_string):
return self.visit(self._c_string_slice(node))
# logging.debug(ast.dump(node))
# TODO: do this in the respective cases below when needed
self.generic_visit(node)
node_type = node.value.type
if ((node_type.is_object and not node_type.is_array) or
(node_type.is_array and node.slice.type.is_object)):
# Array or object slicing
if isinstance(node.ctx, ast.Load):
result = function_util.external_call(self.context,
self.llvm_module,
'PyObject_GetItem',
args=[node.value,
node.slice])
node = nodes.CoercionNode(result, dst_type=node.type)
node = self.visit(node)
else:
# This is handled in visit_Assign
pass
elif (node.value.type.is_array and node.type.is_numpy_datetime and
node.slice.type.is_int):
# JNB: ugly hack to make array of datetimes look like array of
# int64, since numba datetime type doesn't match numpy datetime type.
node.value.type = array_(int64, node.value.type.ndim,
node.value.type.is_c_contig,
node.value.type.is_f_contig,
node.value.type.inner_contig)
node.value.variable.type = node.value.type
data_node = nodes.DataPointerNode(node.value, node.slice, node.ctx)
units_node = function_util.utility_call(
self.context, self.llvm_module,
"get_units_num",
args=[nodes.ConstNode(node_type.dtype.units_char, string_)])
node = nodes.DateTimeNode(data_node, units_node)
elif (node.value.type.is_array and node.type.is_numpy_timedelta and
node.slice.type.is_int):
# JNB: ugly hack to make array of timedeltas look like array of
# int64, since numba timedelta type doesn't match numpy timedelta type.
node.value.type = array_(int64, node.value.type.ndim,
node.value.type.is_c_contig,
node.value.type.is_f_contig,
node.value.type.inner_contig)
node.value.variable.type = node.value.type
data_node = nodes.DataPointerNode(node.value, node.slice, node.ctx)
units_node = function_util.utility_call(
self.context, self.llvm_module,
"get_units_num",
args=[nodes.ConstNode(node_type.dtype.units_char, string_)])
node = nodes.TimeDeltaNode(data_node, units_node)
elif (node.value.type.is_array and not node.type.is_array and
node.slice.type.is_int):
# Array index with integer indices
node = nodes.DataPointerNode(node.value, node.slice, node.ctx)
elif node.value.type.is_string and node.type.is_string:
node.value = nodes.CoercionNode(node.value, dst_type = object_)
node.type = object_
node = nodes.CoercionNode(nodes.ObjectTempNode(node),
dst_type = c_string_type)
node = self.visit(node)
return node
def visit_ExtSlice(self, node):
if node.type.is_object:
return self.visit(ast.Tuple(elts=node.dims, ctx=ast.Load()))
else:
if node.type.is_float:
self.warn(node, "Using a float for indexing")
self.generic_visit(node)
return node
def visit_Index(self, node):
return self.visit(node.value)
def allocate_struct_on_stack(self, assmnt_node, target):
# Allocate struct on stack
temp = nodes.TempNode(target.type)
assmnt_node.targets[0] = temp.store()
assmnt_node.value = self.visit(assmnt_node.value)
# Expose LLVM value through SSA (patch the Variable or the
# LHS). We need to store the pointer to the struct (the alloca)
ssa_assmnt = ast.Assign(targets=[target], value=temp.store())
return ast.Suite(body=[assmnt_node, ssa_assmnt])
def visit_Assign(self, node):
target = node.targets[0]
target_is_subscript = (len(node.targets) == 1 and
isinstance(target, ast.Subscript))
if target_is_subscript and is_obj(target.type):
# Slice assignment / index assignment w/ objects
# TODO: discount array indexing with dtype object
target = self.visit(target)
obj = target.value
key = target.slice
value = self.visit(node.value)
call = function_util.external_call(self.context,
self.llvm_module,
'PyObject_SetItem',
args=[obj, key, value])
return self.visit(call)
elif target.type.is_struct and nodes.is_name(target):
node = self.allocate_struct_on_stack(node, target)
return node
self.generic_visit(node)
return node
def visit_Slice(self, node):
"""
Rewrite slice objects. Do this late in the pipeline so that other
code can still recognize the code structure.
"""
slice_values = [node.lower, node.upper, node.step]
if self.nopython:
raise error.NumbaError(node, "Cannot slice in nopython context")
if node.variable.is_constant:
return self.visit(nodes.ObjectInjectNode(node.variable.constant_value))
bounds = []
for node in slice_values:
if node is None:
bounds.append(nodes.NULL_obj)
else:
bounds.append(node)
new_slice = function_util.external_call(self.context,
self.llvm_module,
'PySlice_New',
args=bounds,
temp_name='slice')
return self.visit(new_slice)
# return nodes.ObjectTempNode(new_slice)
def visit_Attribute(self, node):
if (self.nopython and not node.value.type.is_module and
not node.value.type.is_complex and
not node.value.type.is_datetime and
not node.value.type.is_timedelta):
raise error.NumbaError(
node, "Cannot access Python attribute in nopython context (%s)" % node.attr)
if node.value.type.is_complex:
value = self.visit(node.value)
return nodes.ComplexAttributeNode(value, node.attr)
elif node.value.type.is_numpy_datetime:
value = self.visit(node.value)
if node.attr in ['year', 'month', 'day', 'hour', 'min', 'sec']:
func_dict = {'year' : 'extract_datetime_year',
'month' : 'extract_datetime_month',
'day' : 'extract_datetime_day',
'hour' : 'extract_datetime_hour',
'min' : 'extract_datetime_min',
'sec' : 'extract_datetime_sec',}
value = nodes.CloneableNode(value)
timestamp_node = nodes.DateTimeAttributeNode(value,
'timestamp')
unit_node = nodes.DateTimeAttributeNode(value.clone, 'units')
new_node = function_util.utility_call(
self.context, self.llvm_module,
func_dict[node.attr],
args=[timestamp_node, unit_node])
return new_node
else:
return nodes.DateTimeAttributeNode(value, node.attr)
elif node.value.type.is_datetime:
value = self.visit(node.value)
return nodes.DateTimeAttributeNode(value, node.attr)
elif node.value.type.is_timedelta:
value = self.visit(node.value)
return nodes.TimeDeltaAttributeNode(value, node.attr)
elif node.type.is_numpy_attribute:
return nodes.ObjectInjectNode(node.type.value)
elif node.type.is_numpy_dtype:
dtype_type = node.type.dtype
return nodes.ObjectInjectNode(dtype_type.get_dtype())
elif is_obj(node.value.type):
if node.value.type.is_module:
# Resolve module attributes as constants
if node.type.is_module_attribute:
new_node = nodes.ObjectInjectNode(node.type.value)
else:
new_node = nodes.ConstNode(getattr(node.value.type.module,
node.attr))
else:
new_node = function_util.external_call(
self.context,
self.llvm_module,
'PyObject_GetAttrString',
args=[node.value,
nodes.ConstNode(node.attr)])
return self.visit(new_node)
self.generic_visit(node)
return node
def visit_ArrayNewNode(self, node):
if self.nopython:
raise error.NumbaError(
node, "Cannot yet allocate new array in nopython context")
PyArray_Type = nodes.ObjectInjectNode(np.ndarray)
descr = nodes.ObjectInjectNode(node.type.dtype.get_dtype()).cloneable
ndim = nodes.const(node.type.ndim, int_)
flags = nodes.const(0, int_)
args = [PyArray_Type, descr.clone, ndim,
node.shape, node.strides, node.data, flags]
incref_descr = nodes.IncrefNode(descr)
incref_base = None
setbase = None
if node.base is None:
args.append(nodes.NULL_obj)
else:
base = nodes.CloneableNode(node.base)
incref_base = nodes.IncrefNode(base)
args.append(base.clone)
array = nodes.PyArray_NewFromDescr(args)
array = nodes.ObjectTempNode(array).cloneable
body = [incref_descr, incref_base, array, setbase]
if node.base is not None:
body.append(nodes.PyArray_SetBaseObject([array.clone, base.clone]))
# TODO: PyArray_UpdateFlags()
result = nodes.ExpressionNode(filter(None, body), array.clone)
return self.visit(result)
def visit_ArrayNewEmptyNode(self, node):
if self.nopython:
raise error.NumbaError(
node, "Cannot yet allocate new empty array in nopython context")
ndim = nodes.const(node.type.ndim, int_)
dtype = nodes.const(node.type.dtype.get_dtype(), object_).cloneable
is_fortran = nodes.const(node.is_fortran, int_)
result = nodes.PyArray_Empty([ndim, node.shape, dtype, is_fortran])
result = nodes.ObjectTempNode(result)
incref_descr = nodes.IncrefNode(dtype)
return self.visit(nodes.ExpressionNode([incref_descr], result))
def visit_Name(self, node):
if node.variable.is_constant:
obj = node.variable.constant_value
return self.visit(nodes.const(obj, node.type))
return node
def visit_Return(self, node):
return_type = self.func_signature.return_type
if node.value is not None:
node.value = self.visit(nodes.CoercionNode(node.value, return_type))
return node
def visit_For(self, node):
self.generic_visit(node)
return node
def _object_binop(self, node, api_name):
return self.visit(
function_util.external_call(self.context,
self.llvm_module,
api_name,
args=[node.left,
node.right]))
def _object_Add(self, node):
return self._object_binop(node, 'PyNumber_Add')
def _object_Sub(self, node):
return self._object_binop(node, 'PyNumber_Subtract')
def _object_Mult(self, node):
return self._object_binop(node, 'PyNumber_Multiply')
def _object_Div(self, node):
if PY3:
return self._object_binop(node, 'PyNumber_TrueDivide')
else:
return self._object_binop(node, 'PyNumber_Divide')
def _object_Mod(self, node):
return self._object_binop(node, 'PyNumber_Remainder')
def _object_Pow(self, node):
args = [node.left,
node.right,
nodes.ObjectInjectNode(None)]
return self.visit(function_util.external_call(self.context,
self.llvm_module,
'PyNumber_Power',
args=args),
llvm_module=self.llvm_module)
def _object_LShift(self, node):
return self._object_binop(node, 'PyNumber_Lshift')
def _object_RShift(self, node):
return self._object_binop(node, 'PyNumber_Rshift')
def _object_BitOr(self, node):
return self._object_binop(node, 'PyNumber_Or')
def _object_BitXor(self, node):
return self._object_binop(node, 'PyNumber_Xor')
def _object_BitAnd(self, node):
return self._object_binop(node, 'PyNumber_And')
def _object_FloorDiv(self, node):
return self._object_binop(node, 'PyNumber_FloorDivide')
def visit_BinOp(self, node):
if isinstance(node.op, ast.Pow):
return self.visit(resolve_pow(self.env, node.type, [node.left,
node.right]))
self.generic_visit(node)
if is_obj(node.left.type) or is_obj(node.right.type):
op_name = type(node.op).__name__
op_method = getattr(self, '_object_%s' % op_name, None)
if op_method:
node = op_method(node)
else:
raise error.NumbaError(
node, 'Unsupported binary operation for object: %s' %
op_name)
elif node.left.type.is_datetime and node.right.type.is_datetime:
if isinstance(node.op, ast.Sub):
datetime_value = nodes.CloneableNode(node.left)
units1_node = nodes.DateTimeAttributeNode(
datetime_value, 'units')
datetime_value = nodes.CloneableNode(node.right)
units2_node = nodes.DateTimeAttributeNode(
datetime_value, 'units')
unit_node = function_util.utility_call(
self.context, self.llvm_module,
"get_target_unit_for_datetime_datetime",
args=[units1_node, units2_node])
datetime_value = nodes.CloneableNode(node.left)
args1 = [
nodes.DateTimeAttributeNode(datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(datetime_value.clone, 'units'),
]
datetime_value = nodes.CloneableNode(node.right)
args2 = [
nodes.DateTimeAttributeNode(datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(datetime_value.clone, 'units'),
]
diff_node = function_util.utility_call(
self.context, self.llvm_module,
"sub_datetime_datetime", args=args1+args2+[unit_node])
node = nodes.TimeDeltaNode(diff_node, unit_node)
else:
raise NotImplementedError
elif (node.left.type.is_datetime and
node.right.type.is_timedelta) or \
(node.left.type.is_timedelta and
node.right.type.is_datetime):
if isinstance(node.op, ast.Add) or isinstance(node.op, ast.Sub):
datetime_value = nodes.CloneableNode(node.left)
if node.left.type.is_datetime:
units1_node = nodes.DateTimeAttributeNode(
datetime_value, 'units')
else:
units1_node = nodes.TimeDeltaAttributeNode(
datetime_value, 'units')
datetime_value = nodes.CloneableNode(node.right)
if node.right.type.is_datetime:
units2_node = nodes.DateTimeAttributeNode(
datetime_value, 'units')
else:
units2_node = nodes.TimeDeltaAttributeNode(
datetime_value, 'units')
unit_node = function_util.utility_call(
self.context, self.llvm_module,
"get_target_unit_for_datetime_timedelta",
args=[units1_node, units2_node])
datetime_value = nodes.CloneableNode(node.left)
if node.left.type.is_datetime:
args1 = [
nodes.DateTimeAttributeNode(
datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(
datetime_value.clone, 'units'),]
else:
args1 = [
nodes.TimeDeltaAttributeNode(
datetime_value, 'diff'),
nodes.TimeDeltaAttributeNode(
datetime_value.clone, 'units'),]
datetime_value = nodes.CloneableNode(node.right)
if node.right.type.is_datetime:
args2 = [
nodes.DateTimeAttributeNode(
datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(
datetime_value.clone, 'units'),]
else:
args2 = [
nodes.TimeDeltaAttributeNode(
datetime_value, 'diff'),
nodes.TimeDeltaAttributeNode(
datetime_value.clone, 'units'),]
if isinstance(node.op, ast.Add):
diff_node = function_util.utility_call(
self.context, self.llvm_module,
"add_datetime_timedelta",
args=args1+args2+[unit_node])
elif isinstance(node.op, ast.Sub):
diff_node = function_util.utility_call(
self.context, self.llvm_module,
"sub_datetime_timedelta",
args=args1+args2+[unit_node])
node = nodes.DateTimeNode(diff_node, unit_node)
else:
raise NotImplementedError
elif node.left.type.is_string and node.right.type.is_string:
node.left = nodes.CoercionNode(node.left, object_)
node.right = nodes.CoercionNode(node.right, object_)
return nodes.CoercionNode(self.visit_BinOp(node), c_string_type)
return node
def _object_unaryop(self, node, api_name):
return self.visit(
function_util.external_call(self.context,
self.llvm_module,
api_name,
args=[node.operand]))
def _object_Invert(self, node):
return self._object_unaryop(node, 'PyNumber_Invert')
def _object_Not(self, node):
        callnode = function_util.external_call(self.context,
self.llvm_module,
'PyObject_IsTrue',
args=[node.operand])
cmpnode = ast.Compare(callnode, [nodes.Eq()], [nodes.ConstNode(0)])
return self.visit(nodes.IfExp(cmpnode,
nodes.ObjectInjectNode(True),
nodes.ObjectInjectNode(False)))
def _object_UAdd(self, node):
return self._object_unaryop(node, 'PyNumber_Positive')
def _object_USub(self, node):
return self._object_unaryop(node, 'PyNumber_Negative')
def visit_UnaryOp(self, node):
self.generic_visit(node)
if is_obj(node.type):
op_name = type(node.op).__name__
op_method = getattr(self, '_object_%s' % op_name, None)
if op_method:
node = op_method(node)
else:
raise error.NumbaError(
node, 'Unsupported unary operation for objects: %s' %
op_name)
return node
def visit_ConstNode(self, node):
constant = node.pyval
if node.type.is_known_value:
node.type = object_ # TODO: Get rid of known_value
if node.type.is_complex:
real = nodes.ConstNode(constant.real, node.type.base_type)
imag = nodes.ConstNode(constant.imag, node.type.base_type)
node = nodes.ComplexNode(real, imag)
elif node.type.is_numpy_datetime:
datetime_str = nodes.ConstNode('', c_string_type)
node = nodes.NumpyDateTimeNode(datetime_str)
elif node.type.is_datetime:
# JNB: not sure what to do here for datetime value
timestamp = nodes.ConstNode(0, int64)
units = nodes.ConstNode(0, int32)
node = nodes.DateTimeNode(timestamp, units)
elif node.type.is_timedelta:
diff = nodes.ConstNode(0, int64)
units = nodes.ConstNode(0, int32)
node = nodes.TimeDeltaNode(diff, units)
elif node.type.is_pointer and not node.type.is_string:
addr_int = constnodes.get_pointer_address(constant, node.type)
node = nodes.ptrfromint(addr_int, node.type)
elif node.type.is_object and not nodes.is_null_constant(constant):
node = nodes.ObjectInjectNode(constant, node.type)
return node
#------------------------------------------------------------------------
# User nodes
#------------------------------------------------------------------------
def visit_UserNode(self, node):
return node.specialize(self)
|
# Copyright 2020 ewan xu<ewan_xu@outlook.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
""" Collaborative Functional Link Adaptive Filter """
import numpy as np
def cflaf(x, d, M=128, P=5, mu_L=0.2, mu_FL=0.5, mu_a=0.5):
nIters = min(len(x),len(d)) - M
Q = P*2
beta = 0.9
sk = np.arange(0,Q*M,2)
ck = np.arange(1,Q*M,2)
pk = np.tile(np.arange(P),M)
u = np.zeros(M)
w_L = np.zeros(M)
w_FL = np.zeros(Q*M)
alpha = 0
gamma = 1
e = np.zeros(nIters)
for n in range(nIters):
u[1:] = u[:-1]
u[0] = x[n]
g = np.repeat(u,Q)
g[sk] = np.sin(pk*np.pi*g[sk])
g[ck] = np.cos(pk*np.pi*g[ck])
y_L = np.dot(w_L, u.T)
y_FL = np.dot(w_FL,g.T)
e_FL = d[n] - (y_L+y_FL)
w_FL = w_FL + mu_FL * e_FL * g / (np.dot(g,g)+1e-3)
lambda_n = 1 / (1 + np.exp(-alpha))
y_N = y_L + lambda_n*y_FL
e_n = d[n] - y_N
gamma = beta*gamma + (1-beta)*(y_FL**2)
alpha = alpha + (mu_a*e_n*y_FL*lambda_n*(1-lambda_n) / gamma)
alpha = np.clip(alpha,-4,4)
w_L = w_L + mu_L*e_n*u/(np.dot(u,u)+1e-3)
e[n] = e_n
return e
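
# Minimal usage sketch (assumed context: x is a reference/far-end signal and d the observed
# signal containing a nonlinearly distorted echo of x; lengths and parameters are illustrative).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.standard_normal(8000)
    d = np.tanh(np.convolve(x, 0.1 * rng.standard_normal(64), mode='full')[:len(x)])
    err = cflaf(x, d, M=128, P=5)
    print("mean squared error over the last 1000 samples:", np.mean(err[-1000:] ** 2))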
|
"""
Measure resonators, one at a time, with the readout tone centered in the filterbank bin.
"""
from __future__ import division
import time
import numpy as np
from kid_readout.roach import analog, calculate, hardware_tools, tools
from kid_readout.measurement import acquire, basic
from kid_readout.equipment import hardware, starcryo_temps
from equipment.srs import lockin
from equipment.custom import mmwave_source
from kid_readout.settings import LOCKIN_SERIAL_PORT
acquire.show_settings()
acquire.show_git_status()
import logging
logger = acquire.get_script_logger(__file__, level=logging.DEBUG)
# Parameters
suffix = 'test'
attenuations = [0]
f_center = 1e6 * np.array([3420.5])
fractional_frequency_shift = 0
f_center *= (1 + fractional_frequency_shift)
df_baseband_target = 15e3
fine_sweep_num_linewidths = 5
f_sweep_span = 2e6 # The total span of the baseband tones
coarse_stride = 32
f_lo_spacing = 2.5e3 # This is the smallest resolution available
f_baseband_minimum = 100e6 # Keep the tones away from the LO by at least this frequency.
sweep_length_seconds = 0.01
stream_length_seconds = 10
# Hardware
temperature = starcryo_temps.Temperature()
lock = lockin.SR830(serial_device=LOCKIN_SERIAL_PORT)
lock.identification # This seems to be necessary to wake up the lockin
mmw = mmwave_source.MMWaveSource()
mmw.set_attenuator_ticks(0, 0)
mmw.multiplier_input = 'thermal'
mmw.ttl_modulation_source = "roach_2"
mmw.waveguide_twist_angle = 0
conditioner = analog.HeterodyneMarkII()
hw = hardware.Hardware(temperature, lock, mmw, conditioner)
ri = hardware_tools.r2h11nc_with_mk2(initialize=True, use_config=False)
ri.set_modulation_output('high')
ri.iq_delay = -1
ri.adc_valon.set_ref_select(0) # internal
assert np.all(ri.adc_valon.get_phase_locks())
# Calculate sweep parameters, LO and baseband sweep frequencies
ri_state = ri.state
tone_sample_exponent = int(np.round(np.log2(ri_state.adc_sample_rate / df_baseband_target)))
df_baseband = ri_state.adc_sample_rate / 2 ** tone_sample_exponent
num_sweep_tones = int(f_sweep_span / df_baseband)
f_baseband = f_baseband_minimum + ri.state.adc_sample_rate / 2 ** tone_sample_exponent * np.arange(num_sweep_tones)
f_lo_center = f_lo_spacing * np.round((f_center - f_baseband.mean()) / f_lo_spacing)
logger.info("Fine sweep using {:d} tones spanning {:.1f} MHz with resolution {:.0f} Hz (2^{:d} samples)".format(
num_sweep_tones, 1e-6 * f_baseband.ptp(), df_baseband, tone_sample_exponent))
logger.info("Coarse sweep using {:d} tones spanning {:.1f} MHz with resolution {:.0f} Hz (2^{:d} samples)".format(
num_sweep_tones // coarse_stride, 1e-6 * f_baseband.ptp(), coarse_stride * df_baseband, tone_sample_exponent))
# Run
npd = acquire.new_npy_directory(suffix=suffix)
tic = time.time()
try:
for lo_index, f_lo in enumerate(f_lo_center):
assert np.all(ri.adc_valon.get_phase_locks())
tools.set_and_attempt_external_phase_lock(ri=ri, f_lo=1e-6 * f_lo, f_lo_spacing=1e-6 * f_lo_spacing)
for attenuation_index, attenuation in enumerate(attenuations):
ri.set_dac_attenuator(attenuation)
#ri.set_tone_baseband_freqs(freqs=1e-6 * np.array([f_baseband[0]]), nsamp=2 ** tone_sample_exponent)
#time.sleep(1)
#tools.optimize_fft_gain(ri, fraction_of_maximum=0.5)
ri.set_fft_gain(4)
coarse_state = hw.state()
coarse_state['lo_index'] = lo_index
coarse_state['attenuation_index'] = attenuation_index
coarse_sweep = acquire.run_sweep(ri=ri, tone_banks=1e-6 * (f_lo + f_baseband[::coarse_stride, np.newaxis]),
num_tone_samples=2 ** tone_sample_exponent,
length_seconds=stream_length_seconds, state=coarse_state,
verbose=True)[0]
npd.write(coarse_sweep)
coarse_f_r = coarse_sweep.resonator.f_0
coarse_Q = coarse_sweep.resonator.Q
logger.info("Coarse sweep f_r = {:.3f} MHz +/- {:.0f} Hz".format(1e-6 * coarse_f_r,
coarse_sweep.resonator.f_0_error))
logger.info("Coarse sweep Q = {:.0f} +/- {:.0f}".format(coarse_Q, coarse_sweep.resonator.Q_error))
raise Exception()
df_filterbank = calculate.stream_sample_rate(ri_state)
f_baseband_bin_center = df_filterbank * np.round(f_baseband.mean() / df_filterbank)
f_lo_fine = f_lo_spacing * np.round((coarse_f_r - f_baseband_bin_center) / f_lo_spacing)
assert np.all(ri.adc_valon.get_phase_locks())
tools.set_and_attempt_external_phase_lock(ri=ri, f_lo=1e-6 * f_lo, f_lo_spacing=1e-6 * f_lo_spacing)
#fine_indices = np.where(np.abs(f_lo_fine + f_baseband - coarse_f_r) <=
# (fine_sweep_num_linewidths / 2) * (coarse_f_r / coarse_Q))[0]
fine_indices = np.arange(f_baseband.size)
fine_sweep = acquire.run_sweep(ri=ri, tone_banks=1e-6 * (f_lo + f_baseband[fine_indices, np.newaxis]),
num_tone_samples=2 ** tone_sample_exponent,
length_seconds=stream_length_seconds, state=hw.state())[0]
ri.set_tone_freqs(np.array([]))
logger.info("Recording {:.1f} s stream with source off".format(stream_length_seconds))
off_stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True, state=hw.state())[0]
ri.set_modulation_output(7)
logger.info("Recording {:.1f} s stream with source modulating".format(stream_length_seconds))
mod_stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True, state=hw.state())[0]
ri.set_modulation_output('high')
sweep_stream_list = basic.SingleSweepStreamList(single_sweep=fine_sweep,
stream_list=[off_stream, mod_stream],
state={'lo_index': lo_index,
'attenuation_index': attenuation_index})
npd.write(sweep_stream_list)
npd.write(ri.get_adc_measurement())
finally:
ri.set_modulation_output('high')
ri.set_dac_attenuator(62)
npd.close()
print("Wrote {}".format(npd.root_path))
print("Elapsed time {:.0f} minutes.".format((time.time() - tic) / 60))
|
def test_InStock_cost():
"""
Tests if cost value is received correctly
    when condition is In Stock on www.walmart.com.
"""
def test_OutOfStock_cost():
"""
Tests if cost value is received correctly
when condition is Out of Stock on www.walmart.com.
"""
|
#!/usr/bin/env python3
"""
**Description**
Helper functions for OpenAI Gym environments.
"""
import operator
from functools import reduce
from collections import OrderedDict
from gym.spaces import Box, Discrete, Dict, Tuple
def is_vectorized(env):
return hasattr(env, 'num_envs') and env.num_envs > 1
def is_discrete(space, vectorized=False):
"""
Returns whether a space is discrete.
**Arguments**
* **space** - The space.
* **vectorized** - Whether to return the discreteness for the
vectorized environments (True) or just the discreteness of
the underlying environment (False).
"""
msg = 'Space type not supported.'
assert isinstance(space, (Box, Discrete, Dict, Tuple)), msg
if isinstance(space, Discrete):
return True
if isinstance(space, Box):
return False
if isinstance(space, Dict):
dimensions = {
k[0]: is_discrete(k[1], vectorized) for k in space.spaces.items()
}
return OrderedDict(dimensions)
if isinstance(space, Tuple):
if not vectorized:
return is_discrete(space[0], vectorized)
discrete = tuple(
is_discrete(s) for s in space
)
return discrete
def get_space_dimension(space, vectorized_dims=False):
"""
Returns the number of elements of a space sample, when unrolled.
**Arguments**
* **space** - The space.
* **vectorized_dims** - Whether to return the full dimension for vectorized
environments (True) or just the dimension for the underlying
environment (False).
"""
msg = 'Space type not supported.'
assert isinstance(space, (Box, Discrete, Dict, Tuple)), msg
if isinstance(space, Discrete):
return space.n
if isinstance(space, Box):
if len(space.shape) > 1 and not vectorized_dims:
return reduce(operator.mul, space.shape[1:], 1)
return reduce(operator.mul, space.shape, 1)
if isinstance(space, Dict):
dimensions = {
k[0]: get_space_dimension(k[1], vectorized_dims) for k in space.spaces.items()
}
return OrderedDict(dimensions)
if isinstance(space, Tuple):
if not vectorized_dims:
return get_space_dimension(space[0], vectorized_dims)
dimensions = tuple(
get_space_dimension(s) for s in space
)
return dimensions
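# Minimal usage sketch (illustrative, not part of the original helpers): shows the
# return values of is_discrete() and get_space_dimension() for standard gym spaces.
if __name__ == '__main__':
    box = Box(low=-1.0, high=1.0, shape=(4,))
    disc = Discrete(3)
    print(is_discrete(box), is_discrete(disc))                  # False True
    print(get_space_dimension(box), get_space_dimension(disc))  # 4 3
    both = Dict({'position': box, 'choice': disc})
    print(get_space_dimension(both))  # OrderedDict mapping each key to its dimension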
|
from numpy import eye, kron, diag  # these helpers were removed from the scipy top-level namespace
# Define the basis for the liouvillian
# Axes: _XY, __Z
_XY, __Z = (
diag([1.0, 1.0, 0.0]),
diag([0.0, 0.0, 1.0]),
)
# States: B, C or A & B & C
___B, ___C, _ABC = (
diag([0.0, 1.0, 0.0]),
diag([0.0, 0.0, 1.0]),
diag([1.0, 1.0, 1.0]),
)
# Auto-relaxation rates
R_IXY, R_IZ, DR_IXY_AB, DR_IXY_AC = (
kron(_ABC, -_XY),
kron(_ABC, -__Z),
kron(___B, -_XY),
kron(___C, -_XY),
)
# Chemical shifts
_CS = [[+0.0, -1.0, +0.0],
[+1.0, +0.0, +0.0],
[+0.0, +0.0, +0.0]]
CS, DW_AB, DW_AC = (
kron(_ABC, _CS),
kron(___B, _CS),
kron(___C, _CS),
)
# Exchange rates
KAB = kron([[-1.0, +0.0, +0.0],
[+1.0, +0.0, +0.0],
[+0.0, +0.0, +0.0]], eye(3))
KBA = kron([[+0.0, +1.0, +0.0],
[+0.0, -1.0, +0.0],
[+0.0, +0.0, +0.0]], eye(3))
KBC = kron([[+0.0, +0.0, +0.0],
[+0.0, -1.0, +0.0],
[+0.0, +1.0, +0.0]], eye(3))
KCB = kron([[+0.0, +0.0, +0.0],
[+0.0, +0.0, +1.0],
[+0.0, +0.0, -1.0]], eye(3))
KAC = kron([[-1.0, +0.0, +0.0],
[+0.0, +0.0, +0.0],
[+1.0, +0.0, +0.0]], eye(3))
KCA = kron([[+0.0, +0.0, +1.0],
[+0.0, +0.0, +0.0],
[+0.0, +0.0, -1.0]], eye(3))
# B1 field along x
W1X = kron(_ABC, [[+0.0, +0.0, +0.0],
[+0.0, +0.0, -1.0],
[+0.0, +1.0, +0.0]])
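# --- Illustrative combination (not part of the original basis definitions) ---
# The matrices above are meant to be combined linearly into a single 9x9
# Liouvillian; every rate and frequency below is an arbitrary example value,
# not a physical parameter from the original model.
if __name__ == '__main__':
    r2, r1 = 10.0, 1.5            # assumed transverse / longitudinal relaxation rates
    dw_ab = 800.0                 # assumed B-state chemical shift offset
    kab, kba = 50.0, 500.0        # assumed A<->B exchange rates
    w1x = 2.0 * 3.141592653589793 * 25.0  # assumed B1 field strength along x
    liouvillian = (r2 * R_IXY + r1 * R_IZ + dw_ab * DW_AB +
                   kab * KAB + kba * KBA + w1x * W1X)
    print(liouvillian.shape)  # (9, 9)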
|
import os
import sys
import glob # Only for python 3.5+
import generate_txt_from_pydocs as gen_txt
''' Use Example:
python generate_txt_from_python_progs.py [SOURCE_FILE_DIR] [TARGET_DIRECTORY]
'''
def gen_txt_from_python_files(source_path, target_directory):
os.system('mkdir ' + target_directory)
target_directory = target_directory + \
'/' if target_directory[-1] != '/' else target_directory
f_target_mode = 'w'
target_file_name = ''
save_class_or_func_name_only = True # Setting this True overrides the save_entire_path
save_entire_path = False
    for filename in glob.iglob(os.path.join(source_path, '**', '*.py'), recursive=True):
print(filename)
if save_entire_path:
            # i.e. target_file_name = target_directory/main_module_sub_module1_sub_module2_filename
target_file_name = target_directory + \
filename.replace('/', '_').replace('.py', '')
else:
# i.e. target_file_name = filename.txt
target_file_name = target_directory + \
(filename.split('/')[-1]).replace('.py', '')
with open(filename, 'r') as fr:
split = []
repeat = 0
for line in fr:
if repeat == 1:
target_file_name_with_module = target_file_name + '_' + \
(split[0])
                # if we only want to save the func_name, this might cause collisions
if save_class_or_func_name_only:
target_file_name_with_module = target_directory + \
split[0]
target_file_name_with_module = target_file_name_with_module.replace(
'class ', '').replace('def ', '').replace('.', '_')
print("\tTARGET=", target_file_name_with_module)
with open(target_file_name_with_module+'.txt', f_target_mode) as fw:
fw.write('('.join(split))
for line in fr:
if line.find('def') == 0 or line.find('class') == 0:
break
else:
fw.write(line)
if line.find('def') == 0 or line.find('class') == 0:
target_file_name_with_module = target_file_name + '_' + \
(line.split('(')[0])
                # if we only want to save the func_name, this might cause collisions
if save_class_or_func_name_only:
target_file_name_with_module = target_directory + \
(line.split('(')[0])
target_file_name_with_module = target_file_name_with_module.replace(
'class ', '').replace('def ', '').replace('.', '_')
print("\tTARGET=", target_file_name_with_module)
with open(target_file_name_with_module+'.txt', f_target_mode) as fw:
fw.write(line)
for line in fr:
if line.find('def') == 0 or line.find('class') == 0:
repeat = 1
split = line.split('(')
break
else:
fw.write(line)
def main():
if len(sys.argv) != 3:
print(
"Usage: python generate_txt_from_python_progs.py [SOURCE_FILE_DIR] [TARGET_DIRECTORY]")
return
gen_txt_from_python_files(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
|
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
map = Basemap(projection='poly',
lon_0=0.0, lat_0=0,
llcrnrlon=-80.,llcrnrlat=-40,urcrnrlon=80.,urcrnrlat=40.)
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
map.drawcoastlines()
map.drawparallels(range(-90, 100, 10), linewidth=2, dashes=[4, 2], labels=[1,0,0,1], color='r', zorder=0 )
plt.show()
|
from keystoneclient.auth.identity import v3
from keystoneclient import session
from keystoneclient.v3 import client as keystoneclient
from subprocess import call, Popen, PIPE
import requests
import json
import testtools
keystone_url = "http://10.4.13.14:35357/v3"
cinder_url = "http://10.4.13.14:8776/v2/"
quota_url = "/os-quota-sets/"
project_url = keystone_url + "/projects"
domain_url = keystone_url + "/domains"
def get_token_json(name, project_id):
return '{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "domain": { "name": "Domain" }, "name": "%s", "password": "secretsecret" } } }, "scope": { "project": { "domain": { "name": "Domain" }, "id": "%s" } } } }' % (name, project_id)
def default_token_json(name, project_id):
return '{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "domain": { "name": "Default" }, "name": "%s", "password": "nomoresecrete" } } }, "scope": { "project": { "domain": { "name": "Default" }, "name": "%s" } } } }' % (name, project_id)
def domain_json():
    return '{ "domain": { "description": "My new domain", "enabled": true, "name": "Domain" } }'
def project_json(name, domain_id, parent_id=None):
return '{ "project": { "description": "My new project", "domain_id": "%s", "parent_id": "%s", "enabled": true, "name": "%s" } }' % (domain_id, parent_id, name) if parent_id else '{ "project": { "description": "My new project", "domain_id": "%s", "enabled": true, "name": "%s"} }' % (domain_id, name)
def get_token(token_json):
token_headers = {'Content-Type': 'application/json'}
r = requests.post(keystone_url + "/auth/tokens",
headers=token_headers,
data=token_json)
return r.headers['x-subject-token']
def get_role(token, name):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
r = requests.get(keystone_url + '/roles?name=%s' % name,
headers=headers)
return json.loads(r._content)['roles'][0]['id']
def create_domain(data, token):
create_domain_headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
r = requests.post(domain_url,
headers=create_domain_headers,
data=data)
return json.loads(r._content)['domain']['id']
def create_project(data, token):
create_project_headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
r = requests.post(project_url,
headers=create_project_headers,
data=data)
return json.loads(r._content)['project']['id']
def disable_domain(token, domain_id):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
data = '{ "domain": {"enabled": false}}'
r = requests.patch(domain_url+ "/%s" % domain_id,
headers=headers,
data=data)
    print("Disabled domain %s" % domain_id)
def disable_project(token, project_id):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
data = '{ "project": {"enabled": false}}'
r = requests.patch(project_url+ "/%s" % project_id,
headers=headers,
data=data)
    print("Disabled project %s" % project_id)
def delete_domain(token, domain_id):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
r = requests.delete(domain_url+ "/%s" % domain_id,
headers=headers)
    print("Deleted domain %s" % domain_id)
def delete_project(token, project_id):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
r = requests.delete(project_url+ "/%s" % project_id,
headers=headers)
    print("Deleted project %s" % project_id)
def create_user(token, user_name, domain_id):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
data = '{ "user": {"description": "User", "domain_id": "%s", "email": "jdoe@example.com", "enabled": true, "name": "%s", "password": "secretsecret" } }' % (domain_id, user_name)
r = requests.post(keystone_url + '/users',
headers=headers,
data=data)
user_id = json.loads(r._content)['user']['id']
    print("Created user %s in domain %s" % (user_id, domain_id))
return json.loads(r._content)['user']['id']
def grant_user_role(token, user_id, role, projects):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
for project in projects:
grant_role = requests.put(project_url + "/%s/users/%s/roles/%s" % (project, user_id, role),
headers=headers)
        print("Granted role for user %s in project %s" % (user_id, project))
def update_quota(token, project_id, target, value):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
data = '{ "quota_set": { "volumes": %s } }' % value
r = requests.put(cinder_url + project_id + quota_url + target,
headers=headers,
data=data)
if 'forbidden' in json.loads(r._content):
quota = json.loads(r._content)['forbidden']['code']
elif 'badRequest' in json.loads(r._content):
quota = json.loads(r._content)['badRequest']['message']
else:
quota = json.loads(r._content)['quota_set']['volumes']
return quota
def get_quota(token, project_id, target):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
r = requests.get(cinder_url + project_id + quota_url + target,
headers=headers)
if 'forbidden' in json.loads(r._content):
quota = json.loads(r._content)['forbidden']['code']
else:
quota = json.loads(r._content)['quota_set']['volumes']
return quota
def quota_show(token, project_id, target):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
r = requests.get(cinder_url + project_id + quota_url + target + '?usage=true',
headers=headers)
quota = json.loads(r._content)['quota_set']['volumes']
return quota
def create_volume(token, project_id):
headers = {'X-Auth-Token': token,
'Content-Type': 'application/json'}
data = '{ "volume": { "status": "creating", "description": null, "availability_zone": null, "source_volid": null, "consistencygroup_id": null, "snapshot_id": null, "source_replica": null, "size": 10, "user_id": null, "name": null, "imageRef": null, "attach_status": "detached", "volume_type": null, "project_id": null, "metadata": {} } }'
r = requests.post(cinder_url + project_id + '/volumes',
headers=headers,
data=data)
return json.loads(r._content)['volume']['id']
class Tee(object):
def __init__(self, *files):
self.files = files
def write(self, obj):
for f in self.files:
f.write(obj)
f.flush() # If you want the output to be visible immediately
def flush(self) :
for f in self.files:
f.flush()
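# Minimal usage sketch for Tee (illustrative, not part of the original helpers):
# duplicate output into a log file so it is both printed and kept on disk.
# The log file name below is arbitrary.
if __name__ == '__main__':
    import sys
    with open('quota_test.log', 'w') as log_file:
        tee = Tee(sys.stdout, log_file)
        tee.write("this line goes to the console and to quota_test.log\n")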
|
"""
Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
Licensed under The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, ConcatDataset
import argparse
from utils import inception_utils
from dataloader.dataset import (DatasetLoader)
import pickle
@torch.no_grad()
def extract_features(args, loader, inception, device):
pbar = loader
pools, logits = [], []
for data in pbar:
img = data['image']
# check img dim
if img.shape[1] != 3:
img = img.expand(-1,3,-1,-1)
img = img.to(device)
pool_val, logits_val = inception(img)
pools.append(pool_val.cpu().numpy())
logits.append(F.softmax(logits_val, dim=1).cpu().numpy())
pools = np.concatenate(pools, axis=0)
logits = np.concatenate(logits, axis=0)
return pools, logits
def get_dataset(args):
unlabel_dataset = DatasetLoader(args, args.path, is_label=False, extension=args.extension)
train_val_dataset = DatasetLoader(args, args.path, is_label=True, phase='train-val', extension=args.extension)
dataset = ConcatDataset([unlabel_dataset, train_val_dataset])
return dataset
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(
description='Calculate Inception v3 features for datasets'
)
parser.add_argument('--size', type=int, default=256)
parser.add_argument('--batch', default=64, type=int, help='batch size')
parser.add_argument('--n_sample', type=int, default=50000)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--image_mode', type=str, default='RGB')
    parser.add_argument('--extension', type=str, default="jpg", help='Extension of the files in the dataset (jpg, png, tiff, etc.)')
    parser.add_argument('path', metavar='PATH', help='path to dataset dir')
args = parser.parse_args()
inception = inception_utils.load_inception_net()
dset = get_dataset(args)
loader = DataLoader(dset, batch_size=args.batch, num_workers=4)
pools, logits = extract_features(args, loader, inception, device)
print(f'extracted {pools.shape[0]} features')
print('Calculating inception metrics...')
IS_mean, IS_std = inception_utils.calculate_inception_score(logits)
print('Training data from dataloader has IS of %5.5f +/- %5.5f' % (IS_mean, IS_std))
print('Calculating means and covariances...')
mean = np.mean(pools, axis=0)
cov = np.cov(pools, rowvar=False)
with open(args.output, 'wb') as f:
pickle.dump({'mean': mean, 'cov': cov, 'size': args.size, 'path': args.path}, f)
|
import numpy as np
import math
from solarpy import irradiance_on_plane
from pyephem_sunpath.sunpath import sunpos
# Function to return a solar irradiance in [W/m^2]
# @params: installation - installation object from class Installation
# clouds_dict - dict with datetime and clouds forecast from get_hourly_forecast function
def get_solar_irradiance(installation, clouds_dict):
# Parameters to formula
a_coefficient = 0.25
# Get clear sky irradiance
clear_sky_irradiance_dict = get_clear_sky_solar_irradiance(installation, clouds_dict)
# Get relative insolation dict
relative_insolation_dict = get_relative_insolation(clouds_dict)
# Solar irradiance dict
solar_irradiance_dict = {}
# For every element in clouds dict
for date_time in clear_sky_irradiance_dict:
clear_sky_irradiance = clear_sky_irradiance_dict[date_time]
# It's not the safest solution to use first dict's keys to manage second dict, but they have identical keys
relative_insolation = relative_insolation_dict[date_time]
solar_irradiance = clear_sky_irradiance * (a_coefficient + ((1 - a_coefficient) * relative_insolation))
solar_irradiance_dict[date_time] = solar_irradiance
return solar_irradiance_dict
# Function to return a clear sky solar irradiance in [W/m^2]
# @params: installation - installation object from class Installation
# clouds_dict - dict with datetime and clouds forecast from get_hourly_forecast function
def get_clear_sky_solar_irradiance(installation, clouds_dict):
# Get horizontal solar irradiation dict
horizontal_solar_irradiance_dict = get_horizontal_solar_irradiance(installation, clouds_dict)
# Get sun position dict necessary to get correction factor
sun_position_dict = get_sun_position(installation, clouds_dict)
# Get correction factor dict
correction_factor_dict = get_correction_factor(installation, sun_position_dict)
# Solar Irradiance dict- as a key- datetime, as a value- solar irradiance
solar_irradiance_dict = {}
# For every element in horizontal solar irradiance dict
for date_time in horizontal_solar_irradiance_dict:
horizontal_solar_irradiance = horizontal_solar_irradiance_dict[date_time]
# It's not the safest solution to use first dict's keys to manage second dict, but they have identical keys
correction_factor = correction_factor_dict[date_time]
solar_irradiance = horizontal_solar_irradiance * correction_factor
solar_irradiance_dict[date_time] = solar_irradiance
return solar_irradiance_dict
# Function to return horizontal solar irradiance in [W/m^2]
# Only datetime from clouds_dict will be used
# @params: installation - installation object from class Installation
# clouds_dict - dict with datetime and clouds forecast from get_hourly_forecast function
def get_horizontal_solar_irradiance(installation, clouds_dict):
# Plane pointing zenith
v_norm = np.array([0, 0, -1])
# Installation parameters
altitude = installation.altitude
latitude = installation.latitude
# Solar Irradiance dict- as a key- datetime, as a value- solar irradiance
solar_irradiance_dict = {}
# For every element in clouds dict
for date_time in clouds_dict:
# Computed solar irradiance
irradiance = irradiance_on_plane(v_norm, altitude, date_time, latitude)
# Dict_name[key] = value
solar_irradiance_dict[date_time] = irradiance
return solar_irradiance_dict
# Function to get sun position for datetime, lat, long, timezone
# It returns sun elevation and sun azimuth in [deg], azimuth is a clockwise direction starting from north
# For example- North is 0 degrees azimuth
# @params: installation - installation object from class Installation
# clouds_dict - dict with datetime and clouds forecast from get_hourly_forecast function
def get_sun_position(installation, clouds_dict):
# Installation parameters
latitude = installation.latitude
longitude = installation.longitude
# Sun position dict- as a key- datetime, as a value- sun elevation and azimuth in a list
sun_position_dict = {}
# For every element in clouds dict
for date_time in clouds_dict:
# Timezone value in float type
timezone = date_time.utcoffset().total_seconds() / 3600
# Getting sun elevation and sun azimuth values from sunpos function
sun_elevation, sun_azimuth = sunpos(date_time, latitude, longitude, timezone, dst=False)
# Dict_name[key] = value
sun_position_dict[date_time] = [sun_azimuth, sun_elevation]
return sun_position_dict
# Function to get correction factor of irradiance on tilted pv installation
# @params: installation - installation object from class Installation
# sun_position_dict - dict with datetime and sun position from sun_position function
def get_correction_factor(installation, sun_position_dict):
# Parameters to formula
lat = installation.latitude
azi = installation.azimuth
ele = installation.elevation
# Correction factor dict: datetime is a key and correction factor is a value
correction_factor_dict = {}
# For every element in sun position dict
for date_time in sun_position_dict:
# Get sun azimuth and elevation from sun_position_dict, [azimuth, elevation] in list
sun_azi = sun_position_dict[date_time][0]
sun_ele = sun_position_dict[date_time][1]
correction_factor = correction_factor_formula(lat, azi, ele, sun_azi, sun_ele)
correction_factor_dict[date_time] = correction_factor
return correction_factor_dict
# Function to compute and return correction factor value
# @params: lat- installation latitude
# azi- installation azimuth
# ele- installation elevation
# sun_azi- sun azimuth
# sun_ele- sun elevation
def correction_factor_formula(lat, azi, ele, sun_azi, sun_ele):
nominator_1 = math.sin(math.radians(sun_ele)) * (math.sin(math.radians(lat)) * math.cos(math.radians(ele))
- math.cos(math.radians(lat)) * math.sin(math.radians(ele)) * math.cos(math.radians(azi)))
nominator_2 = math.cos(math.radians(sun_ele)) * (math.cos(math.radians(lat)) * math.cos(math.radians(ele))
* math.cos(math.radians(sun_azi)) + math.sin(math.radians(lat)) * math.sin(math.radians(ele))
* math.cos(math.radians(azi)) * math.cos(math.radians(sun_azi)) + math.sin(math.radians(ele))
* math.sin(math.radians(azi)) * math.sin(math.radians(sun_azi)))
denominator = math.sin(math.radians(sun_ele)) * math.sin(math.radians(lat)) + math.cos(math.radians(sun_ele))\
* math.cos(math.radians(lat)) * math.cos(math.radians(sun_azi))
# Correction factor
correction_factor = abs((nominator_1 + nominator_2) / denominator)
return correction_factor
# Function to calculate relative insolation
# @params: clouds_dict - dict with datetime and clouds forecast from get_hourly_forecast function
def get_relative_insolation(clouds_dict):
# Parameters to formula
empirical_coefficient = 0.0043 # For Poland
# Relative insolation dict
relative_insolation_dict = {}
# For every element in clouds dict
for date_time in clouds_dict:
clouds = clouds_dict[date_time]
# Relative insolation formula, divided by 100 to not return the result as %, only a fraction
relative_insolation = ((100 - clouds) * (1 + empirical_coefficient * clouds) / 100)
relative_insolation_dict[date_time] = relative_insolation
return relative_insolation_dict
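# --- Illustrative sketch (not part of the original module) ---
# The helpers above need an Installation object and a clouds forecast dict, so this
# example only exercises the two pure formulas with arbitrary sample values:
# a panel at 52 deg latitude, tilted 30 deg, facing the sun at 180 deg azimuth
# and 45 deg elevation, under 50% cloud cover.
if __name__ == '__main__':
    cf = correction_factor_formula(lat=52.0, azi=0.0, ele=30.0, sun_azi=180.0, sun_ele=45.0)
    print("correction factor:", cf)
    clouds = 50
    # Same relative insolation formula as in get_relative_insolation (0.0043 coefficient)
    print("relative insolation:", (100 - clouds) * (1 + 0.0043 * clouds) / 100)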
|
"""Remove applicable_date from form_distribution
Revision ID: 4aaa5ba83bf5
Revises: 24d87e41078e
Create Date: 2020-02-17 13:48:43.408752
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4aaa5ba83bf5'
down_revision = '24d87e41078e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('class_date_index', table_name='form_distribution')
op.drop_column('form_distribution', 'applicable_date')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('form_distribution', sa.Column('applicable_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False))
op.create_index('class_date_index', 'form_distribution', ['class_section_id', 'applicable_date'], unique=False)
# ### end Alembic commands ###
|
import numpy as np
from collections import defaultdict
from scipy.sparse import csr_matrix
import pickle as pkl
import os
import sys
from collections import Counter
import random
def align_relation(dataset):
rel_list_0, rel_list_1, rel_list_2 = [], [], []
assign_dict = defaultdict(list)
file0 = dataset + "/triples_1"
file1 = dataset + "/triples_2"
file2 = "../../JAPE/data/dbp15k/{}/0_3/triples_2".format(dataset)
with open(file0, "r") as rf0:
for line in rf0.readlines():
rel_list_0.append(line.strip().split("\t")[1])
with open(file1, "r") as rf1:
for line in rf1.readlines():
rel_list_1.append(line.strip().split("\t")[1])
with open(file2, "r") as rf2:
for line in rf2.readlines():
rel_list_2.append(line.strip().split("\t")[1])
assert len(rel_list_1) == len(rel_list_2)
shared_relation = set(rel_list_0).intersection(set(rel_list_1))
print("original shared relation: ", len(shared_relation))
for i in range(len(rel_list_1)):
assign_dict[rel_list_2[i]].append(rel_list_1[i])
aligned_relation_num = 0
with open(dataset + "/ref_rel_ids", "w") as wf:
for key in assign_dict:
ind = list(set(assign_dict[key]))
assert len(ind) == 1
if key != ind[0]:
aligned_relation_num += 1
wf.write(key + "\t" + ind[0] + "\n")
print("aligned relation: ", aligned_relation_num)
# with open(file1, "r") as rf1:
# with open(dataset + "/triples_2_relaligned", "w") as wf:
# for i,line in enumerate(rf1.readlines()):
# line = line.split("\t")
# line[1] = rel_list_2[i]
# line = "\t".join(line)
# wf.write(line)
def process_wn():
def one_hot(labels):
label_num = np.max([np.max(i) for i in labels if len(i) > 0]) + 1
label_onehot = np.zeros([len(labels), label_num])
idx = []
label_list = []
for i, each in enumerate(labels):
if len(each) > 0:
idx.append(i)
assert len(each) == 1
for j in each:
label_onehot[i][j] = 1.
label_list.append(j)
return label_onehot, idx, label_list
train_data = np.load("class/wordnet/train_data.npz")
test_data = np.load("class/wordnet/test_data.npz")
print(train_data.files)
labels = train_data["labels"]
label_len_list = [len(i) for i in labels]
print(labels)
print("label num: {} dist: {}".format(len(labels), Counter(label_len_list)))
y, idx, label_list = one_hot(labels)
random.shuffle(idx)
train = idx[:int(0.1*len(idx))]
test = idx[int(0.1*len(idx)):]
print("process node with label num: {} dist: {}".format(len(idx), Counter(label_list)))
print("label num: ", len(Counter(label_list)))
head_idx, rel_idx, tail_idx = [], [], []
for tuple in train_data["train_data"]:
head_idx.append(tuple[0])
rel_idx.append(tuple[1])
tail_idx.append(tuple[2])
head_idx, rel_idx, tail_idx = set(head_idx), set(rel_idx), set(tail_idx)
print("num of head id: {}, rel id: {}, tail id: {}".format(len(head_idx), len(rel_idx), len(tail_idx)))
# print(len(head_idx.intersection(rel_idx)))
# print(len(tail_idx.intersection(head_idx)))
KG = np.concatenate([train_data["train_data"], test_data["test_data"]])
print(KG.shape)
e = np.max([train_data["nums_type"][0], train_data["nums_type"][2]])
print(e)
data = {'A': KG,
'y': y,
'train_idx': train,
'test_idx': test,
"e": e
}
with open('class/wordnetpro.pickle', 'wb') as handle:
pkl.dump(data, handle, protocol=pkl.HIGHEST_PROTOCOL)
def process_fb():
def filter(raw_list):
num_dict = defaultdict(int)
for item in raw_list:
num_dict[item] += 1
sort_dict = sorted([[key, num_dict[key]] for key in num_dict], key=lambda x:x[1])
top_dict = sort_dict[-51:-1]
# print(top_dict)
return [each[0] for each in top_dict]
def reorder(raw_list):
order_dict = {}
order = 0
for item in raw_list:
if item not in order_dict:
order_dict[item] = order
order += 1
return order_dict
data = {}
KG = []
for term in ["train", "valid", "test"]:
with open("class/FB15k/freebase_mtr100_mte100-{}.txt".format(term), "r") as rf:
for line in rf.readlines():
line = line.strip().split("\t")
KG.append(line)
ent = [i[0] for i in KG] + [i[2] for i in KG]
rel = [i[1] for i in KG]
ent_order = reorder(ent)
rel_order = reorder(rel)
new_KG = [[ent_order[i[0]],rel_order[i[1]],ent_order[i[2]]] for i in KG]
# data["A"] = new_KG
ent_labels = []
labels = []
with open("class/FB15k/entity2type.txt", "r") as rf:
for line in rf.readlines():
line = line.strip().split("\t")
ent_labels.append(line)
labels += line[1:]
labels = filter(labels)
label_order = reorder(labels)
new_ent_labels = []
for each in ent_labels:
each_label = []
# print(each)
for label in each[1:]:
# print(label)
if label in label_order:
new_ent_labels.append([ent_order[each[0]], label_order[label]])
data = np.array([1. for i in new_ent_labels])
row = np.array([i[0] for i in new_ent_labels])
col = np.array([i[1] for i in new_ent_labels])
y = csr_matrix((data, (row, col)), shape=(len(ent_order), len(label_order)))
# data["y"] = y
train, test = [], []
with open("class/FB15k/train.txt", "r") as rf:
for line in rf.readlines():
line = line.strip()
train.append(ent_order[line])
with open("class/FB15k/test.txt", "r") as rf:
for line in rf.readlines():
line = line.strip()
test.append(ent_order[line])
# data['train_idx'] = train
# data['test_idx'] = test
# data["e"] = len(ent_order)
# print(train[:10])
# print(test[:10])
data = {'A': new_KG,
'y': y,
'train_idx': train,
'test_idx': test,
"e": len(ent_order)
}
with open('class/fb15kpro.pickle', 'wb') as handle:
pkl.dump(data, handle, protocol=pkl.HIGHEST_PROTOCOL)
if __name__ == '__main__':
dataset = sys.argv[1]
align_relation(dataset)
# process_fb()
# process_wn()
|
# Generated by Django 3.2.12 on 2022-03-15 07:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menu', '0011_alter_size_size'),
]
operations = [
migrations.AddField(
model_name='size',
name='price',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12),
preserve_default=False,
),
migrations.AlterField(
model_name='size',
name='size',
field=models.CharField(choices=[('DEFAULT', 'DEFAULT'), ('L', 'L'), ('XL', 'XL'), ('FAMILY', 'FAMILY')], error_messages={'unique': 'That size already exists.'}, max_length=7, unique=True),
),
]
|
# Generated by Django 2.2 on 2019-05-14 13:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
('topics', '0036_auto_20190513_2253'),
]
operations = [
migrations.RenameField(
model_name='section',
old_name='course_learner',
new_name='section_learner',
),
migrations.CreateModel(
name='Learner_Quiz_Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isFinished', models.BooleanField(default=False)),
('learner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Learner')),
('quiz', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='topics.Quiz')),
],
),
migrations.CreateModel(
name='Learner_Lecture_Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isFinished', models.BooleanField(default=False)),
('learner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Learner')),
('lecture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='topics.Lecture')),
],
),
migrations.AddField(
model_name='lecture',
name='lecture_learner',
field=models.ManyToManyField(blank=True, through='topics.Learner_Lecture_Record', to='accounts.Learner'),
),
migrations.AddField(
model_name='quiz',
name='quiz_learner',
field=models.ManyToManyField(blank=True, through='topics.Learner_Quiz_Record', to='accounts.Learner'),
),
]
|
# Generated by Django 2.2.24 on 2022-01-09 18:58
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0004_product_date'),
]
operations = [
migrations.AlterField(
model_name='product',
name='date',
field=models.DateField(default=datetime.datetime.now, verbose_name='дата'),
),
]
|
from pylayers.antprop.antenna import *
A = Antenna(typ='azel',param={'filename':'antenna.ant','pol':'V'})
|
months = ["JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC"]
def clean_value(inp):
inp = inp*100
    inp = "{0:.2f}".format(inp)
return inp
def clean_aditya(data):
for item in data:
actual_date = item["Date"].split("-")
day = actual_date[0]
month = months.index(actual_date[1]) + 1
year = actual_date[2]
item["Date"] = str(month) + "-" + day + "-" + year
return data
def clean_axis(data):
for item in data:
actual_date = item["Date"].split("-")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
def clean_baroda(data):
for item in data:
actual_date = item["Date"].split("/")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
def clean_bnp(data):
for item in data:
actual_date = item["Date"].split("/")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
def clean_canara(data):
for item in data:
actual_date = item["Date"].split("/")
day = actual_date[1]
month = actual_date[0]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
def clean_edelweiss(data):
for item in data:
actual_date = item["Date"].split(" ")
day = actual_date[0]
month = months.index(actual_date[1].upper()) + 1
year = actual_date[2]
item["Date"] = str(month) + "-" + day + "-" + year
return data
def clean_essel(data):
for item in data:
actual_date = item["Date"].split("/")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
item["REGULAR_Base_TER_perc"] = "{0:.2f}".format(item["REGULAR_Base_TER_perc"])
item["REGULAR_Addnl_52_6a_b_exp_perc"] = "{0:.2f}".format(item["REGULAR_Addnl_52_6a_b_exp_perc"])
item["REGULAR_Addnl_52_6a_c_exp_perc"] = "{0:.2f}".format(item["REGULAR_Addnl_52_6a_c_exp_perc"])
item["REGULAR_GST_perc"] = "{0:.2f}".format(item["REGULAR_GST_perc"])
item["REGULAR_Total_ter_perc"] = "{0:.2f}".format(item["REGULAR_Total_ter_perc"])
item["DIRECT_Base_TER_perc"] = "{0:.2f}".format(item["DIRECT_Base_TER_perc"])
item["DIRECT_Addnl_52_6a_b_exp_perc"] = "{0:.2f}".format(item["DIRECT_Addnl_52_6a_b_exp_perc"])
item["DIRECT_Addnl_52_6a_c_exp_perc"] = "{0:.2f}".format(item["DIRECT_Addnl_52_6a_c_exp_perc"])
item["DIRECT_GST_perc"] = "{0:.2f}".format(item["DIRECT_GST_perc"])
item["DIRECT_Total_ter_perc"] = "{0:.2f}".format(item["DIRECT_Total_ter_perc"])
return data
def clean_icici(data):
for item in data:
actual_date = item["Date"].split("/")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
item["REGULAR_Base_TER_perc"] = item["REGULAR_Base_TER_perc"][:-1]
item["REGULAR_Addnl_52_6a_b_exp_perc"] = item["REGULAR_Addnl_52_6a_b_exp_perc"][:-1]
item["REGULAR_Addnl_52_6a_c_exp_perc"] = item["REGULAR_Addnl_52_6a_c_exp_perc"][:-1]
item["REGULAR_GST_perc"] = item["REGULAR_GST_perc"][:-1]
item["REGULAR_Total_ter_perc"] = item["REGULAR_Total_ter_perc"][:-1]
item["DIRECT_Base_TER_perc"] = item["DIRECT_Base_TER_perc"][:-1]
item["DIRECT_Addnl_52_6a_b_exp_perc"] = item["DIRECT_Addnl_52_6a_b_exp_perc"][:-1]
item["DIRECT_Addnl_52_6a_c_exp_perc"] = item["DIRECT_Addnl_52_6a_c_exp_perc"][:-1]
item["DIRECT_GST_perc"] = item["DIRECT_GST_perc"][:-1]
item["DIRECT_Total_ter_perc"] = item["DIRECT_Total_ter_perc"][:-1]
return data
def clean_idbi(data):
for item in data:
item["REGULAR_Total_ter_perc"] = "{0:.2f}".format(item["REGULAR_Total_ter_perc"])
return data
def clean_iifcl(data):
for item in data:
item["REGULAR_Base_TER_perc"] = "" if item["REGULAR_Base_TER_perc"] == "N/A" else item["REGULAR_Base_TER_perc"]
item["REGULAR_Addnl_52_6a_b_exp_perc"] = "" if item["REGULAR_Addnl_52_6a_b_exp_perc"] == "N/A" else item["REGULAR_Addnl_52_6a_b_exp_perc"]
item["REGULAR_Addnl_52_6a_c_exp_perc"] = "" if item["REGULAR_Addnl_52_6a_c_exp_perc"] == "N/A" else item["REGULAR_Addnl_52_6a_c_exp_perc"]
item["REGULAR_GST_perc"] = "" if item["REGULAR_GST_perc"] == "N/A" else item["REGULAR_GST_perc"]
item["REGULAR_Total_ter_perc"] = "" if item["REGULAR_Total_ter_perc"] == "N/A" else item["REGULAR_Total_ter_perc"]
item["DIRECT_Base_TER_perc"] = "" if item["DIRECT_Base_TER_perc"] == "N/A" else item["DIRECT_Base_TER_perc"]
item["DIRECT_Addnl_52_6a_b_exp_perc"] = "" if item["DIRECT_Addnl_52_6a_b_exp_perc"] == "N/A" else item["DIRECT_Addnl_52_6a_b_exp_perc"]
item["DIRECT_Addnl_52_6a_c_exp_perc"] = "" if item["DIRECT_Addnl_52_6a_c_exp_perc"] == "N/A" else item["DIRECT_Addnl_52_6a_c_exp_perc"]
item["DIRECT_GST_perc"] = "" if item["DIRECT_GST_perc"] == "N/A" else item["DIRECT_GST_perc"]
item["DIRECT_Total_ter_perc"] = "" if item["DIRECT_Total_ter_perc"] == "N/A" else item["DIRECT_Total_ter_perc"]
return data
def clean_ilfs(data):
for item in data:
item["REGULAR_Base_TER_perc"] = "" if item["REGULAR_Base_TER_perc"] == "N.A." else item["REGULAR_Base_TER_perc"]
item["REGULAR_Addnl_52_6a_b_exp_perc"] = "" if item["REGULAR_Addnl_52_6a_b_exp_perc"] == "N.A." else item["REGULAR_Addnl_52_6a_b_exp_perc"]
item["REGULAR_Addnl_52_6a_c_exp_perc"] = "" if item["REGULAR_Addnl_52_6a_c_exp_perc"] == "N.A." else item["REGULAR_Addnl_52_6a_c_exp_perc"]
item["REGULAR_GST_perc"] = clean_value(item["REGULAR_GST_perc"])
item["REGULAR_Total_ter_perc"] = clean_value(item["REGULAR_Total_ter_perc"])
item["DIRECT_Base_TER_perc"] = "" if item["DIRECT_Base_TER_perc"] == "N.A." else item["DIRECT_Base_TER_perc"]
item["DIRECT_Addnl_52_6a_b_exp_perc"] = "" if item["DIRECT_Addnl_52_6a_b_exp_perc"] == "N.A." else item["DIRECT_Addnl_52_6a_b_exp_perc"]
item["DIRECT_Addnl_52_6a_c_exp_perc"] = "" if item["DIRECT_Addnl_52_6a_c_exp_perc"] == "N.A." else item["DIRECT_Addnl_52_6a_c_exp_perc"]
item["DIRECT_GST_perc"] = clean_value(item["DIRECT_GST_perc"])
item["DIRECT_Total_ter_perc"] = clean_value(item["DIRECT_Total_ter_perc"])
return data
def clean_indiabulls(data):
for item in data:
item["Name of scheme"] = item["Name of scheme"][:-1]
return data
def clean_lic(data):
for item in data:
actual_date = item["Date"].split("-")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
def clean_mahindra(data):
for item in data:
actual_date = item["Date"].split("-")
day = actual_date[0]
month = months.index(actual_date[1].upper()) + 1
year = actual_date[2]
item["Date"] = str(month) + "-" + day + "-" + year
item["REGULAR_Base_TER_perc"] = item["REGULAR_Base_TER_perc"][:-1]
item["REGULAR_Addnl_52_6a_b_exp_perc"] = item["REGULAR_Addnl_52_6a_b_exp_perc"][:-1]
item["REGULAR_Addnl_52_6a_c_exp_perc"] = item["REGULAR_Addnl_52_6a_c_exp_perc"][:-1]
item["REGULAR_GST_perc"] = item["REGULAR_GST_perc"][:-1]
item["REGULAR_Total_ter_perc"] = item["REGULAR_Total_ter_perc"][:-1]
item["DIRECT_Base_TER_perc"] = item["DIRECT_Base_TER_perc"][:-1]
item["DIRECT_Addnl_52_6a_b_exp_perc"] = item["DIRECT_Addnl_52_6a_b_exp_perc"][:-1]
item["DIRECT_Addnl_52_6a_c_exp_perc"] = item["DIRECT_Addnl_52_6a_c_exp_perc"][:-1]
item["DIRECT_GST_perc"] = item["DIRECT_GST_perc"][:-1]
item["DIRECT_Total_ter_perc"] = item["DIRECT_Total_ter_perc"][:-1]
return data
def clean_motilal(data):
for item in data:
actual_date = item["Date"].split("-")
day = actual_date[0]
month = months.index(actual_date[1].upper()) + 1
year = actual_date[2]
item["Date"] = str(month) + "-" + day + "-" + year
return data
def clean_quantum(data):
for item in data:
actual_date = item["Date"].split("-")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
def clean_reliance(data):
for item in data:
item["REGULAR_Base_TER_perc"] = item["REGULAR_Base_TER_perc"] * 100
item["REGULAR_Addnl_52_6a_b_exp_perc"] = item["REGULAR_Addnl_52_6a_b_exp_perc"] * 100
item["REGULAR_Addnl_52_6a_c_exp_perc"] = item["REGULAR_Addnl_52_6a_c_exp_perc"] * 100
item["REGULAR_GST_perc"] = item["REGULAR_GST_perc"] * 100
item["REGULAR_Total_ter_perc"] = item["REGULAR_Total_ter_perc"] * 100
item["DIRECT_Base_TER_perc"] = item["DIRECT_Base_TER_perc"] * 100
item["DIRECT_Addnl_52_6a_b_exp_perc"] = item["DIRECT_Addnl_52_6a_b_exp_perc"] * 100
item["DIRECT_Addnl_52_6a_c_exp_perc"] = item["DIRECT_Addnl_52_6a_c_exp_perc"] * 100
item["DIRECT_GST_perc"] = item["DIRECT_GST_perc"] * 100
item["DIRECT_Total_ter_perc"] = item["DIRECT_Total_ter_perc"] * 100
return data
def clean_sbi(data):
for item in data:
item["REGULAR_GST_perc"] = "{0:.2f}".format(item["REGULAR_GST_perc"])
return data
def clean_sundaram(data):
for item in data:
actual_date = item["Date"].split("-")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
def clean_tata(data):
for item in data:
item["REGULAR_Base_TER_perc"] = "{0:.2f}".format(item["REGULAR_Base_TER_perc"])
item["REGULAR_Addnl_52_6a_b_exp_perc"] = "{0:.2f}".format(item["REGULAR_Addnl_52_6a_b_exp_perc"])
item["REGULAR_Addnl_52_6a_c_exp_perc"] = "{0:.2f}".format(item["REGULAR_Addnl_52_6a_c_exp_perc"])
item["REGULAR_GST_perc"] = "{0:.2f}".format(item["REGULAR_GST_perc"])
item["REGULAR_Total_ter_perc"] = "{0:.2f}".format(item["REGULAR_Total_ter_perc"])
item["DIRECT_Base_TER_perc"] = "{0:.2f}".format(item["DIRECT_Base_TER_perc"])
item["DIRECT_Addnl_52_6a_b_exp_perc"] = "{0:.2f}".format(item["DIRECT_Addnl_52_6a_b_exp_perc"])
item["DIRECT_Addnl_52_6a_c_exp_perc"] = "{0:.2f}".format(item["DIRECT_Addnl_52_6a_c_exp_perc"])
item["DIRECT_GST_perc"] = "{0:.2f}".format(item["DIRECT_GST_perc"])
item["DIRECT_Total_ter_perc"] = "{0:.2f}".format(item["DIRECT_Total_ter_perc"])
return data
def clean_taurus(data):
for item in data:
actual_date = item["Date"].split("/")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
def clean_union(data):
for item in data:
item["REGULAR_Base_TER_perc"] = "{0:.2f}".format(item["REGULAR_Base_TER_perc"])
item["REGULAR_Addnl_52_6a_b_exp_perc"] = "{0:.2f}".format(item["REGULAR_Addnl_52_6a_b_exp_perc"])
item["REGULAR_Addnl_52_6a_c_exp_perc"] = "{0:.2f}".format(item["REGULAR_Addnl_52_6a_c_exp_perc"])
item["REGULAR_GST_perc"] = "{0:.2f}".format(item["REGULAR_GST_perc"])
item["REGULAR_Total_ter_perc"] = "{0:.2f}".format(item["REGULAR_Total_ter_perc"])
item["DIRECT_Base_TER_perc"] = "{0:.2f}".format(item["DIRECT_Base_TER_perc"])
item["DIRECT_Addnl_52_6a_b_exp_perc"] = "{0:.2f}".format(item["DIRECT_Addnl_52_6a_b_exp_perc"])
item["DIRECT_Addnl_52_6a_c_exp_perc"] = "{0:.2f}".format(item["DIRECT_Addnl_52_6a_c_exp_perc"])
item["DIRECT_GST_perc"] = "{0:.2f}".format(item["DIRECT_GST_perc"])
item["DIRECT_Total_ter_perc"] = "{0:.2f}".format(item["DIRECT_Total_ter_perc"])
return data
def clean_uti(data):
for item in data:
actual_date = item["Date"].split("/")
day = actual_date[0]
month = actual_date[1]
year = actual_date[2]
item["Date"] = month + "-" + day + "-" + year
return data
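# Minimal usage sketch (illustrative, not part of the original cleaners): each
# cleaner takes a list of record dicts and normalises the date to MM-DD-YYYY.
# The record below is made up purely for demonstration.
if __name__ == '__main__':
    sample = [{"Date": "01/31/2019", "REGULAR_Total_ter_perc": 2.25}]
    print(clean_canara(sample))  # 'Date' becomes '01-31-2019'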
|
import pytest
import numpy as np
import gym
from lagom.envs import RecordEpisodeStatistics
from lagom.envs import NormalizeObservation
from lagom.envs import NormalizeReward
from lagom.envs import TimeStepEnv
@pytest.mark.parametrize('env_id', ['CartPole-v0', 'Pendulum-v0'])
@pytest.mark.parametrize('deque_size', [2, 5])
def test_record_episode_statistics(env_id, deque_size):
env = gym.make(env_id)
env = RecordEpisodeStatistics(env, deque_size)
for n in range(5):
env.reset()
assert env.episode_return == 0.0
assert env.episode_horizon == 0
for t in range(env.spec.max_episode_steps):
_, _, done, info = env.step(env.action_space.sample())
if done:
assert 'episode' in info
assert all([item in info['episode'] for item in ['return', 'horizon', 'time']])
break
assert len(env.return_queue) == deque_size
assert len(env.horizon_queue) == deque_size
@pytest.mark.parametrize('env_id', ['CartPole-v1', 'Pendulum-v0'])
def test_normalize_observation(env_id):
env = gym.make(env_id)
wrapped_env = NormalizeObservation(gym.make(env_id))
unbiased = []
env.seed(0)
wrapped_env.seed(0)
obs = env.reset()
wrapped_obs = wrapped_env.reset()
unbiased.append(obs)
for t in range(env.spec.max_episode_steps):
action = env.action_space.sample()
obs, _, done, _ = env.step(action)
wrapped_obs, _, wrapped_done, _ = wrapped_env.step(action)
unbiased.append(obs)
mean = np.mean(unbiased, 0)
var = np.var(unbiased, 0)
assert np.allclose(wrapped_env.obs_moments.mean, mean, atol=1e-5)
assert np.allclose(wrapped_env.obs_moments.var, var, atol=1e-4)
assert done == wrapped_done
if done:
break
@pytest.mark.parametrize('env_id', ['CartPole-v1', 'Pendulum-v0'])
@pytest.mark.parametrize('gamma', [0.5, 0.99])
def test_normalize_reward(env_id, gamma):
env = gym.make(env_id)
wrapped_env = NormalizeReward(gym.make(env_id), gamma=gamma)
unbiased = []
env.seed(0)
wrapped_env.seed(0)
for n in range(10):
env.reset()
wrapped_env.reset()
G = 0.0
for t in range(env.spec.max_episode_steps):
action = env.action_space.sample()
_, reward, done, _ = env.step(action)
_, wrapped_reward, wrapped_done, _ = wrapped_env.step(action)
assert done == wrapped_done
G = reward + gamma*G
unbiased.append(G)
if done:
break
mean = np.mean(unbiased, 0)
var = np.var(unbiased, 0)
assert wrapped_env.all_returns == G
assert np.allclose(wrapped_env.reward_moments.mean, mean, atol=1e-4)
assert np.allclose(wrapped_env.reward_moments.var, var, atol=1e-3)
@pytest.mark.parametrize('env_id', ['CartPole-v1', 'Pendulum-v0'])
def test_timestep_env(env_id):
env = gym.make(env_id)
wrapped_env = TimeStepEnv(gym.make(env_id))
env.seed(0)
wrapped_env.seed(0)
obs = env.reset()
timestep = wrapped_env.reset()
assert timestep.first()
assert np.allclose(timestep.observation, obs)
for t in range(env.spec.max_episode_steps):
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
timestep = wrapped_env.step(action)
assert np.allclose(timestep.observation, obs)
assert timestep.reward == reward
assert timestep.done == done
assert timestep.info == info
if done:
assert timestep.last()
if 'TimeLimit.truncated' in info and info['TimeLimit.truncated']:
assert timestep.time_limit()
else:
assert timestep.terminal()
break
else:
assert timestep.mid()
|
from youtube_dl import YoutubeDL
import os, sys
sys.path.append(os.getcwd())
from db.db_pool_handler import InstantDBPool
from PIL import Image
from utils.snow_id import HSIS
from tenacity import retry, wait_random
import json, pymysql, time, traceback, shutil
class VideoDownload(object):
def __init__(self):
self.db_handle = InstantDBPool().get_connect()
with open(os.getcwd() + "/sync_ytb_video/config.json", 'r') as f0:
info = json.load(f0)
self.ydl_opts = {
'writesubtitles': True,
'subtitlesformat': 'vtt',
'ignoreerrors': True,
'writethumbnail': True,
'writeinfojson': True,
'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio',
'recode_video': 'mp4',
'merge_output_format': 'mp4',
'nocheckcertificate': True,
"proxy": info["ydl_opts"]["proxy"],
"outtmpl": info["ydl_opts"]["outtmpl"],
"cookies": info["ydl_opts"]["cookies"]
}
self.file_path = info["file_path"]
self.file_path_temp = info["file_path"] + "temp/"
if not os.path.exists(self.file_path_temp):
os.makedirs(self.file_path_temp)
def run(self):
os.system("sudo pip3 install --upgrade youtube-dl")
uncatch_channel_sql = "SELECT channel_id, channel_url from bus_channel"
uncatch_channel = self.db_handle.search(uncatch_channel_sql)
try:
for j in uncatch_channel:
# sql_map = {}
select_all_sql = "SELECT video_ytb_id, video_status from bus_video where video_author = '%s'" % str(
j["channel_id"])
all_video = self.db_handle.search(select_all_sql)
if all_video:
all_video_list = [i["video_ytb_id"] for i in all_video]
else:
all_video_list = []
ydl = YoutubeDL(self.ydl_opts)
ydl.add_default_info_extractors()
info = ydl.extract_info(j["channel_url"], download=False)
                # For testing / debugging: dump the extracted info to disk and reload it
with open("full_info.json", 'w') as fp:
json.dump(info, fp)
with open("full_info.json", 'r') as f0:
info = json.load(f0)
for i in info["entries"]:
if i:
if "id" in i:
if i["id"] not in all_video_list:
video_status = "2"
else:
video_status = "-1"
video_ytb_id = pymysql.escape_string(i["id"])
else:
continue
else:
continue
if "webpage_url" in i:
video_url = pymysql.escape_string(i["webpage_url"])
else:
continue
if "title" in i:
video_title = pymysql.escape_string(i["title"])
else:
video_title = ""
if "description" in i:
video_profile = pymysql.escape_string(i["description"])
else:
video_profile = ""
if "upload_date" in i:
timeArray = time.strptime(i["upload_date"], "%Y%m%d")
video_publish = time.strftime("%Y-%m-%d", timeArray)
else:
video_publish = "1970-01-02"
video_class = ""
if "categories" in i and "tags" in i:
if i["categories"] is not None and i["tags"] is not None:
_video_class = i["categories"] + i["tags"]
video_class = pymysql.escape_string(json.dumps(_video_class, ensure_ascii=False))
elif i["categories"] is None and i["tags"] is not None:
_video_class = i["tags"]
video_class = pymysql.escape_string(json.dumps(_video_class, ensure_ascii=False))
elif i["categories"] is not None and i["tags"] is None:
_video_class = i["categories"]
video_class = pymysql.escape_string(json.dumps(_video_class, ensure_ascii=False))
if video_status == "2":
insert_video_sql = "INSERT INTO bus_video(video_ytb_id ,video_title, video_profile, video_url," \
" video_status, video_class, video_author, video_publish) VALUES " \
"('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \
(video_ytb_id, video_title, video_profile, video_url, video_status, video_class,
str(j["channel_id"]), video_publish)
self.db_handle.modify(insert_video_sql)
else:
update_video_sql = "UPDATE bus_video set video_title = '%s', video_profile = '%s', video_url = '%s'," \
" video_class = '%s', video_author = '%s', video_publish = '%s' where video_ytb_id = '%s'" % \
(video_title, video_profile, video_url, video_class, str(j["channel_id"]),
video_publish, video_ytb_id)
self.db_handle.modify(update_video_sql)
except Exception as e:
traceback.print_exc()
print(e)
self.video_dl()
    # Keep building the video index separate from downloading the videos, so that even if the current cron job fails, the next run can still catch up
@retry(wait=wait_random(min=3600, max=7200))
def video_dl(self):
ydlk = YoutubeDL(self.ydl_opts)
ydlk.add_default_info_extractors()
dl_sql = "select video_ytb_id, video_url from bus_video where video_pic IS NULL or video_pic ='' ORDER BY RAND()"
dl_url = self.db_handle.search(dl_sql)
for t in dl_url:
ydlk.extract_info(t["video_url"], download=True)
after_name = HSIS.main()
            # Rename the video file
os.rename(self.file_path_temp + t["video_ytb_id"] + ".mp4", self.file_path_temp + after_name + ".mp4")
os.rename(self.file_path_temp + t["video_ytb_id"] + ".info.json", self.file_path_temp + after_name + ".json")
            # Rename the thumbnail image
if os.path.isfile(self.file_path_temp + t["video_ytb_id"] + ".jpg"):
os.rename(self.file_path_temp + t["video_ytb_id"] + ".jpg", self.file_path_temp + after_name + ".jpg")
elif os.path.isfile(self.file_path_temp + t["video_ytb_id"] + ".webp"):
target = Image.open(self.file_path_temp + t["video_ytb_id"] + ".webp")
target = target.convert('RGB')
target.save(self.file_path_temp + after_name + ".jpg", quality=100)
os.remove(self.file_path_temp + t["video_ytb_id"] + ".webp")
elif os.path.isfile(self.file_path_temp + t["video_ytb_id"] + ".png"):
target = Image.open(self.file_path_temp + t["video_ytb_id"] + ".png")
target = target.convert('RGB')
target.save(self.file_path_temp + after_name + ".jpg", quality=100)
os.remove(self.file_path_temp + t["video_ytb_id"] + ".png")
else:
after_name = "undefined"
            # Rename the subtitles, if any
has_sub = "0"
sub_list = []
for root, dirs, files in os.walk(self.file_path_temp):
                # Iterate over all files
for file in files:
if file.endswith('.vtt'):
has_sub = "1"
sub_name = file.split(".")
                        # The element at index 1 is the subtitle language (e.g. zh-Hant)
if sub_name[1] == "vtt":
                            # Default language; a plain .vtt suffix is enough
os.rename(self.file_path_temp + file, self.file_path_temp + after_name + ".vtt")
else:
sub_list.append(sub_name[1])
os.rename(self.file_path_temp + file, self.file_path_temp + after_name + "-" + sub_name[1] + ".vtt")
if sub_list:
sub_list_fin = pymysql.escape_string(json.dumps(sub_list, ensure_ascii=False))
else:
sub_list_fin = ""
            # Move all files into the main folder
temp_files = os.listdir(self.file_path_temp)
for file in temp_files:
shutil.move(self.file_path_temp + file, self.file_path)
update_video_sql = "update bus_video set video_path = '%s', video_json = '%s', video_pic = '%s', video_status = '%s', video_is_huge = '%s', video_has_subtitle = '%s', video_sub_list = '%s' where video_ytb_id = '%s'" % \
(after_name + ".mp4", after_name + ".json", after_name + ".jpg", "0", "1", has_sub, sub_list_fin, t["video_ytb_id"])
self.db_handle.modify(update_video_sql)
if __name__ == '__main__':
video_download = VideoDownload()
video_download.run()
# video_download.video_dl()
|
"""Common database code used by multiple `covid_hosp` scrapers."""
# standard library
from contextlib import contextmanager
import math
# third party
import mysql.connector
import pandas as pd
# first party
import delphi.operations.secrets as secrets
class Database:
def __init__(self,
connection,
table_name=None,
columns_and_types=None,
additional_fields=None):
"""Create a new Database object.
Parameters
----------
connection
An open connection to a database.
table_name : str
The name of the table which holds the dataset.
columns_and_types : tuple[str, str, Callable]
List of 3-tuples of (CSV header name, SQL column name, data type) for
all the columns in the CSV file.
additional_fields : tuple[str]
      List of 2-tuples of (value, SQL column name) for additional fields to include
at the end of the row which are not present in the CSV data.
"""
self.connection = connection
self.table_name = table_name
self.publication_col_name = "issue" if table_name == 'covid_hosp_state_timeseries' else \
'publication_date'
self.columns_and_types = columns_and_types
self.additional_fields = additional_fields if additional_fields is not None else []
@classmethod
@contextmanager
def connect(database_class, mysql_connector_impl=mysql.connector):
"""Connect to a database and provide the connection as a context manager.
As long as the context manager exits normally, the connection's transaction
will be committed. Otherwise, if the context is exited by an Exception, the
transaction will be rolled back.
In any case, the connection will be gracefully closed upon exiting the
context manager.
"""
# connect to the database
user, password = secrets.db.epi
connection = mysql_connector_impl.connect(
host=secrets.db.host,
user=user,
password=password,
database='epidata')
try:
# provide the connection to the context manager
yield database_class(connection)
# rollback by default; the following commit will only take place if no
# exception was raised in calling code
connection.commit()
finally:
# close the connection in any case
connection.close()
@contextmanager
def new_cursor(self):
"""Create and provide a database cursor as a context manager.
The cursor will be gracefully closed upon exiting the context manager.
"""
cursor = self.connection.cursor()
try:
yield cursor
finally:
cursor.close()
def contains_revision(self, revision):
"""Return whether the given revision already exists in the database.
Parameters
----------
revision : str
Unique revision string.
Returns
-------
bool
True iff the revision already exists.
"""
with self.new_cursor() as cursor:
cursor.execute('''
SELECT
count(1) > 0
FROM
`covid_hosp_meta`
WHERE
`dataset_name` = %s AND `revision_timestamp` = %s
''', (self.table_name, revision))
for (result,) in cursor:
return bool(result)
def insert_metadata(self, publication_date, revision, meta_json):
"""Add revision metadata to the database.
Parameters
----------
publication_date : int
Date when the dataset was published in YYYYMMDD format.
revision : str
Unique revision string.
meta_json : str
Metadata serialized as a JSON string.
"""
with self.new_cursor() as cursor:
cursor.execute('''
INSERT INTO
`covid_hosp_meta` (
`dataset_name`,
`publication_date`,
`revision_timestamp`,
`metadata_json`,
`acquisition_datetime`
)
VALUES
(%s, %s, %s, %s, NOW())
''', (self.table_name, publication_date, revision, meta_json))
def insert_dataset(self, publication_date, dataframe):
"""Add a dataset to the database.
Parameters
----------
publication_date : int
Date when the dataset was published in YYYYMMDD format.
dataframe : pandas.DataFrame
The dataset.
"""
num_columns = 2 + len(self.columns_and_types) + len(self.additional_fields)
value_placeholders = ', '.join(['%s'] * num_columns)
columns = ', '.join(f'`{i[1]}`' for i in self.columns_and_types + self.additional_fields)
sql = f'INSERT INTO `{self.table_name}` (`id`, `{self.publication_col_name}`, {columns}) ' \
f'VALUES ({value_placeholders})'
id_and_publication_date = (0, publication_date)
with self.new_cursor() as cursor:
for _, row in dataframe.iterrows():
values = []
for name, _, dtype in self.columns_and_types:
if isinstance(row[name], float) and math.isnan(row[name]):
values.append(None)
else:
values.append(dtype(row[name]))
cursor.execute(sql,
id_and_publication_date +
tuple(values) +
tuple(i[0] for i in self.additional_fields))
def get_max_issue(self):
"""Fetch the most recent issue.
This is used to bookend what updates we pull in from the HHS metadata.
"""
with self.new_cursor() as cursor:
cursor.execute(f'''
SELECT
max(publication_date)
from
`covid_hosp_meta`
WHERE
dataset_name = "{self.table_name}"
''')
for (result,) in cursor:
if result is not None:
return pd.Timestamp(str(result))
return pd.Timestamp("1900/1/1")
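# --- Hedged usage sketch (not part of the original module) ---
# Shows how the `connect()` context manager and `contains_revision()` compose.
# The table name appears in the constructor above; the revision string is
# hypothetical, and running this requires real credentials in
# `delphi.operations.secrets`.
if __name__ == '__main__':
  with Database.connect() as db:
    db.table_name = 'covid_hosp_state_timeseries'
    print(db.contains_revision('example-revision-string'))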
|
from sqlalchemy import Column, Integer, String
from .Base import Base
class Genre(Base):
__tablename__ = 'genres'
id = Column(Integer, primary_key=True)
name = Column(String(200))
|
"""
Declare and configure the models for the courses application
"""
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from cms.extensions.extension_pool import extension_pool
from ..core.models import BasePageExtension
class Organization(BasePageExtension):
"""
The organization page extension represents and records entities that manage courses.
It could be a university or a training company for example.
This model should be used to record structured data about the organization whereas the
associated page object is where we record the less structured information to display on the
page to present the organization.
"""
code = models.CharField(
_("code"), db_index=True, max_length=100, null=True, blank=True
)
logo = models.ImageField(
upload_to="organizations/logo/",
verbose_name=_("organization logo"),
help_text=_("Recommended size: 180x100"),
blank=True,
)
ROOT_REVERSE_ID = "organizations"
TEMPLATE_DETAIL = "courses/cms/organization_detail.html"
class Meta:
verbose_name = _("organization")
def __str__(self):
"""Human representation of an organization"""
return "{model}: {name} ({code})".format(
code=self.code,
name=self.extended_object.get_title(),
model=self._meta.verbose_name.title(),
)
def copy_relations(self, oldinstance, language):
"""
We must manually copy the many-to-many relations from the "draft" instance of
to the "published" instance.
"""
self.courses.set(oldinstance.courses.drafts())
def clean(self):
"""
We normalize the code with slugify for better uniqueness
"""
if self.code:
# Normalize the code by slugifying and capitalizing it
self.code = slugify(self.code, allow_unicode=True).upper()
return super().clean()
def validate_unique(self, exclude=None):
"""
We can't rely on a database constraint for uniqueness because pages
exist in two versions: draft and published.
"""
if self.code:
            # Check uniqueness for the version being saved (draft or published)
is_draft = self.extended_object.publisher_is_draft
uniqueness_query = self.__class__.objects.filter(
code=self.code, extended_object__publisher_is_draft=is_draft
)
# If the page is being updated, we should exclude it while looking for duplicates
if self.pk:
# pylint: disable=no-member
uniqueness_query = uniqueness_query.exclude(pk=self.pk)
# Raise a ValidationError if the code already exists
if uniqueness_query.exists():
raise ValidationError(
{"code": ["An Organization already exists with this code."]}
)
return super().validate_unique(exclude=exclude)
def save(self, *args, **kwargs):
"""
Enforce validation on each instance save
"""
self.full_clean()
super().save(*args, **kwargs)
class Course(BasePageExtension):
"""
The course page extension represents and records a course in the catalog.
This model should be used to record structured data about the course whereas the
associated page object is where we record the less structured information to display on the
page that presents the course.
The `active_session` field is the edX course_key of the current session.
"""
active_session = models.CharField(
max_length=200,
verbose_name=_("Course key of active course session"),
blank=True,
null=True,
db_index=True,
)
organization_main = models.ForeignKey(
"Organization",
related_name="main_courses",
limit_choices_to={"extended_object__publisher_is_draft": True},
)
organizations = models.ManyToManyField(
"Organization",
related_name="courses",
limit_choices_to={"extended_object__publisher_is_draft": True},
)
subjects = models.ManyToManyField(
"Subject",
related_name="courses",
blank=True,
limit_choices_to={"extended_object__publisher_is_draft": True},
)
ROOT_REVERSE_ID = "courses"
TEMPLATE_DETAIL = "courses/cms/course_detail.html"
class Meta:
verbose_name = _("course")
def __str__(self):
"""Human representation of a course."""
session = self.active_session or "no active session"
return "{model}: {title} ({session})".format(
model=self._meta.verbose_name.title(),
title=self.extended_object.get_title(),
session=session,
)
def copy_relations(self, oldinstance, language):
"""
We must manually copy the many-to-many relations from the "draft" instance
to the "published" instance.
"""
# pylint: disable=no-member
self.organizations.set(oldinstance.organizations.drafts())
self.subjects.set(oldinstance.subjects.drafts())
def validate_unique(self, exclude=None):
"""
We can't rely on a database constraint for uniqueness because pages
exist in two versions: draft and published.
"""
if self.active_session:
# Check uniqueness for the version being saved (draft or published)
is_draft = self.extended_object.publisher_is_draft
uniqueness_query = self.__class__.objects.filter(
active_session=self.active_session,
extended_object__publisher_is_draft=is_draft,
)
# If the page is being updated, we should exclude it while looking for duplicates
if self.pk:
# pylint: disable=no-member
uniqueness_query = uniqueness_query.exclude(pk=self.pk)
# Raise a ValidationError if the active session already exists
if uniqueness_query.exists():
raise ValidationError(
{
"active_session": [
"A course already exists with this active session."
]
}
)
return super().validate_unique(exclude=exclude)
def save(self, *args, **kwargs):
"""
Enforce validation each time an instance is saved
Make sure the main organization is also included in `organizations` as a m2m relation
"""
self.full_clean()
super().save(*args, **kwargs)
if self.pk:
# pylint: disable=no-member
self.organizations.add(self.organization_main)
class Subject(BasePageExtension):
"""
The subject page extension represents and records a thematic in the catalog.
This model should be used to record structured data about the thematic whereas the
associated page object is where we record the less structured information to display on the
page that presents the thematic.
"""
ROOT_REVERSE_ID = "subjects"
TEMPLATE_DETAIL = "courses/cms/subject_detail.html"
class Meta:
verbose_name = _("subject")
def __str__(self):
"""Human representation of a subject"""
return "{model}: {title}".format(
model=self._meta.verbose_name.title(),
title=self.extended_object.get_title(),
)
def copy_relations(self, oldinstance, language):
"""
We must manually copy the many-to-many relations from the "draft" instance
to the "published" instance.
"""
self.courses.set(oldinstance.courses.drafts())
extension_pool.register(Course)
extension_pool.register(Organization)
extension_pool.register(Subject)
|
################################################################################
# populate_obs_instrument_VGISS_prof.py
#
# Routines to populate fields specific to VGISS.
################################################################################
import pdsfile
from config_data import *
import import_util
from populate_obs_mission_voyager import *
from populate_util import *
################################################################################
# THESE NEED TO BE IMPLEMENTED FOR EVERY INSTRUMENT
################################################################################
### OBS_GENERAL TABLE ###
def _VGISS_file_spec_helper(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
file_spec = index_row['FILE_SPECIFICATION_NAME']
volume_id = kwargs['volume_id']
return volume_id + '/' + file_spec
def populate_obs_general_VGISS_opus_id_PROF(**kwargs):
file_spec = _VGISS_file_spec_helper(**kwargs)
pds_file = pdsfile.PdsFile.from_filespec(file_spec, fix_case=True)
opus_id = pds_file.opus_id
if not opus_id:
import_util.log_nonrepeating_error(
f'Unable to create OPUS_ID for FILE_SPEC "{file_spec}"')
return file_spec.split('/')[-1]
return opus_id
def populate_obs_general_VGISS_ring_obs_id_PROF(**kwargs):
return None
def populate_obs_general_VGISS_inst_host_id_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inst_host = index_row['INSTRUMENT_HOST_NAME']
assert inst_host in ['VOYAGER 1', 'VOYAGER 2']
return 'VG'+inst_host[-1]
# VGISS time span is the duration of the observation at the spacecraft
def populate_obs_general_VGISS_time1_PROF(**kwargs):
return populate_time1_from_index(**kwargs)
def populate_obs_general_VGISS_time2_PROF(**kwargs):
return populate_time2_from_index(**kwargs)
def populate_obs_general_VGISS_target_name_PROF(**kwargs):
# Get target name from index table
target_name = populate_target_name_from_index(**kwargs)
target_name_info = TARGET_NAME_INFO[target_name]
return target_name, target_name_info[2]
def populate_obs_general_VGISS_observation_duration_PROF(**kwargs):
return populate_observation_duration_from_time(**kwargs)
def populate_obs_general_VGISS_quantity_PROF(**kwargs):
return 'REFLECT'
def populate_obs_general_VGISS_observation_type_PROF(**kwargs):
# Reflectance
return 'REF'
def populate_obs_pds_VGISS_note_PROF(**kwargs):
return None
def populate_obs_general_VGISS_primary_file_spec_PROF(**kwargs):
return _VGISS_file_spec_helper(**kwargs)
def populate_obs_pds_VGISS_primary_file_spec_PROF(**kwargs):
return _VGISS_file_spec_helper(**kwargs)
def populate_obs_pds_VGISS_product_creation_time_PROF(**kwargs):
return populate_product_creation_time_from_supp_index(**kwargs)
# Format: "VG2-SR/UR/NR-ISS-2/4-OCC-V1.0"
def populate_obs_pds_VGISS_data_set_id_PROF(**kwargs):
return populate_data_set_id_from_index_label(**kwargs)
# Format: "KM001/UU1P01DE.TAB"
def populate_obs_pds_VGISS_product_id_PROF(**kwargs):
return populate_product_id_from_index(**kwargs)
def populate_obs_general_VGISS_right_asc1_PROF(**kwargs):
return None
def populate_obs_general_VGISS_right_asc2_PROF(**kwargs):
return None
def populate_obs_general_VGISS_declination1_PROF(**kwargs):
return None
def populate_obs_general_VGISS_declination2_PROF(**kwargs):
return None
### OBS_TYPE_IMAGE TABLE ###
def populate_obs_type_image_VGISS_image_type_id_PROF(**kwargs):
return 'FRAM'
def populate_obs_type_image_VGISS_duration_PROF(**kwargs):
return 0.72
def populate_obs_type_image_VGISS_levels_PROF(**kwargs):
return 256
def populate_obs_type_image_VGISS_lesser_pixel_size_PROF(**kwargs):
return 800
def populate_obs_type_image_VGISS_greater_pixel_size_PROF(**kwargs):
return 800
### OBS_WAVELENGTH TABLE ###
def populate_obs_wavelength_VGISS_wavelength1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['supp_index_row']
wl1 = index_row['MINIMUM_WAVELENGTH']
return wl1
def populate_obs_wavelength_VGISS_wavelength2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['supp_index_row']
wl2 = index_row['MAXIMUM_WAVELENGTH']
return wl2
def _wave_res_helper(**kwargs):
metadata = kwargs['metadata']
wl_row = metadata['obs_wavelength_row']
wl1 = wl_row['wavelength1']
wl2 = wl_row['wavelength2']
if wl1 is None or wl2 is None:
return None
return wl2 - wl1
def populate_obs_wavelength_VGISS_wave_res1_PROF(**kwargs):
return _wave_res_helper(**kwargs)
def populate_obs_wavelength_VGISS_wave_res2_PROF(**kwargs):
return _wave_res_helper(**kwargs)
def populate_obs_wavelength_VGISS_wave_no1_PROF(**kwargs):
metadata = kwargs['metadata']
wavelength_row = metadata['obs_wavelength_row']
wl2 = wavelength_row['wavelength2']
if wl2 is None:
return None
return 10000 / wl2 # cm^-1
def populate_obs_wavelength_VGISS_wave_no2_PROF(**kwargs):
metadata = kwargs['metadata']
wavelength_row = metadata['obs_wavelength_row']
wl1 = wavelength_row['wavelength1']
if wl1 is None:
return None
return 10000 / wl1 # cm^-1
# Same logic as wave_res
def _wave_no_res_helper(**kwargs):
metadata = kwargs['metadata']
wl_row = metadata['obs_wavelength_row']
wno1 = wl_row['wave_no1']
wno2 = wl_row['wave_no2']
if wno1 is None or wno2 is None:
return None
return wno2 - wno1
def populate_obs_wavelength_VGISS_wave_no_res1_PROF(**kwargs):
return _wave_no_res_helper(**kwargs)
def populate_obs_wavelength_VGISS_wave_no_res2_PROF(**kwargs):
return _wave_no_res_helper(**kwargs)
def populate_obs_wavelength_VGISS_spec_flag_PROF(**kwargs):
return 'N'
def populate_obs_wavelength_VGISS_spec_size_PROF(**kwargs):
return None
def populate_obs_wavelength_VGISS_polarization_type_PROF(**kwargs):
return 'NONE'
### OBS_PROFILE TABLE ###
def populate_obs_occultation_VGISS_occ_type_PROF(**kwargs):
# Reflectance
return 'REF'
def populate_obs_occultation_VGISS_occ_dir_PROF(**kwargs):
return None
def populate_obs_occultation_VGISS_body_occ_flag_PROF(**kwargs):
return None
def populate_obs_occultation_VGISS_optical_depth_min_PROF(**kwargs):
return None
def populate_obs_occultation_VGISS_optical_depth_max_PROF(**kwargs):
return None
def populate_obs_occultation_VGISS_temporal_sampling_PROF(**kwargs):
return None
def populate_obs_occultation_VGISS_quality_score_PROF(**kwargs):
return 'GOOD'
def populate_obs_occultation_VGISS_wl_band_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
wl_band1 = index_row['WAVELENGTH_BAND_1']
wl_band2 = index_row['WAVELENGTH_BAND_2']
if wl_band2 != 'N/A':
        assert wl_band1 == wl_band2, 'Mismatched wl_band1 and wl_band2.'
if '-BAND' in wl_band1:
wl_band1 = wl_band1[0]
if 'VISUAL' in wl_band1:
wl_band1 = 'VI'
return wl_band1
def populate_obs_occultation_VGISS_source_PROF(**kwargs):
return None
def populate_obs_occultation_VGISS_host_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
receiver_host = index_row['RECEIVER_HOST_NAME']
return receiver_host
### OBS_RING_GEOMETRY TABLE ###
def populate_obs_ring_geometry_VGISS_ring_radius1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
radius1 = import_util.safe_column(index_row, 'MINIMUM_RING_RADIUS')
return radius1
def populate_obs_ring_geometry_VGISS_ring_radius2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
radius2 = import_util.safe_column(index_row, 'MAXIMUM_RING_RADIUS')
return radius2
def populate_obs_ring_geometry_VGISS_resolution1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
res = import_util.safe_column(index_row, 'MINIMUM_RADIAL_RESOLUTION')
return res
def populate_obs_ring_geometry_VGISS_resolution2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
res = import_util.safe_column(index_row, 'MAXIMUM_RADIAL_RESOLUTION')
return res
def populate_obs_ring_geometry_VGISS_proj_resolution1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
res = import_util.safe_column(index_row, 'MINIMUM_RADIAL_RESOLUTION')
return res
def populate_obs_ring_geometry_VGISS_proj_resolution2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
res = import_util.safe_column(index_row, 'MAXIMUM_RADIAL_RESOLUTION')
return res
def populate_obs_ring_geometry_VGISS_j2000_longitude1_PROF(**kwargs):
return None
def populate_obs_ring_geometry_VGISS_j2000_longitude2_PROF(**kwargs):
return None
def populate_obs_ring_geometry_VGISS_ring_azimuth_wrt_observer1_PROF(**kwargs):
return None
def populate_obs_ring_geometry_VGISS_ring_azimuth_wrt_observer2_PROF(**kwargs):
return None
def populate_obs_ring_geometry_VGISS_phase1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
phase_angle = index_row['MINIMUM_PHASE_ANGLE']
return phase_angle
def populate_obs_ring_geometry_VGISS_phase2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
phase_angle = index_row['MAXIMUM_PHASE_ANGLE']
return phase_angle
# Source: Sun, observer: Voyager. For both of the reflectance profiles,
# the Sun was illuminating the north side of the rings and Voyager was
# observing the rings from the south side. Thus the incidence/emission
# angles and the north-based incidence/emission angles will
# be the same.
# Incidence angle: The angle between the point where the incoming source
# photons hit the ring and the normal to the ring plane on the LIT side of
# the ring. This is always between 0 (parallel to the normal vector) and 90
# (parallel to the ring plane)
def populate_obs_ring_geometry_VGISS_incidence1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inc = index_row['INCIDENCE_ANGLE']
return inc
def populate_obs_ring_geometry_VGISS_incidence2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inc = index_row['INCIDENCE_ANGLE']
return inc
# North based inc: the angle between the point where incoming source photons hit
# the ring and the normal vector on the NORTH side of the ring. 0-90 when the
# north side of the ring is lit, and 90-180 when the south side is lit.
def populate_obs_ring_geometry_VGISS_north_based_incidence1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inc = index_row['INCIDENCE_ANGLE']
return inc
def populate_obs_ring_geometry_VGISS_north_based_incidence2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inc = index_row['INCIDENCE_ANGLE']
return inc
# Emission angle: the angle between the normal vector on the LIT side and the
# direction of the outgoing photons toward the observer. 0-90 when the observer
# is on the lit side of the ring, and 90-180 when it is on the dark side.
def populate_obs_ring_geometry_VGISS_emission1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
ea = index_row['MINIMUM_EMISSION_ANGLE']
return ea
def populate_obs_ring_geometry_VGISS_emission2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
ea = index_row['MAXIMUM_EMISSION_ANGLE']
return ea
# North based ea: the angle between the normal vector on the NORTH side of the
# ring and the direction of the outgoing photons toward the observer. 0-90 when
# the observer is on the north side of the ring, and 90-180 when it is on the
# south side.
def populate_obs_ring_geometry_VGISS_north_based_emission1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
ea = index_row['MINIMUM_EMISSION_ANGLE']
return ea
def populate_obs_ring_geometry_VGISS_north_based_emission2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
ea = index_row['MAXIMUM_EMISSION_ANGLE']
return ea
# We set the center versions to be the same as the normal versions
populate_obs_ring_geometry_VGISS_center_phase1_PROF = \
populate_obs_ring_geometry_VGISS_phase1_PROF
populate_obs_ring_geometry_VGISS_center_phase2_PROF = \
populate_obs_ring_geometry_VGISS_phase2_PROF
populate_obs_ring_geometry_VGISS_center_incidence1_PROF = \
populate_obs_ring_geometry_VGISS_incidence1_PROF
populate_obs_ring_geometry_VGISS_center_incidence2_PROF = \
populate_obs_ring_geometry_VGISS_incidence2_PROF
populate_obs_ring_geometry_VGISS_center_emission1_PROF = \
populate_obs_ring_geometry_VGISS_emission1_PROF
populate_obs_ring_geometry_VGISS_center_emission2_PROF = \
populate_obs_ring_geometry_VGISS_emission2_PROF
populate_obs_ring_geometry_VGISS_center_north_based_incidence1_PROF = \
populate_obs_ring_geometry_VGISS_north_based_incidence1_PROF
populate_obs_ring_geometry_VGISS_center_north_based_incidence2_PROF = \
populate_obs_ring_geometry_VGISS_north_based_incidence2_PROF
populate_obs_ring_geometry_VGISS_center_north_based_emission1_PROF = \
populate_obs_ring_geometry_VGISS_north_based_emission1_PROF
populate_obs_ring_geometry_VGISS_center_north_based_emission2_PROF = \
populate_obs_ring_geometry_VGISS_north_based_emission2_PROF
# Opening angle to observer: the angle between the ring surface and the
# direction of the outgoing photons toward the observer. Positive if the
# observer is on the north side of the ring, negative if on the south side. In
# this case the observer is on the south side, so it's 90 - north based ea.
def populate_obs_ring_geometry_VGISS_observer_ring_opening_angle1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
max_ea = index_row['MAXIMUM_EMISSION_ANGLE']
return 90. - max_ea
def populate_obs_ring_geometry_VGISS_observer_ring_opening_angle2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
min_ea = index_row['MINIMUM_EMISSION_ANGLE']
return 90. - min_ea
# Ring elevation to observer: the same as the opening angle, except that it is
# positive if the observer is on the north side of Jupiter, Saturn, and Neptune
# or the south side of Uranus, and negative if the observer is on the south side
# of Jupiter, Saturn, and Neptune or the north side of Uranus. In this volume
# the observer is south of Saturn, so the ring elevation equals the opening angle.
def populate_obs_ring_geometry_VGISS_observer_ring_elevation1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
max_ea = index_row['MAXIMUM_EMISSION_ANGLE']
return 90. - max_ea
def populate_obs_ring_geometry_VGISS_observer_ring_elevation2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
min_ea = index_row['MINIMUM_EMISSION_ANGLE']
return 90. - min_ea
# Opening angle to Sun: the angle between the ring surface and the direction of
# the incoming photons from the source. Positive if the source is on the north
# side of the ring, negative if on the south side. In this case the source is
# on the north side, so it's 90 - inc. For reference, if the source were on the
# south side, the opening angle would be -(90 - inc).
def populate_obs_ring_geometry_VGISS_solar_ring_opening_angle1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inc = index_row['INCIDENCE_ANGLE']
return 90. - inc
def populate_obs_ring_geometry_VGISS_solar_ring_opening_angle2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inc = index_row['INCIDENCE_ANGLE']
return 90. - inc
# Ring elevation to Sun: the same as the opening angle, except that it is
# positive if the source is on the north side of Jupiter, Saturn, and Neptune
# or the south side of Uranus, and negative if the source is on the south side
# of Jupiter, Saturn, and Neptune or the north side of Uranus. In this volume
# the source is north of Saturn, so the ring elevation equals the opening angle.
def populate_obs_ring_geometry_VGISS_solar_ring_elevation1_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inc = index_row['INCIDENCE_ANGLE']
return 90. - inc
def populate_obs_ring_geometry_VGISS_solar_ring_elevation2_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
inc = index_row['INCIDENCE_ANGLE']
return 90. - inc
def populate_obs_ring_geometry_VGISS_ring_intercept_time1_PROF(**kwargs):
return populate_time1_from_index(column='RING_EVENT_START_TIME', **kwargs)
def populate_obs_ring_geometry_VGISS_ring_intercept_time2_PROF(**kwargs):
return populate_time1_from_index(column='RING_EVENT_STOP_TIME', **kwargs)
################################################################################
# THESE NEED TO BE IMPLEMENTED FOR EVERY VOYAGER INSTRUMENT
################################################################################
def populate_obs_mission_voyager_VGISS_mission_phase_name_PROF(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
target_name = index_row['TARGET_NAME'].upper().strip()
mp = VG_TARGET_TO_MISSION_PHASE_MAPPING[target_name]
return mp
################################################################################
# THESE ARE SPECIFIC TO OBS_INSTRUMENT_VGISS
################################################################################
def populate_obs_instrument_vgiss_camera_PROF(**kwargs):
# Narrow angle camera
return 'N'
def populate_obs_instrument_vgiss_usable_lines_PROF(**kwargs):
return 800
def populate_obs_instrument_vgiss_usable_samples_PROF(**kwargs):
return 800
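# --- Hedged usage sketch (not part of the OPUS import pipeline) ---
# Every populate function above receives a `metadata` dict prepared by the
# importer; the wavelength numbers below are made up and only illustrate the
# calling convention and the micron -> wavenumber (10000/wavelength, cm^-1)
# conversion used by the wave_no functions.
if __name__ == '__main__':
    fake_metadata = {'obs_wavelength_row': {'wavelength1': 0.28,
                                            'wavelength2': 0.64}}
    print(populate_obs_wavelength_VGISS_wave_res1_PROF(metadata=fake_metadata))
    print(populate_obs_wavelength_VGISS_wave_no1_PROF(metadata=fake_metadata))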
|
from gsse_python_client.series import Series
import numbers
class TimeWindows:
def __init__(self, timewindows):
self.timewindows = []
for timewindow in timewindows:
self.timewindows.append(TimeWindow(timewindow))
def __getitem__(self, item):
return self.timewindows[item]
def __setitem__(self, key, value):
self.timewindows[key] = value
def __len__(self):
return len(self.timewindows)
def __iter__(self):
for elem in self.timewindows:
yield elem
class TimeWindow:
def __init__(self, data):
self.series = {}
self.tickers = []
for element in data:
series = Series(element)
self.series[series.ticker] = series
self.tickers.append(series.ticker)
def getTick(self, ticker, tickIndex):
series = self.series[ticker]
return series.getTick(tickIndex)
def __getitem__(self, item):
if isinstance(item, numbers.Number):
return self.series[self.tickers[item]]
return self.series[item]
def __len__(self):
return len(self.series)
|
"""
This module contains the plugin interface that you need to implement
for writing new plugins.
Plugins are regular python packages that contain a set of setuptools
entrypoints that expose the available plugins.
Basically you need to define a dictionary where the keys are the possible
entrypoints (one per plugin type) and the values are the lists of python
classes that implement the entrypoint interface.
This module describes the entrypoints interface; each "interface" has an
``ENTRYPOINT`` attribute, which is the string you need to use in the
entrypoints dict in your setup.py.
"""
from PyQt5.QtCore import QObject
from ._shared import _window
class EditorPlugin:
"""
    An editor plugin returns the CodeEdit class that needs to be registered
inside the application.
"""
#: setuptools entry point to use for adding new editor plugins.
ENTRYPOINT = 'hackedit.plugins.editors'
@staticmethod
def get_editor_class():
"""
Returns the editor **class** to register.
:return: a subclass of :class:`pyqode.core.api.CodeEdit`
"""
pass
@classmethod
def get_specific_preferences_page(cls):
"""
Returns a preferences page to edit the settings specific to your
editor.
"""
pass
@classmethod
def apply_specific_preferences(cls, editor):
"""
Apply the specific preferences to an editor instance
"""
pass
class FileIconProviderPlugin:
"""
    The file icon provider plugin provides a custom icon for a specific file
extension. Implement this only if the mimetype for your file is not widely
available in the icon themes.
To define such a plugin, you need to define a list of supported
extensions: :attr:SUPPORTED_EXTENSIONS and a function that will return the
actual QIcon instance: :func:`icon`.
"""
ENTRYPOINT = 'hackedit.plugins.file_icon_providers'
#: the list of supported file extensions. Use the '.' extension for
#: folders (e.g. if you need to display a custom icon for special folders
#: such as python packages).
SUPPORTED_EXTENSIONS = []
def icon(self, file_info):
"""
Returns the corresponding file icon
:param file_info: QFileInfo
        :rtype: PyQt5.QtGui.QIcon
"""
pass
class WorkspacePlugin(QObject):
"""
A workspace plugin is a generic window plugin but tied to a workspace.
To create a workspace plugin: create a subclass and implement the following
methods:
- activate: setup your plugin
- close: to stop any background task or close any resource that your
plugin might use.
- get_preferences_page (classmethod): returns a custom
:class:`hackedit.widgets.PreferencePage` instance that will show up
in the app's preferences dialog.
- apply_preferences: to apply any custom preferences exposed by
:func:`get_preferences_page` on the plugin instance.
"""
#: setuptools entry point to use for adding new editor plugins.
ENTRYPOINT = 'hackedit.plugins.workspace_plugins'
def __init__(self, window):
"""
:param window: Reference to the main window where the plugin has been
attached to.
:type window: hackedit.app.gui.main_window.MainWindow
"""
self.main_window = window
super().__init__()
def activate(self):
"""
Activates the plugin.
You should implement this method to setup your plugin (create widgets,
connect signals/slots,...)
"""
pass
def close(self):
"""
This method is called when the parent window has been closed.
Implemented this method if you need to cleanup some resources or stop
somes services,...
"""
pass
@classmethod
def get_preferences_page(cls):
"""
Returns the plugin config page. A page is a simple widget where you
expose your plugin's preferences. That page will be automatically shown
under the plugin node in the application's preferences dialog.
        .. warning:: This is a classmethod!
:rtype: hackedit.api.widgets.PreferencePage
"""
pass
def apply_preferences(self):
"""
Apply user preferences to your plugin (the one exposed in your config
page).
"""
pass
class WorkspaceProviderPlugin:
"""
    A workspace provider plugin lets you add some builtin workspaces to
HackEdit.
Just implement `get_data` to return a dictionary with the following
structure::
        workspace = {
            'name': 'Your workspace name',
            'description': 'Your workspace description, can be multiline',
            'plugins': ['PluginA', 'PluginB', ...]
        }
"""
#: setuptools entry point to use for adding new editor plugins.
ENTRYPOINT = 'hackedit.plugins.workspace_providers'
def get_data(self):
"""
        Gets the workspace data dictionary.
"""
pass
class SymbolParserPlugin:
"""
Plugin used to parse the symbols of a file.
The plugin must declare the mimetypes it can handle and implement
the ``parse`` method.
The parse method will parse the content of a file and return a list
of :class:`pyqode.core.share.Definition` that will be written to the
project's index database by the indexing backend.
"""
ENTRYPOINT = 'hackedit.plugins.symbol_parsers'
    #: Specify the mimetypes that can be handled by a particular indexer
#: plugin
mimetypes = []
def parse(self, path):
"""
Parses a file and returns a list of
:class:`pyqode.core.share.Definition`.
This method will be called automatically when indexing files for any
file that match one of the supported mimetype.
"""
pass
class PreferencePagePlugin:
"""
A preference page plugin provides a :class:`PreferencePage` widget that
will get automatically shown in the application preferences dialog.
This preference page won't be tied to the plugins category (you're free to
define a category or not in your preferences page).
"""
#: setuptools entry point to use for adding new editor plugins.
ENTRYPOINT = 'hackedit.plugins.preference_pages'
@classmethod
def get_preferences_page(cls):
"""
Returns the preference page widget.
"""
pass
class TemplateProviderPlugin:
"""
A template provider plugin provides an additional source of templates
to the application.
"""
ENTRYPOINT = 'hackedit.plugins.template_providers'
def get_label(self):
"""
Gets the label of the provider. The label will appear in the list
of template sources. It must be carefully chosen.
"""
pass
def get_url(cls):
"""
Gets the template url. This can be a remote url (pointing to a git
repository) or a local url (pointing to the directory that contains the
templates)
"""
pass
def get_plugin_instance(plugin_class):
"""
Returns the plugin instance that match a given plugin **class**.
:param plugin_class: Plugin class
"""
return _window().get_plugin_instance(plugin_class)
def get_script_runner():
"""
Gets the script runner plugin instance if any otherwise returns None.
:rtype: hackedit.api.interpreters.ScriptRunnerPlugin
"""
from .interpreters import ScriptRunnerPlugin
return _window().get_plugin_instance(ScriptRunnerPlugin)
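# --- Hedged example (not part of the original module) ---
# A plugin package would typically advertise its classes through setuptools
# entry points in its own setup.py. The package, module and class names below
# are purely illustrative; only the entrypoint strings come from this module:
#
#     setup(
#         name='hackedit-myplugin',
#         packages=['myplugin'],
#         entry_points={
#             'hackedit.plugins.editors': [
#                 'my_editor = myplugin.editor:MyEditorPlugin',
#             ],
#             'hackedit.plugins.workspace_plugins': [
#                 'my_workspace = myplugin.workspace:MyWorkspacePlugin',
#             ],
#         },
#     )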
|
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def getPersonByUsername(self, username):
"""
Parameters:
- username
"""
pass
def savePerson(self, person):
"""
Parameters:
- person
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def getPersonByUsername(self, username):
"""
Parameters:
- username
"""
self.send_getPersonByUsername(username)
return self.recv_getPersonByUsername()
def send_getPersonByUsername(self, username):
self._oprot.writeMessageBegin('getPersonByUsername', TMessageType.CALL, self._seqid)
args = getPersonByUsername_args()
args.username = username
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getPersonByUsername(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getPersonByUsername_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.dataException is not None:
raise result.dataException
raise TApplicationException(TApplicationException.MISSING_RESULT, "getPersonByUsername failed: unknown result")
def savePerson(self, person):
"""
Parameters:
- person
"""
self.send_savePerson(person)
self.recv_savePerson()
def send_savePerson(self, person):
self._oprot.writeMessageBegin('savePerson', TMessageType.CALL, self._seqid)
args = savePerson_args()
args.person = person
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_savePerson(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = savePerson_result()
result.read(iprot)
iprot.readMessageEnd()
if result.dataException is not None:
raise result.dataException
return
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["getPersonByUsername"] = Processor.process_getPersonByUsername
self._processMap["savePerson"] = Processor.process_savePerson
self._on_message_begin = None
def on_message_begin(self, func):
self._on_message_begin = func
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if self._on_message_begin:
self._on_message_begin(name, type, seqid)
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_getPersonByUsername(self, seqid, iprot, oprot):
args = getPersonByUsername_args()
args.read(iprot)
iprot.readMessageEnd()
result = getPersonByUsername_result()
try:
result.success = self._handler.getPersonByUsername(args.username)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except DataException as dataException:
msg_type = TMessageType.REPLY
result.dataException = dataException
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getPersonByUsername", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_savePerson(self, seqid, iprot, oprot):
args = savePerson_args()
args.read(iprot)
iprot.readMessageEnd()
result = savePerson_result()
try:
self._handler.savePerson(args.person)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except DataException as dataException:
msg_type = TMessageType.REPLY
result.dataException = dataException
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("savePerson", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class getPersonByUsername_args(object):
"""
Attributes:
- username
"""
def __init__(self, username=None,):
self.username = username
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.username = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getPersonByUsername_args')
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 1)
oprot.writeString(self.username.encode('utf-8') if sys.version_info[0] == 2 else self.username)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.username is None:
raise TProtocolException(message='Required field username is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getPersonByUsername_args)
getPersonByUsername_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'username', 'UTF8', None, ), # 1
)
class getPersonByUsername_result(object):
"""
Attributes:
- success
- dataException
"""
def __init__(self, success=None, dataException=None,):
self.success = success
self.dataException = dataException
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Person()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.dataException = DataException()
self.dataException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getPersonByUsername_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.dataException is not None:
oprot.writeFieldBegin('dataException', TType.STRUCT, 1)
self.dataException.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getPersonByUsername_result)
getPersonByUsername_result.thrift_spec = (
(0, TType.STRUCT, 'success', [Person, None], None, ), # 0
(1, TType.STRUCT, 'dataException', [DataException, None], None, ), # 1
)
class savePerson_args(object):
"""
Attributes:
- person
"""
def __init__(self, person=None,):
self.person = person
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.person = Person()
self.person.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('savePerson_args')
if self.person is not None:
oprot.writeFieldBegin('person', TType.STRUCT, 1)
self.person.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.person is None:
raise TProtocolException(message='Required field person is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(savePerson_args)
savePerson_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'person', [Person, None], None, ), # 1
)
class savePerson_result(object):
"""
Attributes:
- dataException
"""
def __init__(self, dataException=None,):
self.dataException = dataException
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.dataException = DataException()
self.dataException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('savePerson_result')
if self.dataException is not None:
oprot.writeFieldBegin('dataException', TType.STRUCT, 1)
self.dataException.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(savePerson_result)
savePerson_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'dataException', [DataException, None], None, ), # 1
)
fix_spec(all_structs)
del all_structs
|
from pandas import DataFrame
class Compiler:
"""
    Deals with writing files using information gained from crawling
"""
@staticmethod
def decompile(data: DataFrame, load_file: str, save_file_path: str, prefix: str):
"""
        Saves each segment described in the data frame as a separate file under the given path.
:param data: The dataframe of the crawled files. Expects a format (start byte, end byte, size, confidence, file_type)
:param save_file_path: The path to where the file should be saved.
:param load_file: The file path for the file which contents should be decompiled
:param prefix: The prefix of the file to be saved.
:return:
"""
with open(load_file, 'rb') as load_file:
for index, row in data.iterrows():
start_byte = row["start_byte"]
size = row["size"]
file_type = row["file_type"]
load_file.seek(start_byte)
bytes = load_file.read(size)
with open(f"{save_file_path}\\{prefix}_{index}.{file_type}", "wb") as save_file:
save_file.write(bytes)
@staticmethod
def decompile_to_single_file(data: DataFrame, load_file: str, save_file_path: str, prefix: str, separation: bytearray=None):
"""
        Decompiles a frame to a single file, inserting a separation bytearray between segments.
:param data: The dataframe of the crawled files. Expects a format (start byte, end byte, size, confidence, file_type)
:param save_file_path: The path to where the file should be saved.
:param load_file: The file path for the file which contents should be decompiled
:param prefix: The prefix of the file to be saved.
:param separation: The separation bytearray that is pasted between each data point defined in data.
:return:
"""
if separation is None:
separation = bytearray()
else:
separation = bytearray(separation)
with open(load_file, 'rb') as load_file:
with open(f"{save_file_path}\\{prefix}_all.dat", "wb") as save_file:
for index, row in data.iterrows():
start_byte = row["start_byte"]
size = row["size"]
load_file.seek(start_byte)
data_bytes = load_file.read(size)
save_file.write(data_bytes)
save_file.write(separation)
@staticmethod
def decompile_to_data_frame(data: DataFrame, load_file: str):
"""
Decompiles the data into a given data frame, as a new data column.
:param data: The dataframe to load and save to
:param load_file: The file to load the data from.
:return:
"""
all_vals = []
with open(load_file, 'rb') as load_file:
for index, row in data.iterrows():
start_byte = row["start_byte"]
size = row["size"]
load_file.seek(start_byte)
all_vals.append(load_file.read(size))
data["data"] = all_vals
return data
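# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates `decompile_to_data_frame` on a throwaway temporary file. The
# column names follow the docstrings above; the byte offsets and file contents
# are made up for illustration.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False, suffix='.bin') as tmp:
        tmp.write(b'HEADERpayload-one|payload-two')
        tmp_path = tmp.name
    frame = DataFrame({
        'start_byte': [6, 18],
        'size': [11, 11],
        'confidence': [0.9, 0.8],
        'file_type': ['txt', 'txt'],
    })
    result = Compiler.decompile_to_data_frame(frame, tmp_path)
    print(result['data'].tolist())  # expected: [b'payload-one', b'payload-two']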
|
import os
import numpy as np
from PIL import Image, ImageFilter
import pandas as pd
import pymesh
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
# Lighting noise transform
class TransLightning(object):
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone()\
.mul(alpha.view(1, 3).expand(3, 3))\
.mul(self.eigval.view(1, 3).expand(3, 3))\
.sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
# ImageNet statistics
imagenet_pca = {
'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
'eigvec': torch.Tensor([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
# Define normalization and random disturb for input image
disturb = TransLightning(0.1, imagenet_pca['eigval'], imagenet_pca['eigvec'])
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def random_crop(im, x, y, w, h):
left = max(0, x + int(np.random.uniform(-0.1, 0.1) * w))
upper = max(0, y + int(np.random.uniform(-0.1, 0.1) * h))
right = min(im.size[0], x + int(np.random.uniform(0.9, 1.1) * w))
lower = min(im.size[1], y + int(np.random.uniform(0.9, 1.1) * h))
im_crop = im.crop((left, upper, right, lower))
return im_crop
def resize_pad(im, dim):
w, h = im.size
im = transforms.functional.resize(im, int(dim * min(w, h) / max(w, h)))
left = int(np.ceil((dim - im.size[0]) / 2))
right = int(np.floor((dim - im.size[0]) / 2))
top = int(np.ceil((dim - im.size[1]) / 2))
bottom = int(np.floor((dim - im.size[1]) / 2))
im = transforms.functional.pad(im, (left, top, right, bottom))
return im
def read_pointcloud(model_path, point_num):
# read in original point cloud
point_cloud_raw = pymesh.load_mesh(model_path).vertices
    # randomly select a fixed number of points on the surface
point_subset = np.random.choice(point_cloud_raw.shape[0], point_num, replace=False)
point_cloud = point_cloud_raw[point_subset]
point_cloud = torch.from_numpy(point_cloud.transpose()).float()
# normalize the point cloud into [0, 1]
point_cloud = point_cloud - torch.min(point_cloud)
point_cloud = point_cloud / torch.max(point_cloud)
return point_cloud
# ================================================= #
# Datasets used for training
# ================================================= #
class Pascal3D(data.Dataset):
def __init__(self, root_dir, annotation_file, input_dim=224, point_num=2500, train=True,
keypoint=True, novel=True, cls_choice=None, shot=None):
self.train = train
self.root_dir = root_dir
self.input_dim = input_dim
self.point_num = point_num
# load the data frame for annotations
frame = pd.read_csv(os.path.join(root_dir, annotation_file))
frame = frame[frame.elevation != 90]
frame = frame[frame.difficult == 0]
if annotation_file == 'ObjectNet3D.txt':
frame.azimuth = (360. + frame.azimuth) % 360
        # evaluate only on non-occluded and non-truncated objects with keypoint annotations, as in MetaView/StarMap
if train:
frame = frame[frame.set == 'train']
else:
frame = frame[frame.set == 'val']
frame = frame[frame.truncated == 0]
frame = frame[frame.occluded == 0]
frame = frame[frame.has_keypoints == 1]
# we exclude training samples without keypoint annotations for a fair comparison with MetaView
if train and keypoint:
frame = frame[frame.has_keypoints == 1]
        # exclude novel classes during training and include them for testing
if cls_choice is not None:
if train:
frame = frame[~frame.cat.isin(cls_choice)] if novel else frame
else:
frame = frame[frame.cat.isin(cls_choice)]
# sample K-shot training data
if train and shot is not None:
categories = np.unique(frame.cat)
fewshot_frame = []
for cat in categories:
fewshot_frame.append(frame[frame.cat == cat].sample(n=shot))
frame = pd.concat(fewshot_frame)
self.annotation_frame = frame
# define data augmentation and preprocessing for RGB images in training
self.im_augmentation = transforms.Compose([
transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
transforms.ToTensor(), normalize, disturb])
# define data preprocessing for RGB images in validation
self.im_transform = transforms.Compose([transforms.ToTensor(), normalize])
# define data preprocessing for rendered multi view images
self.render_transform = transforms.ToTensor()
if input_dim != 224:
self.render_transform = transforms.Compose([transforms.Resize(input_dim), transforms.ToTensor()])
def __len__(self):
return len(self.annotation_frame)
def __getitem__(self, idx):
# get image path and category
img_path = os.path.join(self.root_dir, self.annotation_frame.iloc[idx, -1])
cls = self.annotation_frame.iloc[idx]['cat']
# randomly choose a shape or an exemplar shape
cls_index = np.unique(self.annotation_frame[self.annotation_frame.cat == cls].cad_index)
cad_index = np.random.choice(cls_index)
left = self.annotation_frame.iloc[idx]['left']
upper = self.annotation_frame.iloc[idx]['upper']
right = self.annotation_frame.iloc[idx]['right']
lower = self.annotation_frame.iloc[idx]['lower']
        # use continuous viewpoint annotation
label = self.annotation_frame.iloc[idx, 9:12].values
# load real images in a Tensor of size C*H*W
im = Image.open(img_path).convert('RGB')
if self.train:
# Gaussian blur
if min(right - left, lower - upper) > 224 and np.random.random() < 0.3:
im = im.filter(ImageFilter.GaussianBlur(3))
# crop the original image with 2D bounding box jittering
im = random_crop(im, left, upper, right - left, lower - upper)
# Horizontal flip
if np.random.random() > 0.5:
im = im.transpose(Image.FLIP_LEFT_RIGHT)
label[0] = 360 - label[0]
label[2] = -label[2]
# Random rotation
if np.random.random() > 0.5:
r = max(-60, min(60, np.random.randn() * 30))
im = im.rotate(r)
label[2] = label[2] + r
label[2] += 360 if label[2] < -180 else (-360 if label[2] > 180 else 0)
# pad it to the desired size
im = resize_pad(im, self.input_dim)
im = self.im_augmentation(im)
else:
# crop the ground truth bounding box and pad it to the desired size
im = im.crop((left, upper, right, lower))
im = resize_pad(im, self.input_dim)
im = self.im_transform(im)
# transform the ground-truth angle values into [0, 360)
label[0] = (360. - label[0]) % 360.
label[1] = label[1] + 90.
label[2] = (label[2] + 180.) % 360.
label = label.astype('int')
# load point_clouds
pointcloud_path = os.path.join(self.root_dir, 'Pointclouds', cls, '{:02d}'.format(cad_index), 'compressed.ply')
point_cloud = read_pointcloud(pointcloud_path, self.point_num)
if self.train:
return im, point_cloud, label, cls
else:
return im, point_cloud, label
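# Hedged usage sketch (not part of the original source): wrapping the dataset in a
# DataLoader for training. The root directory, annotation file name, and batch size
# below are placeholder assumptions.
#   train_set = Pascal3D(root_dir="data/Pascal3D", annotation_file="Pascal3D.txt",
#                        train=True, input_dim=224, point_num=2500)
#   train_loader = data.DataLoader(train_set, batch_size=16, shuffle=True, num_workers=4)
#   for im, point_cloud, label, cls in train_loader:
#       pass  # forward/backward pass would go here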
|
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .OrderItems import OrderItems
from .Filters import Filters
from .PlatformOrderPage import PlatformOrderPage
from .AppliedFilters import AppliedFilters
class OrderListing(BaseSchema):
# Order swagger.json
items = fields.List(fields.Nested(OrderItems, required=False), required=False)
filters = fields.Nested(Filters, required=False)
next_order_status = fields.Dict(required=False)
page = fields.Nested(PlatformOrderPage, required=False)
applied_filters = fields.Nested(AppliedFilters, required=False)
|
# Generated by Django 3.2.6 on 2021-11-03 10:24
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('review', '0004_auto_20211014_1426'),
]
operations = [
migrations.AlterField(
model_name='review',
name='reviewImage',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image'),
),
]
|
import numpy as np
import cv2
import os
class Arducam():
_name = 'Arducam'
def _read_frame(self):
ret = False
        while not ret:
ret, image = self._cam.read()
# split
left = image[0:self.image_size_[1], 0:self.image_size_[0]]
right = image[0:self.image_size_[1],
self.image_size_[0]:2*self.image_size_[0]]
Uleft = cv2.remap(left, self.left_map_1_,
self.left_map_2_, cv2.INTER_LINEAR)
Uright = cv2.remap(right, self.right_map_1_,
self.right_map_2_, cv2.INTER_LINEAR)
Uleft = Uleft[self.matchedRoi1_[1]: self.matchedRoi1_[
1] + self.matchedRoi1_[3], self.matchedRoi1_[0]:self.matchedRoi1_[0]+self.matchedRoi1_[2]]
Uright = Uright[self.matchedRoi2_[1]: self.matchedRoi2_[
1] + self.matchedRoi2_[3], self.matchedRoi2_[0]:self.matchedRoi2_[0]+self.matchedRoi2_[2]]
cv2.imshow("testleft", Uleft)
cv2.imshow("testR", Uright)
# left = np.repeat(left, 3, axis=-1)
# right = np.repeat(right, 3, axis=-1)
return Uleft, Uright
def _connect_to_camera(self):
# import params
self._import_params()
# calculate rectification matrices and maps for undistorting
self.R1_, self.R2_, self.P1_, self.P2_, self.Q_, self.validRoi1_, self.validRoi2_ = cv2.stereoRectify(
self.left_camera_matrix_, self.left_distortion_coefficients_, self.right_camera_matrix_, self.right_distortion_coefficients_, self.image_size_, self.R_, self.T_)
self.left_map_1_, self.left_map_2_ = cv2.initUndistortRectifyMap(
self.left_camera_matrix_, self.left_distortion_coefficients_, self.R1_, self.P1_, self.image_size_, cv2.CV_16SC2)
self.right_map_1_, self.right_map_2_ = cv2.initUndistortRectifyMap(
self.right_camera_matrix_, self.right_distortion_coefficients_, self.R2_, self.P2_, self.image_size_, cv2.CV_16SC2)
# connect to cam
self._cam = cv2.VideoCapture(0, cv2.CAP_GSTREAMER)
self._cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.image_size_[0] * 2)
self._cam.set(cv2.CAP_PROP_FRAME_HEIGHT, self.image_size_[1])
os.system("v4l2-ctl --set-ctrl=gain=5")
self._calculate_matched_roi()
def _import_params(self):
fs = cv2.FileStorage("CalibParams_Stereo_4cm.yml",
cv2.FILE_STORAGE_READ)
if (fs.isOpened()):
self.left_camera_matrix_ = fs.getNode("left_camera_matrix").mat()
self.left_distortion_coefficients_ = fs.getNode(
"left_distortion_coefficients").mat()
self.right_camera_matrix_ = fs.getNode("right_camera_matrix").mat()
self.right_distortion_coefficients_ = fs.getNode(
"right_distortion_coefficients").mat()
self.R_ = fs.getNode("R").mat()
self.T_ = fs.getNode("T").mat()
self.E_ = fs.getNode("E").mat()
self.F_ = fs.getNode("F").mat()
self.image_size_ = (int(fs.getNode("image_width").real()), int(
fs.getNode("image_height").real()))
fs.release()
else:
print("calibration file could not be opened")
def _calculate_matched_roi(self):
new_y_loc = max(self.validRoi1_[1], self.validRoi2_[1])
new_x_loc_right = max(self.validRoi2_[0], self.image_size_[
0] - self.validRoi1_[0] - self.validRoi1_[2])
new_height = min(self.validRoi1_[3] - (new_y_loc - self.validRoi1_[
1]), self.validRoi2_[3] - (new_y_loc - self.validRoi2_[1]))
new_width = min(min((self.validRoi1_[0] + self.validRoi1_[2]) - new_x_loc_right, self.validRoi2_[
2]), self.image_size_[0] - self.validRoi2_[0] - new_x_loc_right)
self.matchedRoi1_ = (self.image_size_[
0] - new_x_loc_right - new_width, new_y_loc, new_width, new_height)
self.matchedRoi2_ = (new_x_loc_right, new_y_loc, new_width, new_height)
def _disconnect_from_camera(self):
self._cam.release()
cam = Arducam()
cam._connect_to_camera()
os.system("v4l2-ctl --set-ctrl=gain=5")
while True:
cam._read_frame()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
|
# -*- coding: utf-8 -*-
# Path to Bochs debugger. Bochs must be compiled with --enable-debugger and --with-nogui.
default_bochs = 'bochs'
|
import time
import tweepy
from data import Data
from flask import Flask, render_template
app = Flask(__name__)
data = Data()
@app.route("/")
def index():
return render_template(
"index.html",
herd=data.getHerd(),
vaccinations=data.getVacc(),
casesDeaths=data.getCasesDeaths(),
)
app.run()
|
import functools
from rest_framework.response import Response
class BasePermissionDecorator(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, obj_type):
return functools.partial(self.__call__, obj)
def error(self, data):
        return Response({"error": "permission-denied", "data": data, "status": 403})
def __call__(self, *args, **kwargs):
self.request = args[1]
if self.check_permission():
# if self.request.user.is_active:
# return self.error("Your account is disabled")
return self.func(*args, **kwargs)
else:
return self.error("Please login first")
def check_permission(self):
raise NotImplementedError()
class login_required(BasePermissionDecorator):
def check_permission(self):
return self.request.user.is_authenticated
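# Hedged usage sketch (not part of the original source): the decorator reads the
# request from the second positional argument, which matches DRF class-based view
# methods. The view class and payload below are illustrative assumptions.
#   class ProfileAPI(APIView):
#       @login_required
#       def get(self, request):
#           return Response({"username": request.user.username})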
|
from collections import deque
class AnyPoppableDeque(deque):
def __init__(self):
super().__init__()
def pop_at_any_pos(self, pos):
value = self[pos]
del self[pos]
return value
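# Hedged usage sketch (not part of the original source): pop_at_any_pos removes and
# returns the element at an arbitrary index, which plain deque.pop()/popleft() cannot do.
#   d = AnyPoppableDeque()
#   d.extend([10, 20, 30])
#   middle = d.pop_at_any_pos(1)  # middle == 20; d is now deque([10, 30])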
class RangeParam(object):
def __init__(self, block_id=None, headers=None):
self.block_id = block_id
self.headers = headers
|
""" drivers and functions for I/O
"""
from __future__ import unicode_literals
from builtins import open, str
import os
import time
import json
import subprocess
from itertools import chain
from itertools import starmap
from functools import partial
import pandas
from .strid import canonical as canonical_species_identifier
from .strid import canonical_reaction_identifier
from .iohelp import abstraction_candidate
from .iohelp import abstraction
from .iohelp import abstraction_xyz_strings
from .iohelp import abstraction_input_string
from .iohelp import addition_candidate
from .iohelp import addition
from .iohelp import addition_xyz_strings
from .iohelp import addition_input_string
from .iohelp import migration_candidate
from .iohelp import migration
from .iohelp import migration_xyz_strings
from .iohelp import migration_input_string
from .table import set_column as set_table_column
from .table import is_empty_value as is_empty_table_value
from .table import has_column_keys as table_has_column_keys
from .table import column as table_column
from .table import columns as table_columns
from .table import lookup_dictionary as table_lookup_dictionary
from .table import lookup_row as table_lookup_row
from .table import lookup_update as table_lookup_update
from .table import columns_like as table_with_columns_like
from .table import update_column_keys as update_table_column_keys
from .table import append_rows as append_table_row_dicts
from .table import append_columns as append_table_columns
from .table import column_keys as table_column_keys
from .table import iterate_rows as iterate_table_row_dicts
from .table import from_columns as table_from_columns
from .table import from_rows as table_from_row_dicts
from .table import reindex as reindex_table
from .table import sort as sort_table
from .table import merge as merge_tables
from .table import intersect as intersect_tables
from .table import move_column_to_front as move_table_column_to_front
# from new table interface
from .table2 import EMPTY
from .table2 import row_indices
from .table2 import update_column_by_index
from .table2 import sql_where_eq
from .table2 import sql_where_in
from .table2 import sql_select_one
# from new parsing module
from .parse.rere.find import split
from .parse.rere.find import strip_spaces
# threading
from .thread import tag_team_starmap
ADD_XYZ_EXTENSION = '{:s}.xyz'.format
SID_COL_KEY = 'species_id'
GEOM_PATH_COL_KEY = 'geom_path'
RXN_IDX_COL_KEY = 'index'
RID_COL_KEY = 'reaction_id'
RXN_PATH_COL_KEY = 'path'
RXN_STAT_COL_KEY = 'status'
RXN_CREATED_VAL = 'created'
RXN_NOT_CREATED_VAL = 'not created'
RXN_RAN_VAL = 'ran'
RXN_FAILED_VAL = 'failed'
ARRH_COL_KEYS = ('arrh_a', 'arrh_b', 'arrh_e')
NASA_LO_COL_KEYS = ('nasa_lo_1', 'nasa_lo_2', 'nasa_lo_3', 'nasa_lo_4',
'nasa_lo_5', 'nasa_lo_6', 'nasa_lo_7')
NASA_HI_COL_KEYS = ('nasa_hi_1', 'nasa_hi_2', 'nasa_hi_3', 'nasa_hi_4',
'nasa_hi_5', 'nasa_hi_6', 'nasa_hi_7')
NASA_T_COL_KEYS = ('nasa_t_com', 'nasa_t_lo', 'nasa_t_hi')
REACTANT_SID_COL_KEYS = (
('addition', ('x', 'y')),
('abstraction', ('q1h', 'q2')),
('migration', ('r',))
)
PRODUCT_SID_COL_KEYS = (
    ('addition', ('xy',)),
('abstraction', ('q1', 'q2h')),
('migration', ('p',))
)
REACTION_SID_COL_KEYS = (
('addition', ('x', 'y', 'xy')),
('abstraction', ('q1h', 'q2', 'q1', 'q2h')),
('migration', ('r', 'p'))
)
REACTION_IDX_COL_KEYS = (
('addition', ('x_idx', 'y_idx', 'xy_idx_x', 'xy_idx_y')),
('abstraction', ('q1h_idx', 'q2_idx', 'q1_idx', 'q2h_idx')),
('migration', ('r_idx_h', 'r_idx_a', 'p_idx_h', 'p_idx_a'))
)
REACTION_CANDIDATE_FINDERS = (
('addition', addition_candidate),
('abstraction', abstraction_candidate),
('migration', migration_candidate)
)
REACTION_FINDERS = (
('addition', addition),
('abstraction', abstraction),
('migration', migration)
)
REACTION_XYZ_STRING_MAKERS = (
('addition', addition_xyz_strings),
('abstraction', abstraction_xyz_strings),
('migration', migration_xyz_strings)
)
REACTION_INPUT_STRING_MAKERS = (
('addition', addition_input_string),
('abstraction', abstraction_input_string),
('migration', migration_input_string)
)
def init(mech_txt, spc_csv, rxn_csv_out, spc_csv_out, geom_dir, id2path,
therm_txt, without_thermo, logger):
""" initialize a mechanism from a CHEMKIN mechanism file
"""
from .iohelp import translate_chemkin_reaction
from .iohelp import thermo_value_dictionary
from .pchemkin import reactions as chemkin_reactions
from .pchemkin import thermo_block as chemkin_thermo_block
from .pchemkin import therm_data_strings as chemkin_therm_data_strings
logger.info("Reading in {:s}".format(mech_txt))
mech_str = read_file(mech_txt)
logger.info("Reading in {:s}".format(spc_csv))
spc_df = pandas.read_csv(spc_csv)
spcs = tuple(spc_df['species'])
sids = tuple(map(canonical_species_identifier, spc_df['species_id']))
sid_dct = dict(zip(spcs, sids))
spc_df['species_id'] = sids
if not without_thermo:
        if therm_txt is None and chemkin_thermo_block(mech_str):
            therm_str = mech_str
        elif therm_txt is not None:
            logger.info("Reading in {:s}".format(therm_txt))
            therm_str = read_file(therm_txt)
        else:
            therm_str = None
        if not therm_str:
raise ValueError("No thermo data found! Either specify the thermo "
"file or turn thermo off.")
thd_strs = chemkin_therm_data_strings(therm_str)
thv_dct = thermo_value_dictionary(thd_strs, sid_dct)
spc_df['therm_val'] = tuple(
map(thv_dct.__getitem__, spc_df['species_id']))
logger.info("Finding reactions")
rxn_strs = chemkin_reactions(mech_str)
rxn_rows = []
mis_rows = []
seen = []
for num, rxn_str in enumerate(rxn_strs):
if rxn_str in seen:
logger.info("Ignoring duplicate reaction {:s}".format(rxn_str))
continue
else:
seen.append(rxn_str)
ck_num = num + 1
rid = translate_chemkin_reaction(rxn_str, sid_dct)
if rid:
rid = canonical_reaction_identifier(rid)
logger.info("Found reaction {:d}: {:s}".format(ck_num, rid))
rxn_rows.append((rid, ck_num, rxn_str))
else:
logger.info("Failed to translate reaction {:d}: {:s}"
.format(ck_num, rxn_str))
mis_rows.append((ck_num, rxn_str))
spc_df = initialize_geometries(spc_df, geom_dir, id2path, logger)
rxn_cols = ('reaction_id', 'chemkin_index', 'reaction')
rxn_df = table_from_row_dicts(rxn_rows, rxn_cols)
rxn_df = sort_table(rxn_df, 'chemkin_index')
mis_cols = ('chemkin_index', 'reaction')
mis_df = table_from_row_dicts(mis_rows, mis_cols)
logger.info("Writing species to {:s}".format(spc_csv_out))
write_table_to_csv(spc_df, spc_csv_out, float_fmt='%.8f')
logger.info("Writing reactions to {:s}".format(rxn_csv_out))
write_table_to_csv(rxn_df, rxn_csv_out, float_fmt='%.4f')
logger.info("Writing missed reactions to missed.csv")
write_table_to_csv(mis_df, 'missed.csv')
def init_from_rmg(mech_json, spc_json, rxn_csv_out, spc_csv_out, geom_dir,
id2path, logger):
""" initialize a mechanism from RMG files
"""
from .prmg import species_name as species_name_from_dct
from .prmg import species_identifier as species_identifier_from_dct
from .prmg import species_thermo_value as species_thermo_value_from_dct
from .prmg import reaction_name as reaction_name_from_dct
from .prmg import reaction_identifier as reaction_identifier_from_dct
from .prmg import reaction_sensitivity as reaction_sensitivity_from_dct
from .prmg import reaction_uncertainty as reaction_uncertainty_from_dct
from .prmg import reaction_value as reaction_value_from_dct
logger.info("Reading in {:s}".format(mech_json))
mech_rxn_dcts = read_json(mech_json)
logger.info("Reading in {:s}".format(spc_json))
spc_dcts = read_json(spc_json)
spc_strs = list(map(species_name_from_dct, spc_dcts))
spc_sids = list(map(canonical_species_identifier,
map(species_identifier_from_dct, spc_dcts)))
spc_thvs = list(map(species_thermo_value_from_dct, spc_dcts))
mech_rxn_strs = list(map(reaction_name_from_dct, mech_rxn_dcts))
mech_rids = list(map(canonical_reaction_identifier,
map(reaction_identifier_from_dct, mech_rxn_dcts)))
mech_stvys = list(map(reaction_sensitivity_from_dct, mech_rxn_dcts))
mech_uctys = list(map(reaction_uncertainty_from_dct, mech_rxn_dcts))
mech_rvals = list(map(reaction_value_from_dct, mech_rxn_dcts))
spc_cols = (spc_sids, spc_strs, spc_thvs)
spc_col_keys = ('species_id', 'species', 'therm_val')
spc_df = table_from_columns(spc_cols, spc_col_keys)
spc_df = initialize_geometries(spc_df, geom_dir, id2path, logger)
rxn_cols = (mech_rids, mech_rxn_strs, mech_stvys, mech_uctys, mech_rvals)
rxn_col_keys = ('reaction_id', 'reaction', 'sensitivity', 'uncertainty',
'rmg_value')
rxn_df = table_from_columns(rxn_cols, rxn_col_keys)
logger.info("Writing species to {:s}".format(spc_csv_out))
write_table_to_csv(spc_df, spc_csv_out, float_fmt='%.8f')
logger.info("Writing reactions to {:s}".format(rxn_csv_out))
write_table_to_csv(rxn_df, rxn_csv_out, float_fmt='%.4f')
def initialize_geometries(spc_df, geom_dir, id2path, logger):
""" initialize species geometries
"""
from .strid import xyz_string
logger.info("Initializing species geometries")
assert 'species_id' in spc_df
if not os.path.exists(geom_dir):
os.mkdir(geom_dir)
for idx, row in spc_df.iterrows():
sid = row['species_id']
fname = id2path(sid) + '.xyz'
fpath = os.path.join(geom_dir, fname)
if not os.path.exists(fpath):
logger.info("Writing geometry for {:s} to {:s}"
.format(sid, fpath))
dxyz = xyz_string(sid)
write_file(fpath, contents=dxyz)
else:
logger.info("Geometry for {:s} already exists at {:s}"
.format(sid, fpath))
spc_df.at[idx, 'path'] = fpath
return spc_df
def read_geometries(spc_csv):
""" a dictionary of geometries, indexed by species ID
"""
from .dotxyz import geometry
prefix = os.path.dirname(spc_csv)
add_prefix_ = partial(os.path.join, prefix)
spc_df = pandas.read_csv(spc_csv)
sids = spc_df['species_id']
paths = map(add_prefix_, spc_df['path'])
dxyzs = map(read_file, paths)
mgeos = map(geometry, dxyzs)
mgeo_dct = dict(zip(sids, mgeos))
return mgeo_dct
def species_find_geometries(spc_csv, spc_csv_out, geom_dir, id2path, logger):
""" find species .xyz files
"""
logger.info("Reading in {:s}".format(spc_csv))
spc_df = pandas.read_csv(spc_csv)
spc_df = update_table_column_keys(spc_df, (GEOM_PATH_COL_KEY,))
sids = table_column(spc_df, SID_COL_KEY)
to_path_ = partial(os.path.join, geom_dir)
fpaths = tuple(map(to_path_, map(ADD_XYZ_EXTENSION, map(id2path, sids))))
for sid, fpath in zip(sids, fpaths):
logger.info("species {:s}".format(sid))
if os.path.exists(fpath):
logger.info(" geometry file found at {:s}".format(fpath))
spc_df = table_lookup_update(spc_df,
(SID_COL_KEY, sid),
(GEOM_PATH_COL_KEY, fpath))
else:
logger.info(" no geometry file found.")
logger.info("Writing species to {:s}".format(spc_csv_out))
write_table_to_csv(spc_df, spc_csv_out)
def species_fill_geometries(spc_csv, spc_csv_out, geom_dir, id2path, logger):
""" find species .xyz files
"""
from .strid import xyz_string
logger.info("Reading in {:s}".format(spc_csv))
spc_df = pandas.read_csv(spc_csv)
if not os.path.exists(geom_dir):
os.mkdir(geom_dir)
spc_df = update_table_column_keys(spc_df, (GEOM_PATH_COL_KEY,))
sids = table_column(spc_df, SID_COL_KEY)
geom_fpaths = table_column(spc_df, GEOM_PATH_COL_KEY)
pathless_sids = tuple(sid for sid, fpath in zip(sids, geom_fpaths)
if is_empty_table_value(fpath))
to_fpath_ = partial(os.path.join, geom_dir)
new_geom_fpaths = tuple(
map(to_fpath_, map(ADD_XYZ_EXTENSION, map(id2path, pathless_sids))))
for sid, fpath in zip(pathless_sids, new_geom_fpaths):
logger.info("species {:s}".format(sid))
if os.path.exists(fpath):
logger.info(" file already exists; use geometry finder to add "
" it to the database")
else:
logger.info("Writing geometry for {:s} to {:s}"
.format(sid, fpath))
dxyz = xyz_string(sid)
write_file(fpath, contents=dxyz)
spc_df = table_lookup_update(spc_df,
(SID_COL_KEY, sid),
(GEOM_PATH_COL_KEY, fpath))
logger.info("Writing species to {:s}".format(spc_csv_out))
write_table_to_csv(spc_df, spc_csv_out)
def reactions_find_arrhenius(rxn_csv, rxn_csv_out, logger):
""" get arrhenius parameters from job directories
"""
from .ptorsscan import arrhenius as arrhenius_from_plog
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
col_keys = table_column_keys(rxn_df)
assert 'reaction_id' in col_keys and 'path' in col_keys
prefix = os.path.dirname(rxn_csv)
def _get(rxn_row):
arrh = None
rid = rxn_row['reaction_id']
if not is_empty_table_value(rxn_row['path']):
path = os.path.join(prefix, rxn_row['path'])
logger.info("reaction {:s}".format(rid))
plog_path = os.path.join(path, 'rate.plog')
if os.path.isfile(plog_path):
logger.info(plog_path)
plog_str = read_file(plog_path)
arrh = arrhenius_from_plog(plog_str)
if arrh:
logger.info("A={:f}, b={:f}, Ea={:f}".format(*arrh))
else:
logger.info("No rate.plog file found")
return arrh if arrh else (None, None, None)
arrh_col_keys = ('arrh_a', 'arrh_b', 'arrh_e')
rxn_df = update_table_column_keys(rxn_df, col_keys=arrh_col_keys)
arrh_as, arrh_bs, arrh_es = zip(
*map(_get, iterate_table_row_dicts(rxn_df)))
rxn_df['arrh_a'] = arrh_as
rxn_df['arrh_b'] = arrh_bs
rxn_df['arrh_e'] = arrh_es
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv_out)
def reactions_plot_arrhenius(rxn_csv, rxn_csv_ref, rxn_csv_out, plot_dir,
extension, tmp_rng, lbl_col_keys, id2path,
logger):
""" make Arrhenius plots
"""
from .plot import write_diagram
from .iohelp import arrhenius_diagram
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
assert table_has_column_keys(rxn_df, ARRH_COL_KEYS)
assert table_has_column_keys(rxn_df, lbl_col_keys)
rxn_df = update_table_column_keys(rxn_df, ('plot_path',))
assert len(tmp_rng) == 2 and tmp_rng[0] < tmp_rng[1]
tmp_lo, tmp_hi = tmp_rng
if rxn_csv_ref:
logger.info("Reading in {:s}".format(rxn_csv_ref))
rxn_df_ref = pandas.read_csv(rxn_csv_ref)
assert table_has_column_keys(rxn_df_ref, ARRH_COL_KEYS)
else:
rxn_df_ref = None
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
for row in iterate_table_row_dicts(rxn_df):
rid = row['reaction_id']
logger.info("reaction {:s}".format(rid))
cfts = tuple(map(row.__getitem__, ARRH_COL_KEYS))
lbls = tuple(map(row.__getitem__, lbl_col_keys))
ref_cfts = None
if rxn_df_ref is not None:
ref_row = table_lookup_row(rxn_df_ref, ('reaction_id', rid))
if ref_row:
ref_cfts = tuple(map(ref_row.__getitem__, ARRH_COL_KEYS))
arrh_dgm = arrhenius_diagram(cfts, ref_cfts, tmp_lo, tmp_hi, lbls)
if arrh_dgm:
fname = '{:s}.{:s}'.format(id2path(rid), extension)
fpath = os.path.join(plot_dir, fname)
logger.info(" writing plot to {:s}".format(fpath))
write_diagram(arrh_dgm, fpath, close=True)
rxn_df = table_lookup_update(rxn_df,
('reaction_id', rid),
('plot_path', fpath))
else:
logger.info(" missing Arrhenius coefficients; skipping...")
logger.info("Writing updated reaction table to {:s}".format(rxn_csv_out))
write_table_to_csv(rxn_df, rxn_csv_out)
def read_thermo_data(spc_csv):
""" a dictionary of thermo values (H298), indexed by species ID
"""
thv_dct = None
spc_df = pandas.read_csv(spc_csv)
if 'therm_val' in spc_df:
thv_dct = dict(zip(spc_df['species_id'], spc_df['therm_val']))
return thv_dct
def chemkin_to_csv(mech_txt, thm_txt, rxn_csv_out, spc_csv_out, logger):
""" parse CHEMKIN files
"""
from .pchemkin import species as chemkin_species
from .pchemkin import reactions as chemkin_reactions
from .pchemkin import (thermodynamics_dictionaries as
chemkin_thermodynamics_dictionaries)
from .pchemkin import kinetics as chemkin_kinetics
logger.info("Reading in {:s}".format(mech_txt))
mech_str = read_file(mech_txt)
if thm_txt:
logger.info("Reading in {:s}".format(thm_txt))
thm_str = read_file(thm_txt)
else:
logger.info("No thermo file. Looking for thermo data in {:s}."
.format(mech_txt))
thm_str = mech_str
logger.info("Finding species")
spcs = chemkin_species(mech_str)
spc_ck_idxs = tuple(range(1, len(spcs)+1))
logger.info("Finding reactions")
rxns = chemkin_reactions(mech_str)
rxn_ck_idxs = tuple(range(1, len(rxns)+1))
logger.info("Finding thermodynamics data")
thm_dcts = chemkin_thermodynamics_dictionaries(thm_str)
logger.info("Finding kinetics data")
kin_lst, reacs = chemkin_kinetics(mech_str)
spc_df = table_from_columns((spc_ck_idxs, spcs),
('chemkin_index', 'species'))
rxn_df = table_from_columns((rxn_ck_idxs, rxns),
('chemkin_index', 'reaction'))
for rxn in rxns:
if rxn not in reacs:
logger.info(rxn)
if kin_lst:
assert len(kin_lst) == len(rxns)
arrh_cols = tuple(zip(*kin_lst))
rxn_df = append_table_columns(rxn_df, arrh_cols, ARRH_COL_KEYS)
if thm_dcts:
nasa_lo_dct, nasa_hi_dct, nasa_t_dct = thm_dcts
nasa_lo_cols = tuple(zip(*map(nasa_lo_dct.__getitem__, spcs)))
nasa_hi_cols = tuple(zip(*map(nasa_hi_dct.__getitem__, spcs)))
nasa_t_cols = tuple(zip(*map(nasa_t_dct.__getitem__, spcs)))
thm_col_keys = NASA_LO_COL_KEYS + NASA_HI_COL_KEYS + NASA_T_COL_KEYS
thm_cols = nasa_lo_cols + nasa_hi_cols + nasa_t_cols
spc_df = append_table_columns(spc_df, thm_cols, thm_col_keys)
logger.info("Writing species to {:s}".format(spc_csv_out))
write_table_to_csv(spc_df, spc_csv_out)
logger.info("Writing reactions to {:s}".format(rxn_csv_out))
write_table_to_csv(rxn_df, rxn_csv_out)
def chemkin_id_reactions(rxn_csv, spc_csv, rxn_csv_out, spc_csv_out, logger):
""" determine reaction identifiers for CHEMKIN reactions
"""
from .iohelp import translate_chemkin_reaction
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
logger.info("Reading in {:s}".format(spc_csv))
spc_df = pandas.read_csv(spc_csv)
assert table_has_column_keys(spc_df, ('species', 'species_id'))
logger.info("Canonicalizing species IDs")
sids = table_column(spc_df, 'species_id')
can_sids = tuple(map(canonical_species_identifier, sids))
spc_df = set_table_column(spc_df, 'species_id', can_sids)
sid_dct = dict(zip(*table_columns(spc_df, ('species', 'species_id'))))
rxns = table_column(rxn_df, 'reaction')
rids = tuple(translate_chemkin_reaction(rxn, sid_dct) for rxn in rxns)
can_rids = tuple(canonical_reaction_identifier(rid) if rid else None
for rid in rids)
rxn_df = set_table_column(rxn_df, 'reaction_id', can_rids)
spc_df = move_table_column_to_front(spc_df, 'species_id')
rxn_df = move_table_column_to_front(rxn_df, 'reaction_id')
logger.info("Writing species to {:s}".format(spc_csv_out))
write_table_to_csv(spc_df, spc_csv_out)
logger.info("Writing reactions to {:s}".format(rxn_csv_out))
write_table_to_csv(rxn_df, rxn_csv_out)
def reactions_to_chemkin(cls, rxn_csv, spc_csv, mech_txt_out, logger):
""" generate CHEMKIN files from CSVs
"""
rct_sid_col_keys = dict(REACTANT_SID_COL_KEYS)[cls]
prd_sid_col_keys = dict(PRODUCT_SID_COL_KEYS)[cls]
logger.info("Reading in {:s}".format(spc_csv))
spc_df = pandas.read_csv(spc_csv)
spc_col_keys = table_column_keys(spc_df)
assert 'species' in spc_col_keys and 'species_id' in spc_col_keys
spc_dct = dict(zip(*table_columns(spc_df, ('species_id', 'species'))))
def _chemkin_reaction_name(rct_sids, prd_sids):
rct_str = '+'.join(map(spc_dct.__getitem__, rct_sids))
prd_str = '+'.join(map(spc_dct.__getitem__, prd_sids))
rxn = '='.join([rct_str, prd_str])
return rxn
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
rxn_col_keys = table_column_keys(rxn_df)
assert all(col_key in rxn_col_keys for col_key in rct_sid_col_keys)
assert all(col_key in rxn_col_keys for col_key in prd_sid_col_keys)
rct_sids_lst = tuple(zip(*table_columns(rxn_df, rct_sid_col_keys)))
prd_sids_lst = tuple(zip(*table_columns(rxn_df, prd_sid_col_keys)))
rxns = tuple(starmap(_chemkin_reaction_name,
zip(rct_sids_lst, prd_sids_lst)))
assert all(col_key in rxn_col_keys for col_key in ARRH_COL_KEYS)
arrh_cfts_lst = zip(*table_columns(rxn_df, ARRH_COL_KEYS))
rxn_fmt = '{:{width}s} {:10.3e} {:8.3f} {:12.3f}'
rxn_wd = max(map(len, rxns)) + 5
format_ = partial(rxn_fmt.format, width=rxn_wd)
rxn_block_str = '\n'.join(
format_(rxn, *arrh_cfts) for rxn, arrh_cfts in zip(rxns, arrh_cfts_lst)
if not any(map(is_empty_table_value, arrh_cfts)))
mech_str = '\n'.join(['REACTIONS', rxn_block_str, 'END'])
logger.info("Writing reactions to {:s}".format(mech_txt_out))
write_file(mech_txt_out, mech_str)
def reactions_init(cls, rxn_csv, spc_csv, rxn_csv_out, cdt_csv_out, logger):
""" initialize reactions
"""
_init = reactions_initializer(
cls=cls,
is_candidate=dict(REACTION_CANDIDATE_FINDERS)[cls],
reaction=dict(REACTION_FINDERS)[cls],
sid_cols=dict(REACTION_SID_COL_KEYS)[cls],
idx_cols=dict(REACTION_IDX_COL_KEYS)[cls]
)
return _init(spc_csv, rxn_csv, rxn_csv_out, cdt_csv_out, logger)
def reactions_run_old(cls, rxn_csv, spc_csv, rxn_rng_strs, tpl_txt, nodes,
run_dir, id2path, job_argv, logger):
""" reactions runner
"""
_run = reactions_runner(
cls=cls,
reaction_xyz_strings=dict(REACTION_XYZ_STRING_MAKERS)[cls],
reaction_input_string=dict(REACTION_INPUT_STRING_MAKERS)[cls],
sid_cols=dict(REACTION_SID_COL_KEYS)[cls],
idx_cols=dict(REACTION_IDX_COL_KEYS)[cls]
)
_run(spc_csv, rxn_csv, tpl_txt, rxn_rng_strs, nodes, run_dir, id2path,
job_argv, logger)
def divide(key, dir1, dir2, rxn_csv, rxn_csv_out, logger):
""" split reactions by key
"""
from .strid import is_radical_radical
from .strid import is_spin_balanced
if key == 'rad-rad':
meets_condition_ = is_radical_radical
elif key == 'high-spin':
meets_condition_ = is_spin_balanced
else:
raise ValueError("Unrecognized divide key: {:s}".format(key))
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
rxn_df[key] = tuple(map(meets_condition_, rxn_df['reaction_id']))
rxn_df1 = rxn_df[rxn_df[key]].drop(columns=key)
rxn_df2 = rxn_df[~rxn_df[key]].drop(columns=key)
rxn_csv1_out = os.path.join(dir1, rxn_csv_out)
logger.info("Writing in-category reactions to {:s}"
.format(rxn_csv1_out))
if not os.path.exists(dir1):
os.mkdir(dir1)
write_table_to_csv(rxn_df1, rxn_csv1_out)
rxn_csv2_out = os.path.join(dir2, rxn_csv_out)
logger.info("Writing out-of-category reactions to {:s}"
.format(rxn_csv2_out))
if not os.path.exists(dir2):
os.mkdir(dir2)
write_table_to_csv(rxn_df2, rxn_csv2_out)
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv)
def csv_reindex(table_csv, logger):
""" reindex a table
"""
logger.info("Reading in {:s}".format(table_csv))
table_df = pandas.read_csv(table_csv)
table_df = reindex_table(table_df)
logger.info("Writing updated {:s}".format(table_csv))
write_table_to_csv(table_df, table_csv)
def csv_sort(table_csv, col_key, descending, logger):
""" sort table by column
"""
logger.info("Reading in {:s}".format(table_csv))
table_df = pandas.read_csv(table_csv)
table_df = sort_table(table_df, col_key, descending=descending)
logger.info("Writing updated {:s}".format(table_csv))
write_table_to_csv(table_df, table_csv)
def csv_intersect(table_csvs, col_key, table_csv_out, logger):
""" intersect tables by column
"""
table_dfs = []
for table_csv in table_csvs:
logger.info("Reading in {:s}".format(table_csv))
table_df = pandas.read_csv(table_csv)
table_dfs.append(table_df)
table_df_out = intersect_tables(table_dfs, col_key)
logger.info("Writing {:s}".format(table_csv_out))
write_table_to_csv(table_df_out, table_csv_out)
def csv_merge(table_csvs, col_key, table_csv_out, logger):
""" merge tables by column
"""
table_dfs = []
for table_csv in table_csvs:
logger.info("Reading in {:s}".format(table_csv))
table_df = pandas.read_csv(table_csv)
table_dfs.append(table_df)
table_df_out = merge_tables(table_dfs, col_key)
logger.info("Writing {:s}".format(table_csv_out))
write_table_to_csv(table_df_out, table_csv_out)
# meta scripts
def reactions_initializer(cls, is_candidate, reaction, sid_cols, idx_cols):
""" initialize reactions
"""
assert cls in ('abstraction', 'addition', 'migration')
def _init(spc_csv, rxn_csv, rxn_csv_out, cdt_csv_out, logger):
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
logger.info("Reading in species geometries from {:s}".format(spc_csv))
mgeo_dct = read_geometries(spc_csv)
logger.info("Reading thermo data from {:s}".format(spc_csv))
thv_dct = read_thermo_data(spc_csv)
if not thv_dct:
logger.info("No thermo data found.")
rxn_df_out = table_with_columns_like(rxn_df)
cdt_df_out = table_with_columns_like(rxn_df)
rxn_df_out = update_table_column_keys(rxn_df_out,
col_keys=sid_cols+idx_cols)
cdt_df_out = update_table_column_keys(cdt_df_out,
col_keys=('exception',))
rxn_df = update_table_column_keys(rxn_df, col_keys=('class',))
for rxn_row in iterate_table_row_dicts(rxn_df):
rid = rxn_row['reaction_id']
if is_candidate(rid):
logger.info('reaction: {:s}'.format(rid))
                err = None
                try:
                    rxn = reaction(rid, mgeo_dct, thv_dct)
                except Exception as exc:
                    err = str(exc)
                    logger.info('  exception: {:s}!'.format(err))
                    rxn = None
if rxn:
sids, idxs = rxn
logger.info(' found {:s}!'.format(cls))
log_sids = ', '.join(
'{:s}: {:s}'.format(sid_col, sid)
for sid_col, sid in zip(sid_cols, sids))
log_idxs = ', '.join(
'{:s}: {:d}'.format(idx_col, idx)
for idx_col, idx in zip(idx_cols, idxs))
logger.info(' {:s}\n {:s}'.format(log_sids, log_idxs))
rxn_df = table_lookup_update(rxn_df, ('reaction_id', rid),
('class', cls))
rxn_row.update(zip(sid_cols, sids))
rxn_row.update(zip(idx_cols, idxs))
rxn_df_out = append_table_row_dicts(rxn_df_out, (rxn_row,))
else:
rxn_row['exception'] = err
cdt_df_out = append_table_row_dicts(cdt_df_out, (rxn_row,))
logger.info("Writing {:s} reactions to {:s}"
.format(cls, os.path.abspath(rxn_csv_out)))
rxn_df_out = reindex_table(rxn_df_out)
write_table_to_csv(rxn_df_out, rxn_csv_out)
logger.info("Writing left-over candidates to {:s}"
.format(os.path.abspath(cdt_csv_out)))
write_table_to_csv(cdt_df_out, cdt_csv_out)
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv)
return _init
def reactions_setup_run(cls, rxn_csv, spc_csv, rxn_rng_strs, run_dir, id2path,
cmd_argv, logger):
""" write xyz files for the runner
"""
assert cls in ('abstraction', 'addition', 'migration')
reaction_xyz_strings = dict(REACTION_XYZ_STRING_MAKERS)[cls]
sid_cols = dict(REACTION_SID_COL_KEYS)[cls]
idx_cols = dict(REACTION_IDX_COL_KEYS)[cls]
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
col_keys = table_column_keys(rxn_df)
assert RID_COL_KEY in col_keys and RXN_IDX_COL_KEY in col_keys
logger.info("Reading in species geometries from {:s}".format(spc_csv))
mgeo_dct = read_geometries(spc_csv)
if rxn_rng_strs:
rxn_idxs = _interpret_range_strings(rxn_rng_strs)
logger.info("Interpreted reaction index range argument: {:s}"
.format(repr(rxn_idxs)))
else:
logger.info("No reaction range argument. Running all reactions.")
rxn_idxs = table_column(rxn_df, RXN_IDX_COL_KEY)
    if not os.path.exists(run_dir):
        logger.info("Creating run directory {:s}".format(run_dir))
        os.mkdir(run_dir)
def _create_job_dir(idx, rid, sids, idxs):
logger.info("reaction {:d}: {:s}".format(idx, rid))
logger.info(' indices: {:s}'.format(str(idxs)))
dname = id2path(rid)
dpath = os.path.join(run_dir, dname)
logger.info(" Creating job directory {:s}".format(dpath))
if not os.path.exists(dpath):
os.mkdir(dpath)
ret = (EMPTY, RXN_NOT_CREATED_VAL)
dxyz_dct = reaction_xyz_strings(sids, idxs, mgeo_dct)
if dxyz_dct:
dxyz_sids = dxyz_dct.keys()
dxyzs = dxyz_dct.values()
fnames = tuple(map('{:s}.xyz'.format, map(id2path, dxyz_sids)))
fpaths = tuple(os.path.join(dpath, fname) for fname in fnames)
for fpath, dxyz in zip(fpaths, dxyzs):
logger.info(" Writing {:s}".format(fpath))
write_file(fpath, dxyz)
ret = (dpath, RXN_CREATED_VAL)
else:
logger.info(" Failed to write .xyz files.")
if cmd_argv:
cmd_str = ' '.join(cmd_argv)
logger.info(" running command `{:s}` in {:s}"
.format(cmd_str, dpath))
try:
subprocess.check_call(cmd_argv, cwd=dpath)
except Exception as err:
                logger.info("  {:s}".format(str(err)))
logger.info('')
return ret
logger.info("Writing job .xyz files")
sub_rxn_df = sql_where_in(rxn_df, RXN_IDX_COL_KEY, rxn_idxs)
sub_idxs = tuple(sql_select_one(sub_rxn_df, RXN_IDX_COL_KEY))
sub_rids = tuple(sql_select_one(sub_rxn_df, RID_COL_KEY))
sub_sids_lst = tuple(zip(*(
sql_select_one(sub_rxn_df, sid_col) for sid_col in sid_cols)))
sub_idxs_lst = tuple(zip(*(
sql_select_one(sub_rxn_df, idx_col) for idx_col in idx_cols)))
paths, stats = zip(*starmap(
_create_job_dir, zip(sub_idxs, sub_rids, sub_sids_lst, sub_idxs_lst)))
rxn_df = update_table_column_keys(rxn_df,
(RXN_PATH_COL_KEY, RXN_STAT_COL_KEY))
rxn_df = update_column_by_index(rxn_df, row_indices(sub_rxn_df),
RXN_PATH_COL_KEY, paths)
rxn_df = update_column_by_index(rxn_df, row_indices(sub_rxn_df),
RXN_STAT_COL_KEY, stats)
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv)
def reactions_run(cls, rxn_csv, rxn_rng_strs, tpl_txt, nodes, job_argv,
logger):
""" reactions parallel runner
"""
assert cls in ('abstraction', 'addition', 'migration')
if not hasattr(job_argv, '__iter__'):
raise ValueError("Missing run command.")
sid_cols = dict(REACTION_SID_COL_KEYS)[cls]
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
col_keys = table_column_keys(rxn_df)
assert (RID_COL_KEY in col_keys and RXN_IDX_COL_KEY in col_keys and
RXN_PATH_COL_KEY in col_keys and RXN_STAT_COL_KEY in col_keys)
if rxn_rng_strs:
rxn_idxs = _interpret_range_strings(rxn_rng_strs)
logger.info("Interpreted reaction index range argument: {:s}"
.format(repr(rxn_idxs)))
else:
logger.info("No reaction range argument. Running all reactions.")
rxn_idxs = table_column(rxn_df, RXN_IDX_COL_KEY)
logger.info("Reading template file from {:s}".format(tpl_txt))
tpl_str = read_file(tpl_txt)
def _submit_job(idx, rid, path, sids, worker_id):
node = worker_id
logger.info("reaction {:d}: {:s} assigned to node {:s} worker"
.format(idx, rid, node))
inp_str = tpl_str.format(nodes=node, **dict(zip(sid_cols, sids)))
inp_fpath = os.path.join(path, 'input.dat')
logger.info(" node {:s} worker writing {:s}".format(node, inp_fpath))
write_file(inp_fpath, inp_str)
cmd_str = ' '.join(job_argv)
logger.info(" node {:s} worker running {:s} in {:s}"
.format(node, cmd_str, path))
ret = RXN_RAN_VAL
try:
subprocess.check_call(job_argv, cwd=path)
except Exception as err:
            logger.info("  failed on node {:s} with error '{:s}'"
                        .format(node, str(err)))
ret = RXN_FAILED_VAL
return ret
run_rxn_df = sql_where_eq(
sql_where_in(rxn_df, RXN_IDX_COL_KEY, rxn_idxs),
RXN_STAT_COL_KEY,
RXN_CREATED_VAL)
run_idxs = tuple(sql_select_one(run_rxn_df, RXN_IDX_COL_KEY))
run_rids = tuple(sql_select_one(run_rxn_df, RID_COL_KEY))
run_paths = tuple(sql_select_one(run_rxn_df, RXN_PATH_COL_KEY))
run_sids_lst = tuple(zip(*(
sql_select_one(run_rxn_df, sid_col) for sid_col in sid_cols)))
logger.info("Stepping through run directories to submit jobs")
stats = tag_team_starmap(_submit_job,
zip(run_idxs, run_rids, run_paths, run_sids_lst),
worker_ids=nodes)
rxn_df = update_column_by_index(rxn_df, row_indices(run_rxn_df),
RXN_STAT_COL_KEY, stats)
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv)
def reactions_runner(cls, reaction_xyz_strings, reaction_input_string,
sid_cols, idx_cols):
""" run reactions
"""
assert cls in ('abstraction', 'addition', 'migration')
def _run(spc_csv, rxn_csv, tpl_txt, rxn_rng_strs, nodes, run_dir, id2path,
job_argv, logger):
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
col_keys = table_column_keys(rxn_df)
assert 'reaction_id' in col_keys and RXN_IDX_COL_KEY in col_keys
logger.info("Reading in species geometries from {:s}".format(spc_csv))
mgeo_dct = read_geometries(spc_csv)
logger.info("Reading template file from {:s}".format(tpl_txt))
tpl_str = read_file(tpl_txt)
node_str = ', '.join(nodes)
logger.info("Nodes: {:s}".format(node_str))
tpl_keyval_dct = {'nodes': node_str}
if rxn_rng_strs:
rxn_idxs = _interpret_range_strings(rxn_rng_strs)
logger.info("Interpreted reaction index range argument: {:s}"
.format(repr(rxn_idxs)))
else:
logger.info("No reaction range argument. Running all reactions.")
rxn_idxs = table_column(rxn_df, RXN_IDX_COL_KEY)
if not os.path.exists(run_dir):
logger.info("Creating run directory {:s}".format(run_dir))
os.mkdir(run_dir)
logger.info("Writing job files")
rxn_lkp = table_lookup_dictionary(rxn_df, RXN_IDX_COL_KEY)
for idx in rxn_idxs:
row = rxn_lkp[idx]
rid = row['reaction_id']
logger.info("reaction {:d}: {:s}".format(idx, rid))
sids = tuple(map(row.__getitem__, sid_cols))
idxs = tuple(map(row.__getitem__, idx_cols))
logger.info(' indices: {:s}'.format(str(idxs)))
dxyz_dct = reaction_xyz_strings(sids, idxs, mgeo_dct)
if dxyz_dct:
dxyz_sids = dxyz_dct.keys()
dxyzs = dxyz_dct.values()
dname = id2path(rid)
dpath = os.path.join(run_dir, dname)
logger.info(" Creating job directory {:s}".format(dpath))
if not os.path.exists(dpath):
os.mkdir(dpath)
fnames = tuple(map('{:s}.xyz'.format, map(id2path, dxyz_sids)))
fpaths = tuple(os.path.join(dpath, fname) for fname in fnames)
for fpath, dxyz in zip(fpaths, dxyzs):
logger.info(" Writing {:s}".format(fpath))
write_file(fpath, dxyz)
inp_str = reaction_input_string(sids, tpl_str, tpl_keyval_dct)
inp_fpath = os.path.join(dpath, 'input.dat')
logger.info(" Writing {:s}".format(inp_fpath))
write_file(inp_fpath, inp_str)
rxn_df.loc[idx, 'created'] = True
rxn_df.loc[idx, 'path'] = dpath
else:
logger.info(" Failed to create .xyz files")
rxn_df.loc[idx, 'created'] = False
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv)
owd = os.getcwd()
logger.info("Running job command in successfully created directories")
rxn_lkp = table_lookup_dictionary(rxn_df, RXN_IDX_COL_KEY)
for idx in rxn_idxs:
row = rxn_lkp[idx]
if row['created']:
rid = row['reaction_id']
logger.info("reaction {:d}: {:s}".format(idx, rid))
path = row['path']
logger.info(' entering {:s}'.format(path))
os.chdir(path)
cmd_str = ' '.join(job_argv)
logger.info(" running command '{:s}' in {:s}"
.format(cmd_str, path))
try:
subprocess.check_call(job_argv)
except Exception as err:
                    logger.info("  command '{:s}' failed with error '{:s}'"
                                .format(cmd_str, str(err)))
os.chdir(owd)
return _run
# non-logging functions
def write_file(fpath, contents, mode='w'):
""" write contents to a file
"""
    with open(fpath, mode) as fle:
        fle.write(str(contents))
def read_file(file_txt):
""" read file contents as a string
"""
with open(file_txt, encoding='utf8', errors='ignore') as file_obj:
file_str = file_obj.read()
return file_str
def read_json(fpath):
""" read json file
"""
    with open(fpath) as fle:
        return json.load(fle)
def write_table_to_csv(table_df, table_csv, float_fmt=None):
""" write table to csv
"""
timestamp_if_exists(table_csv)
table_df.to_csv(table_csv, index=False, float_format=float_fmt)
def timestamp_if_exists(fpath):
""" open a file, avoiding overwrites if requested
"""
if os.path.isfile(fpath):
time_stamp = time.strftime("%Y%m%d-%H%M%S")
new_fpath = "{:s}_{:s}".format(fpath, time_stamp)
os.rename(fpath, new_fpath)
# helpers
def _interpret_template_key_values(tmp_keyval_str):
tmp_keyval_dct = dict(
(strip_spaces(s) for s in split(':', kv))
for kv in split('|', tmp_keyval_str))
return tmp_keyval_dct
def _interpret_range_strings(rng_strs):
def _interpret_range_string(rng_str):
split_rng = split('-', rng_str)
if len(split_rng) == 1:
rng = [int(split_rng[-1])]
elif len(split_rng) == 2:
start, stop = map(int, split_rng)
rng = list(range(start, stop+1))
else:
raise ValueError("Failed to interet index ranges")
return rng
return tuple(chain(*map(_interpret_range_string, rng_strs)))
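# Hedged example (not part of the original source), assuming the imported `split`
# behaves like str.split on its delimiter: _interpret_range_strings expands
# dash-delimited index ranges into a flat tuple of integers.
#   _interpret_range_strings(['1-3', '7'])   # -> (1, 2, 3, 7)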
|
from .base import Base
from abc import abstractmethod
class Routing(Base):
def __init__(self, di):
super().__init__(di)
@abstractmethod
def handler_classes(self, configuration):
pass
@abstractmethod
def handle(self, input_output):
pass
def build_handler(self, handler_class, configuration=None):
if configuration is None:
configuration = self._configuration
handler = self._di.build(handler_class, cache=False)
handler_configuration = {}
for key in handler._configuration_defaults.keys():
if key in configuration:
handler_configuration[key] = configuration[key]
for key in handler._global_configuration_defaults.keys():
if key in configuration:
handler_configuration[key] = configuration[key]
handler.configure(self._finalize_configuration_for_sub_handler(handler_configuration, handler_class))
return handler
def _finalize_configuration_for_sub_handler(self, configuration, handler_class):
return configuration
def configure(self, configuration):
# we need to completely clobber the base configuration process because it expects to have
# the list of all allowed configurations. We don't know what that list is - rather, we
# just need to fulfill the requirements of the handlers we'll be routing to.
# We also want to make it possible for handlers that extend this to still define their
# own possible configuration values. Therefore, we'll loop over all of the handlers
# which we might route to, make them, have them check the configs, and let them throw exceptions
# as needed. Finally we'll figure out what configs may not have been "used" by a child handler
# and see if those are in our own configuration - if not, we'll throw an "Unknown config" exception
# First, let's check the configuration for the handlers, which is just a matter of building
        # the handlers (they will automatically throw exceptions for invalid configurations as part
# of this process)
used_configs = list(self._global_configuration_defaults.keys())
used_configs.extend(self._configuration_defaults.keys())
for handler_class in self.handler_classes(configuration):
handler = self.build_handler(handler_class, configuration=configuration)
used_configs.extend(handler._configuration_defaults.keys())
for key in configuration.keys():
if key not in used_configs and key not in self._global_configuration_defaults:
class_name = self.__class__.__name__
raise KeyError(f"Attempt to set unkown configuration setting '{key}' for handler '{class_name}'")
self._check_configuration(configuration)
self._configuration = self._finalize_configuration(self.apply_default_configuation(configuration))
def _check_configuration(self, configuration):
super()._check_configuration(configuration)
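# Hedged sketch (not part of the original source): a minimal concrete router showing
# the two abstract hooks. The handler class names and the dispatch/call convention
# are illustrative assumptions, not part of this module.
#   class SimpleRouting(Routing):
#       def handler_classes(self, configuration):
#           return [ListHandler, CreateHandler]
#       def handle(self, input_output):
#           handler = self.build_handler(ListHandler)
#           return handler(input_output)  # handler call signature is an assumption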
|
"""Logging utilities."""
import logging
def all_loggers(root=True, placeholders=False):
"""Yield all loggers."""
logger_manager = logging.Logger.manager
if root:
yield 'root', logger_manager.root
    for name, logger in logger_manager.loggerDict.items():
if placeholders or isinstance(logger, logging.Logger):
yield name, logger
def loggers_at_level(level, root=False):
"""
    Yield all loggers at a particular level.
Generator yielding each logger that has a level
set to level.
Args:
level (int): The logging level to search for
root (bool): Include the root logger.
Returns:
tuple: Yields a tuple containing (name, logger)
"""
for name, logger in all_loggers(root):
if logger.level == level:
yield name, logger
def loggers_not_at_level(level, root=False):
"""
    Yield all loggers not at a particular level.
Generator yielding each logger that has a level
not set to level.
Args:
level (int): The logging level to search for
root (bool): Include the root logger
Returns:
tuple: Yields a tuple containing (name, logger)
"""
for name, logger in all_loggers(root):
if logger.level != level:
yield name, logger
def loggers_with_handlers(root=False):
"""
    Yield all loggers that have an associated handler.
Generator yielding each logger that has an
attached handler
Args:
root (bool): Include the root logger
Returns:
tuple: Yields a tuple containing (name, logger)
"""
for name, logger in all_loggers(root):
if logger.handlers:
yield name, logger
def loggers_without_handlers(root=False):
"""
    Yield all loggers that don't have an associated handler.
Generator yielding each logger that has not got an
attached handler
Args:
root (bool): Include the root logger
Returns:
tuple: Yields a tuple containing (name, logger)
"""
for name, logger in all_loggers(root):
if not logger.handlers:
yield name, logger
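# Hedged usage sketch (not part of the original source): listing loggers by level and
# flagging loggers that have no handler attached.
#   import logging
#   for name, logger in loggers_at_level(logging.DEBUG):
#       print("DEBUG level:", name)
#   for name, logger in loggers_without_handlers():
#       print("no handlers:", name)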
|
import requests
import json
import sys
import uuid
# from marconi.tests.functional import helpers
# from endpoints import IDENTITY_URL,MARCONI_URL
# from authentication import auth
MARCONI_URL = 'http://127.0.0.1:8888/v1/queues/'
# Create Queue
def create_queue(queue_name,custom_uuid):
Final_URL = MARCONI_URL+'{0}'.format(queue_name)
_headers={'Client-ID':str(custom_uuid)}
response = requests.put(Final_URL,verify=False,headers=_headers)
# print response.headers,response.content,response.status_code
# Delete Queue
def delete_queue(queue_name,custom_uuid):
# XAuthToken = response['access']['token']['id']
Final_URL = MARCONI_URL+'{0}'.format(queue_name)
_headers={'Client-ID':str(custom_uuid)}
response = requests.delete(Final_URL,verify=False,headers=_headers)
# print response.headers,response.content,response.status_code
# Insert Messages into the Queue
def insert_messages(data,queue_name,custom_uuid):
# XAuthToken = response['access']['token']['id']
URL = MARCONI_URL+'{0}/messages'.format(queue_name)
_headers={'Client-ID':str(custom_uuid)}
# print data
# print type(data)
response=requests.post(URL,data=json.dumps(data),headers=_headers)
# print response.headers,response.content,response.status_code
# Claim Messages
def claim_messages(custom_uuid,data,queue_name,limit):
# XAuthToken = response['access']['token']['id']
URL = MARCONI_URL+'{0}/claims?limit={1}'.format(queue_name,limit)
_headers={'Client-ID':str(custom_uuid)}
# _headers={'content-type': 'application/json','Client-ID':'PingPongBot','X-Auth-Token':XAuthToken,'X-Project-Id':234}
response = requests.post(URL,data=str(data),headers=_headers,verify=False)
# print q.headers,q.content,q.status_code
if response.status_code != 204:
data = response.json()
# print data
message = json.loads(json.dumps(data[0]))
firstsplit = message['href'].split('/')
second_split = firstsplit[len(firstsplit)-1].split('?claim_id=')
# message id followed by claim id
# print second_split[0], second_split[1]
return (message['body'],second_split[0],second_split[1])
else:
return (response.status_code,0,0)
# print (response,'0','0',response.status_code)
# Delete Message with claim
def delete_messages_with_claim(custom_uuid,message_ids,queue_name,claim_id):
# XAuthToken = response['access']['token']['id']
URL = MARCONI_URL+'{0}/messages/{1}?{2}'.format(queue_name,message_ids,claim_id)
# _headers={'content-type': 'application/json','Client-ID':'PingPongBot','X-Auth-Token':XAuthToken,'X-Project-Id':234}
_headers={'Client-ID':str(custom_uuid)}
response = requests.delete(URL,headers=_headers,verify=False)
# print response.headers,response.content,response.status_code
def delete_claim(custom_uuid, claim_id, queue_name):
# XAuthToken = response['access']['token']['id']
URL = MARCONI_URL+'{0}/claims/{1}'.format(queue_name,claim_id)
# _headers={'content-type': 'application/json','Client-ID':'PingPongBot','X-Auth-Token':XAuthToken,'X-Project-Id':234}
_headers={'Client-ID':str(custom_uuid)}
response = requests.delete(URL,headers=_headers,verify=False)
# print response.headers,response.content,response.status_code
def request_body(event,vs,time):
body = {'event':event,
'vs':vs,
'time':time}
message = {'body' : body,
'ttl' : 100}
return [message]
def request_body_splitfile(filename,filelocation):
body = {'filename':filename,
'filelocation':filelocation}
message = {'body': body,
'ttl': 100}
return [message]
def request_body_queue(filename,filelocation):
body = {'filename':filename,
'filelocation':filelocation}
return body
def construct_json(body1):
message = {'body' : body1,
'ttl' : 100}
return message
def for_claim():
data = {'ttl':100,'grace':100}
return json.dumps(data)
# def main():
# # body = {}
# message_list=[]
# custom_uuid = uuid.uuid4()
# create_queue('sriram',custom_uuid)
# body1 = {'awesome':'True'}
# body = construct_json(body1)
# message_list.append(body)
# # message_list = helpers.create_message_body(messagecount=1)
# insert_messages(message_list,'sriram',custom_uuid)
# (message_id,claim_id) = claim_messages(custom_uuid,for_claim(),'sriram')
# delete_messages_with_claim(custom_uuid,message_id,'sriram',claim_id)
# delete_queue('sriram',custom_uuid)
# if __name__ == '__main__':
# main()
|
# 2015-07-04
# W = 16
W = 2 * 10 ** 3
# num_best = 0
# max_profit = 0
# n = 4
n = 500
class SolutionContainer:
def __init__(self, max_profit, best_set, num_best):
self.max_profit = max_profit
self.best_set = best_set
self.num_best = num_best
def getMaxProfit(self):
return self.max_profit
def setMaxProfit(self, value):
self.max_profit = value
def getBestSet(self):
return self.best_set
def setBestSet(self, best_set):
self.best_set = best_set
def getNumBest(self):
return self.num_best
def setNumBest(self, num_best):
self.num_best = num_best
# indexing starts at one
def knapsack(i, profit, weight, W, solution_container, p, w):
if (weight <= W and profit > solution_container.getMaxProfit()):
solution_container.setMaxProfit(profit)
solution_container.setNumBest(i)
solution_container.setBestSet(include[ : ])
if promising(i, W, solution_container, p, w, profit, weight) == True:
include[i + 1 - 1] = True
knapsack(i + 1, profit + p[i + 1 - 1], weight + w[i + 1 - 1], W, solution_container, p, w)
include[i + 1 - 1] = False
knapsack(i + 1, profit, weight, W, solution_container, p, w)
def promising(i, W, solution_container, p, w, profit, weight):
j = None
k = None
tot_weight = None
bound = None
weight = weight
profit = profit
if (weight >= W):
return False
else:
j = i + 1
# bound = p[i]
# tot_weight = w[i]
bound = profit
tot_weight = weight
# print "i, j:", i, j
# greedy fill
while (j <= n and tot_weight + w[j - 1] <= W):
tot_weight = tot_weight + w[j - 1]
bound = bound + p[j - 1]
j = j + 1
# print "curr. j:", j
# print "total weight:", tot_weight
# print i, j
k = j
# print "pre-fractional-add bound:", bound
# print "residual capacity:", W - tot_weight
# print "k:", k
if (k <= n):
# print p[k - 1], w[k - 1]
bound = bound + (W - tot_weight) * p[k - 1] / (1.0 * w[k - 1])
# print bound, solution_container.getMaxProfit()
return bound > solution_container.getMaxProfit()
p = []
w = []
include = [False] * n
import random
p_w_pair_list = []
for i in range(n):
profit = random.randint(1, 100)
weight = random.randint(1, W)
p_w_pair = (profit, weight)
p_w_pair_list.append(p_w_pair)
# print p_w_pair_list
sorted_p_w_pair_list = sorted(p_w_pair_list, key = lambda x: x[0] / (x[1] * 1.0), reverse = True)
# print [x[0] / (x[1] * 1.0) for x in sorted_p_w_pair_list]
"""
sorted_p_w_pair_list = [(100, 29), (94, 49), (100, 60), (82, 52), (35, 26), (32, 30), (65, 81), (65, 86), (59, 79), (26, 38), (42, 67), (50, 98), (10, 22), (33, 96), (16, 48), (18, 66), (11, 59), (14, 96), (4, 41), (7, 77)]
"""
# sorted_p_w_pair_list = [(40, 2), (30, 5), (50, 10), (10, 5)]
for p_w_pair in sorted_p_w_pair_list:
profit, weight = p_w_pair
p.append(profit)
w.append(weight)
# print sorted_p_w_pair_list
solution_container = SolutionContainer(0, [False] * n, 0)
knapsack(0, 0, 0, W, solution_container, p, w)
"""
for i in range(2000):
knapsack(0, 0, 0, W, solution_container, p, w)
"""
# print solution_container.getMaxProfit()
"""
for i in range(solution_container.getNumBest()):
print solution_container.getBestSet()[i]
"""
# print p, w
best_set = solution_container.getBestSet()
# print best_set
include_indices = [x for x in range(n) if best_set[x] == True]
profit_values = [p[x] for x in include_indices]
total_profit = sum(profit_values)
weight_values = [w[x] for x in include_indices]
total_weight = sum(weight_values)
print(W)
# print "weight values:", w
# print "included item profit values:", profit_values
print(total_profit)
# print "included item weight values:", weight_values
print(total_weight)
|
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
# -*- coding: utf-8 -*-
"""
Function implementation test.
Usage: resilient-circuits selftest -l fn_api_void
"""
import logging
LOG = logging.getLogger(__name__)
from resilient_lib import RequestsCommon, validate_fields
from fn_api_void.lib.apivoid_helper import make_apivoid_api_call
PACKAGE_NAME = "fn_api_void"
def selftest_function(opts):
"""
Placeholder for selftest function. An example use would be to test package api connectivity.
Suggested return values are be unimplemented, success, or failure.
"""
options = opts.get(PACKAGE_NAME, {})
rc = RequestsCommon(opts, options)
reason = "Test was successful!"
try:
# Get and validate app configs
valid_app_configs = validate_fields(["apivoid_base_url", "apivoid_sub_url", "apivoid_api_key"], options)
# Execute api call
res = make_apivoid_api_call(
base_url=valid_app_configs.get("apivoid_base_url"),
sub_url=valid_app_configs.get("apivoid_sub_url"),
query_type="selftest",
value=True,
api_key=valid_app_configs.get("apivoid_api_key"),
rc=rc
)
res = res.json()
if res.get("success"):
LOG.info("%s\nCredits Remaining:\t%s\nEstimated Queries:\t%s", reason, res.get("credits_remained", "Unknown"), res.get("estimated_queries", "Unknown"))
return {"state": "success"}
elif res.get("error"):
reason = res.get("error")
LOG.error(reason)
return {"state": "failure", "reason": reason}
reason = "Test was not successful. An unknown error occurred"
LOG.error(reason)
return {"state": "failure", "reason": reason}
except Exception as err:
LOG.error(err)
return {"state": "failure", "reason": err}
|
__author__ = 'wjimenez'
|
# Copyright 2018 Google LLC
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# =============================================================================
"""Benchmarks for TensorFlow.js Layers.
These benchmarks compare the inference and training speed of Keras models of
varying size and architecture, between Python and browser.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import json
import os
import time
from tensorflow import keras
import numpy as np
import tensorflow as tf
# Use TF Eager execution so that the comparison against TF.js is apples-to-apples.
tf.enable_eager_execution()
from tensorflow.python.client import device_lib
import tensorflowjs as tfjs
_FIT_BURNIN_EPOCHS = 1 # How many epochs to call fit() for before timing fit().
_PREDICT_BURNINS = 1 # How many predict() runs to do before timing predict().
_PREDICT_RUNS = 20 # How many runs of predict() to average over.
def benchmark_and_serialize_model(model_name,
description,
model_fn,
input_shape,
target_shape,
optimizer,
loss,
batch_size,
train_epochs,
artifacts_dir):
"""Benchmark a model's fit() and predict() calls; serialize the model.
  Args:
    model_name: A short identifier for the model; stored in the saved metadata.
    description: Human-readable description of the model; also stored in the
      saved metadata.
    model_fn: A function that takes two arguments: `input_shape` and
      `target_shape`, and returns a `keras.Model` instance. The model does not
      need to have been compiled.
input_shape: Input shape as a `list` or `tuple` of integers.
target_shape: Target shape as a `list` or `tuple` of integers.
optimizer: The optimizer to use during training.
loss: The loss function to use during training.
batch_size: Batch size to use for training.
train_epochs: Number of training epochs, not including the burn-in epoch(s).
artifacts_dir: Directory to save the data in. The data includes:
* topology and weights of the models, in TensorFlow.js format
* metadata and benchmark information in a file named `data.json`,
including:
- the name and description of the model
- the name of the optimizer used during benchmarking of training
- loss value
- the input and output shapes of the model
- benchmark results from Python Keras.
Returns:
1. Total fit() time per epoch, averaged over the epochs not including the
burn-in one.
2. Average predict() time over all the _PREDICT_RUNS.
"""
model = model_fn(input_shape, target_shape)
if train_epochs:
model.compile(optimizer=optimizer, loss=loss)
xs, ys = _get_random_inputs_and_outputs(model, batch_size)
# Perform fit() burn-in.
if train_epochs:
model.fit(xs, ys, batch_size=batch_size, epochs=_FIT_BURNIN_EPOCHS)
# Time fit().
if train_epochs:
train_t_begin = time.time()
model.fit(xs, ys, batch_size=batch_size, epochs=train_epochs)
train_t_end = time.time()
# Perform predict() burn-in.
for _ in range(_PREDICT_BURNINS):
model.predict(xs)
# Time predict() by averaging.
predict_t_begin = time.time()
for _ in range(_PREDICT_RUNS):
model.predict(xs)
predict_t_end = time.time()
# Save the model and weights.
tfjs.converters.save_keras_model(model, artifacts_dir)
# Save data about the model and benchmark results.
if train_epochs:
train_time = (train_t_end - train_t_begin) / train_epochs
else:
train_time = None
predict_time = (predict_t_end - predict_t_begin) / _PREDICT_RUNS
data = {
'name': model_name,
'description': description,
'optimizer': optimizer.__class__.__name__,
'loss': loss,
'input_shape': input_shape,
'target_shape': target_shape,
'batch_size': batch_size,
'train_epochs': train_epochs,
'train_time': train_time,
'predict_time': predict_time,
}
with open(os.path.join(artifacts_dir, 'data.json'), 'wt') as f:
f.write(json.dumps(data))
return train_time, predict_time
def _get_random_inputs_and_outputs(model, batch_size):
"""Synthesize random inputs and outputs based on the model's specs.
Args:
model: An instance of keras Model.
batch_size: Desired batch size.
Returns:
xs: Synthesized random feature tensor(s).
ys: Synthesized random target tensor(s).
"""
input_shapes = [[
int(d) for d in list(inp.shape[1:])] for inp in model.inputs]
xs = []
for in_shape in input_shapes:
x = np.random.rand(*([batch_size] + in_shape))
xs.append(x)
if len(xs) == 1:
xs = xs[0]
output_shapes = [[
int(d) for d in list(inp.shape[1:])] for inp in model.outputs]
ys = []
for output_shape in output_shapes:
y = np.random.rand(*([batch_size] + output_shape))
ys.append(y)
if len(ys) == 1:
ys = ys[0]
return xs, ys
def dense_tiny_model_fn(input_shape, target_shape):
assert len(target_shape) == 1
input_layer = keras.Input(input_shape)
dense_1 = keras.layers.Dense(200, activation='relu')
dense_2 = keras.layers.Dense(target_shape[0])
output = dense_2(dense_1(input_layer))
model = keras.Model(input_layer, output)
return model
def dense_large_model_fn(input_shape, target_shape):
assert len(target_shape) == 1
input_layer = keras.Input(input_shape)
dense_1 = keras.layers.Dense(4000, activation='relu')
dense_2 = keras.layers.Dense(1000, activation='relu')
dense_3 = keras.layers.Dense(500, activation='relu')
dense_4 = keras.layers.Dense(target_shape[0])
output = dense_4(dense_3(dense_2(dense_1(input_layer))))
model = keras.Model(input_layer, output)
return model
def convolutional_model_fn(num_filters, input_shape, target_shape):
"""2D convolutional model."""
kernel_size = 3
pool_size = 2
assert len(target_shape) == 1
num_classes = target_shape[0]
layers = [
keras.layers.Conv2D(num_filters, kernel_size,
padding='valid',
input_shape=input_shape),
keras.layers.Activation('relu'),
keras.layers.Conv2D(num_filters, kernel_size),
keras.layers.Activation('relu'),
keras.layers.MaxPooling2D(pool_size=pool_size),
keras.layers.Flatten(),
keras.layers.Dense(128),
keras.layers.Activation('relu'),
keras.layers.Dense(num_classes),
keras.layers.Activation('softmax')
]
model = keras.models.Sequential(layers)
return model
def mobilenet_model_fn(input_shape, target_shape):
"""MobileNet: A ConvNet from Keras Applications."""
del input_shape, target_shape # Unused.
model = keras.applications.MobileNet(alpha=0.5)
return model
def attention_model_fn(input_shape, target_shape):
"""Attention-based translation model."""
del input_shape, target_shape # Unused.
model_json = '{"class_name":"Model","config":{"input_layers":[["input_1",0,0],["s0",0,0],["c0",0,0]],"name":"model_1","layers":[{"class_name":"InputLayer","inbound_nodes":[],"name":"input_1","config":{"dtype":"float32","name":"input_1","sparse":false,"batch_input_shape":[null,30,38]}},{"class_name":"InputLayer","inbound_nodes":[],"name":"s0","config":{"dtype":"float32","name":"s0","sparse":false,"batch_input_shape":[null,64]}},{"class_name":"Bidirectional","inbound_nodes":[[["input_1",0,0,{}]]],"name":"bidirectional_1","config":{"trainable":true,"name":"bidirectional_1","merge_mode":"concat","layer":{"class_name":"LSTM","config":{"stateful":false,"units":32,"activation":"tanh","recurrent_activation":"hard_sigmoid","dropout":0,"recurrent_dropout":0,"use_bias":true,"trainable":true,"recurrent_initializer":{"class_name":"Orthogonal","config":{"seed":null,"gain":1}},"bias_constraint":null,"unroll":false,"kernel_initializer":{"class_name":"VarianceScaling","config":{"seed":null,"distribution":"uniform","mode":"fan_avg","scale":1}},"unit_forget_bias":true,"bias_initializer":{"class_name":"Zeros","config":{}},"kernel_constraint":null,"activity_regularizer":null,"return_sequences":true,"recurrent_constraint":null,"recurrent_regularizer":null,"bias_regularizer":null,"go_backwards":false,"implementation":1,"name":"attLSTM_2","kernel_regularizer":null,"return_state":false}}}},{"class_name":"RepeatVector","inbound_nodes":[[["s0",0,0,{}]],[["attLSTM_1",0,0,{}]],[["attLSTM_1",1,0,{}]],[["attLSTM_1",2,0,{}]],[["attLSTM_1",3,0,{}]],[["attLSTM_1",4,0,{}]],[["attLSTM_1",5,0,{}]],[["attLSTM_1",6,0,{}]],[["attLSTM_1",7,0,{}]],[["attLSTM_1",8,0,{}]]],"name":"repeat_vector_1","config":{"n":30,"trainable":true,"name":"repeat_vector_1"}},{"class_name":"Concatenate","inbound_nodes":[[["bidirectional_1",0,0,{}],["repeat_vector_1",0,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",1,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",2,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",3,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",4,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",5,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",6,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",7,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",8,0,{}]],[["bidirectional_1",0,0,{}],["repeat_vector_1",9,0,{}]]],"name":"concatenate_1","config":{"trainable":true,"name":"concatenate_1","axis":-1}},{"class_name":"Dense","inbound_nodes":[[["concatenate_1",0,0,{}]],[["concatenate_1",1,0,{}]],[["concatenate_1",2,0,{}]],[["concatenate_1",3,0,{}]],[["concatenate_1",4,0,{}]],[["concatenate_1",5,0,{}]],[["concatenate_1",6,0,{}]],[["concatenate_1",7,0,{}]],[["concatenate_1",8,0,{}]],[["concatenate_1",9,0,{}]]],"name":"attDense_1","config":{"bias_constraint":null,"kernel_constraint":null,"units":10,"activity_regularizer":null,"use_bias":true,"bias_regularizer":null,"trainable":true,"activation":"tanh","name":"attDense_1","kernel_initializer":{"class_name":"VarianceScaling","config":{"seed":null,"distribution":"uniform","mode":"fan_avg","scale":1}},"kernel_regularizer":null,"bias_initializer":{"class_name":"Zeros","config":{}}}},{"class_name":"Dense","inbound_nodes":[[["attDense_1",0,0,{}]],[["attDense_1",1,0,{}]],[["attDense_1",2,0,{}]],[["attDense_1",3,0,{}]],[["attDense_1",4,0,{}]],[["attDense_1",5,0,{}]],[["attDense_1",6,0,{}]],[["attDense_1",7,0,{}]],[["attDense_1",8,0,{}]],[["attDense_1",9,0,{}]]],"name":"attDense_2","config":{"bias_constraint":null,"kernel_constraint":null,"units":1,"activity_regular
izer":null,"use_bias":true,"bias_regularizer":null,"trainable":true,"activation":"relu","name":"attDense_2","kernel_initializer":{"class_name":"VarianceScaling","config":{"seed":null,"distribution":"uniform","mode":"fan_avg","scale":1}},"kernel_regularizer":null,"bias_initializer":{"class_name":"Zeros","config":{}}}},{"class_name":"Activation","inbound_nodes":[[["attDense_2",0,0,{}]],[["attDense_2",1,0,{}]],[["attDense_2",2,0,{}]],[["attDense_2",3,0,{}]],[["attDense_2",4,0,{}]],[["attDense_2",5,0,{}]],[["attDense_2",6,0,{}]],[["attDense_2",7,0,{}]],[["attDense_2",8,0,{}]],[["attDense_2",9,0,{}]]],"name":"attention_weights","config":{"trainable":true,"activation":"softmax","name":"attention_weights"}},{"class_name":"Dot","inbound_nodes":[[["attention_weights",0,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",1,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",2,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",3,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",4,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",5,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",6,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",7,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",8,0,{}],["bidirectional_1",0,0,{}]],[["attention_weights",9,0,{}],["bidirectional_1",0,0,{}]]],"name":"dot_1","config":{"trainable":true,"name":"dot_1","normalize":false,"axes":1}},{"class_name":"InputLayer","inbound_nodes":[],"name":"c0","config":{"dtype":"float32","name":"c0","sparse":false,"batch_input_shape":[null,64]}},{"class_name":"LSTM","inbound_nodes":[[["dot_1",0,0,{}],["s0",0,0,{}],["c0",0,0,{}]],[["dot_1",1,0,{}],["attLSTM_1",0,0,{}],["attLSTM_1",0,2,{}]],[["dot_1",2,0,{}],["attLSTM_1",1,0,{}],["attLSTM_1",1,2,{}]],[["dot_1",3,0,{}],["attLSTM_1",2,0,{}],["attLSTM_1",2,2,{}]],[["dot_1",4,0,{}],["attLSTM_1",3,0,{}],["attLSTM_1",3,2,{}]],[["dot_1",5,0,{}],["attLSTM_1",4,0,{}],["attLSTM_1",4,2,{}]],[["dot_1",6,0,{}],["attLSTM_1",5,0,{}],["attLSTM_1",5,2,{}]],[["dot_1",7,0,{}],["attLSTM_1",6,0,{}],["attLSTM_1",6,2,{}]],[["dot_1",8,0,{}],["attLSTM_1",7,0,{}],["attLSTM_1",7,2,{}]],[["dot_1",9,0,{}],["attLSTM_1",8,0,{}],["attLSTM_1",8,2,{}]]],"name":"attLSTM_1","config":{"stateful":false,"units":64,"activation":"tanh","recurrent_activation":"hard_sigmoid","dropout":0,"recurrent_dropout":0,"use_bias":true,"trainable":true,"recurrent_initializer":{"class_name":"Orthogonal","config":{"seed":null,"gain":1}},"bias_constraint":null,"unroll":false,"kernel_initializer":{"class_name":"VarianceScaling","config":{"seed":null,"distribution":"uniform","mode":"fan_avg","scale":1}},"unit_forget_bias":true,"bias_initializer":{"class_name":"Zeros","config":{}},"kernel_constraint":null,"activity_regularizer":null,"return_sequences":false,"recurrent_constraint":null,"recurrent_regularizer":null,"bias_regularizer":null,"go_backwards":false,"implementation":1,"name":"attLSTM_1","kernel_regularizer":null,"return_state":true}},{"class_name":"Dense","inbound_nodes":[[["attLSTM_1",0,0,{}]],[["attLSTM_1",1,0,{}]],[["attLSTM_1",2,0,{}]],[["attLSTM_1",3,0,{}]],[["attLSTM_1",4,0,{}]],[["attLSTM_1",5,0,{}]],[["attLSTM_1",6,0,{}]],[["attLSTM_1",7,0,{}]],[["attLSTM_1",8,0,{}]],[["attLSTM_1",9,0,{}]]],"name":"attDense_3","config":{"bias_constraint":null,"kernel_constraint":null,"units":11,"activity_regularizer":null,"use_bias":true,"bias_regularizer":null,"trainable":true,"activation":"softmax","name":"attDense_3","kernel_initializer":{"class_name":"VarianceScaling","config":{"seed":null,"distribution":"unifor
m","mode":"fan_avg","scale":1}},"kernel_regularizer":null,"bias_initializer":{"class_name":"Zeros","config":{}}}}],"output_layers":[["attDense_3",0,0],["attDense_3",1,0],["attDense_3",2,0],["attDense_3",3,0],["attDense_3",4,0],["attDense_3",5,0],["attDense_3",6,0],["attDense_3",7,0],["attDense_3",8,0],["attDense_3",9,0]]}}';
model = keras.models.model_from_json(model_json)
return model
_RNN_TYPE_MAP = {
'SimpleRNN': keras.layers.SimpleRNN,
'GRU': keras.layers.GRU,
'LSTM': keras.layers.LSTM
}
def rnn_model_fn(rnn_type, input_shape, target_shape):
"""Recurrent neural network model."""
rnnConstructor = _RNN_TYPE_MAP[rnn_type]
layers = [rnnConstructor(target_shape[0], input_shape=input_shape)]
model = keras.models.Sequential(layers)
return model
def main():
benchmarks = dict()
benchmarks['metadata'] = {
'keras_version': keras.__version__,
'tensorflow_version': tf.__version__,
'tensorflow_uses_gpu': any(
'gpu' in d.name.lower() for d in device_lib.list_local_devices())
}
benchmarks['config'] = {
'FIT_BURNIN_EPOCHS': _FIT_BURNIN_EPOCHS,
'PREDICT_BURNINS': _PREDICT_BURNINS,
'PREDICT_RUNS': _PREDICT_RUNS
}
benchmarks['models'] = []
# Dense model.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
loss = 'mean_squared_error'
batch_size = 128
train_epochs = 10
input_shape = [100]
target_shape = [1]
names_fns_and_descriptions = [
('dense-tiny',
dense_tiny_model_fn,
'Input([%d]);Dense(200);Dense(%d)|%s|%s' %
(input_shape[0], target_shape[0], optimizer, loss)),
('dense-large',
dense_large_model_fn,
'Input([%d]);Dense(4000);Dense(1000);Dense(500);Dense(%d)|%s|%s' %
(input_shape[0], target_shape[0], optimizer, loss))]
for model_name, model_fn, description in names_fns_and_descriptions:
train_time, predict_time = (
benchmark_and_serialize_model(
model_name,
description,
model_fn,
input_shape,
target_shape,
optimizer,
loss,
batch_size,
train_epochs,
os.path.join(FLAGS.data_root, model_name)))
benchmarks['models'].append(model_name)
print('train_time = %g s' % train_time)
print('predict_time = %g s' % predict_time)
# Conv2d models.
optimizer = tf.train.AdamOptimizer()
loss = 'categorical_crossentropy'
input_shape = [28, 28, 1]
target_shape = [10]
names_fns_and_descriptions = [
("convolutional-%dfilters" % num_filters,
functools.partial(convolutional_model_fn, num_filters),
'Conv2D(%d,3);Conv2D(%d,3);MaxPooling2D(2);'
'Flatten();Dense(128);Dense(10)|%s|%s' %
(num_filters, num_filters, optimizer, loss)) for num_filters in
(1, 2, 4, 8, 16, 24, 26, 28, 30, 32)]
for model_name, model_fn, description in names_fns_and_descriptions:
train_time, predict_time = (
benchmark_and_serialize_model(
model_name,
description,
model_fn,
input_shape,
target_shape,
optimizer,
loss,
batch_size,
train_epochs,
os.path.join(FLAGS.data_root, model_name)))
benchmarks['models'].append(model_name)
print('train_time = %g s' % train_time)
print('predict_time = %g s' % predict_time)
# RNN models.
optimizer = tf.train.RMSPropOptimizer(0.01)
loss = 'categorical_crossentropy'
input_shape = [20, 20]
target_shape = [20]
batch_size = 128
train_epochs = 10
names_fns_and_descriptions = [
("rnn-%s" % rnn_type,
functools.partial(rnn_model_fn, rnn_type),
'%s(input_shape=%s, target_shape=%s)|%s|%s' %
(rnn_type, input_shape, target_shape, optimizer, loss))
for rnn_type in ('SimpleRNN', 'GRU', 'LSTM')]
for model_name, model_fn, description in names_fns_and_descriptions:
train_time, predict_time = (
benchmark_and_serialize_model(
model_name,
description,
model_fn,
input_shape,
target_shape,
optimizer,
loss,
batch_size,
train_epochs,
os.path.join(FLAGS.data_root, model_name)))
benchmarks['models'].append(model_name)
print('train_time = %g s' % train_time)
print('predict_time = %g s' % predict_time)
# Mobilenet (inference only).
input_shape = None # Determine from the Model object itself.
target_shape = None # Determine from the Model object itself.
batch_size = 8
train_epochs = 0
optimizer = None
loss = None
names_fns_and_descriptions = [[
'mobilenet',
mobilenet_model_fn,
'mobilenet']]
for model_name, model_fn, description in names_fns_and_descriptions:
train_time, predict_time = (
benchmark_and_serialize_model(
model_name,
description,
model_fn,
input_shape,
target_shape,
optimizer,
loss,
batch_size,
train_epochs,
os.path.join(FLAGS.data_root, model_name)))
benchmarks['models'].append(model_name)
if train_epochs > 0:
print('train_time = %g s' % train_time)
print('predict_time = %g s' % predict_time)
# Attention model
input_shape = None # Determine from the Model object itself.
target_shape = None # Determine from the Model object itself.
batch_size = 32
train_epochs = 0
optimizer = None
loss = None
names_fns_and_descriptions = [[
'attention',
attention_model_fn,
      'Attention-based translation model: Functional model with bidirectional LSTM layers']]
for model_name, model_fn, description in names_fns_and_descriptions:
train_time, predict_time = (
benchmark_and_serialize_model(
model_name,
description,
model_fn,
input_shape,
target_shape,
optimizer,
loss,
batch_size,
train_epochs,
os.path.join(FLAGS.data_root, model_name)))
benchmarks['models'].append(model_name)
if train_epochs > 0:
print('train_time = %g s' % train_time)
print('predict_time = %g s' % predict_time)
with open(os.path.join(FLAGS.data_root, 'benchmarks.json'), 'wt') as f:
json.dump(benchmarks, f)
if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='Benchmarks demo.')
parser.add_argument(
'data_root',
type=str,
help='Local path for saving the results of benchmarks.')
FLAGS, _ = parser.parse_known_args()
main()
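# Hedged usage note (not part of the original script): assuming this file is saved as
# benchmarks.py, it is driven from the command line with a single positional argument,
# e.g.
#
#   python benchmarks.py /tmp/tfjs-layers-benchmarks
#
# which writes per-model artifacts plus a benchmarks.json summary under that directory.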
|
''' 1. Ignore if it already exists '''
# Search @ class.AniImageBox
def __init__(self, layer = "UI"):
Window.__init__(self, layer)
# Add below
if app.ENABLE_SLOT_MACHINE_SYSTEM:
self.end_frame_event = None
self.key_frame_event = None
''' 2. Ignore if it already exists '''
# Search @ class.AniImageBox
def __del__(self):
Window.__del__(self)
# Add below
if app.ENABLE_SLOT_MACHINE_SYSTEM:
self.end_frame_event = None
self.key_frame_event = None
''' 3. Ignore if it already exists '''
# Search
def OnEndFrame(self):
pass
# Replace with
if app.ENABLE_SLOT_MACHINE_SYSTEM:
def OnEndFrame(self):
if self.end_frame_event:
self.end_frame_event()
def SetEndFrameEvent(self, event):
self.end_frame_event = event
def ResetFrame(self):
wndMgr.ResetFrame(self.hWnd)
def OnKeyFrame(self, cur_frame):
if self.key_frame_event:
self.key_frame_event(cur_frame)
def SetKeyFrameEvent(self, event):
self.key_frame_event = event
else:
def OnEndFrame(self):
pass
|
import os
import sys
import shutil
from pathlib import Path
from typing import Iterator
import numpy as np
from jina import Document
file_dir = Path(__file__).parent
sys.path.append(str(file_dir.parent))
def random_docs(num_docs, chunks_per_doc=5, embed_dim=10, jitter=1, start_id=0, embedding=True) -> Iterator['Document']:
next_chunk_doc_id = start_id + num_docs
for j in range(num_docs):
doc_id = start_id + j
d = Document(id=doc_id)
        d.text = 'hello world'
d.tags['id'] = doc_id
if embedding:
d.embedding = np.random.random([embed_dim + np.random.randint(0, jitter)])
d.update_content_hash()
for _ in range(chunks_per_doc):
chunk_doc_id = next_chunk_doc_id
c = Document(id=chunk_doc_id)
c.text = 'i\'m chunk %d from doc %d' % (chunk_doc_id, doc_id)
if embedding:
c.embedding = np.random.random([embed_dim + np.random.randint(0, jitter)])
c.tags['parent_id'] = doc_id
c.tags['id'] = chunk_doc_id
c.update_content_hash()
d.chunks.append(c)
next_chunk_doc_id += 1
yield d
def rm_files(file_paths):
for file_path in file_paths:
file_path = Path(file_path)
if file_path.exists():
if file_path.is_file():
os.remove(file_path)
elif file_path.is_dir():
shutil.rmtree(file_path, ignore_errors=False, onerror=None)
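if __name__ == '__main__':
    # Hedged demo (not part of the original helpers): materialize two random docs and
    # print the id and chunk count of each; embeddings are skipped to keep it light.
    for demo_doc in random_docs(2, chunks_per_doc=3, embedding=False):
        print(demo_doc.tags['id'], len(demo_doc.chunks))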
|
# coding: utf-8
"""
UCS Starship API
This is the UCS Starship REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class TechsupportmanagementTechSupportStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoMoRef',
'tags': 'list[MoTag]',
'device_registration': 'MoMoRef',
'file_name': 'str',
'reason': 'str',
'request_ts': 'datetime',
'status': 'str',
'tech_support_request': 'MoMoRef',
'techsupport_download_url': 'str'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'device_registration': 'DeviceRegistration',
'file_name': 'FileName',
'reason': 'Reason',
'request_ts': 'RequestTs',
'status': 'Status',
'tech_support_request': 'TechSupportRequest',
'techsupport_download_url': 'TechsupportDownloadUrl'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, device_registration=None, file_name=None, reason=None, request_ts=None, status=None, tech_support_request=None, techsupport_download_url=None):
"""
TechsupportmanagementTechSupportStatus - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._device_registration = None
self._file_name = None
self._reason = None
self._request_ts = None
self._status = None
self._tech_support_request = None
self._techsupport_download_url = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if device_registration is not None:
self.device_registration = device_registration
if file_name is not None:
self.file_name = file_name
if reason is not None:
self.reason = reason
if request_ts is not None:
self.request_ts = request_ts
if status is not None:
self.status = status
if tech_support_request is not None:
self.tech_support_request = tech_support_request
if techsupport_download_url is not None:
self.techsupport_download_url = techsupport_download_url
@property
def account_moid(self):
"""
Gets the account_moid of this TechsupportmanagementTechSupportStatus.
The Account ID for this managed object.
:return: The account_moid of this TechsupportmanagementTechSupportStatus.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this TechsupportmanagementTechSupportStatus.
The Account ID for this managed object.
:param account_moid: The account_moid of this TechsupportmanagementTechSupportStatus.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this TechsupportmanagementTechSupportStatus.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this TechsupportmanagementTechSupportStatus.
:rtype: list[MoMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this TechsupportmanagementTechSupportStatus.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this TechsupportmanagementTechSupportStatus.
:type: list[MoMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this TechsupportmanagementTechSupportStatus.
The time when this managed object was created.
:return: The create_time of this TechsupportmanagementTechSupportStatus.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this TechsupportmanagementTechSupportStatus.
The time when this managed object was created.
:param create_time: The create_time of this TechsupportmanagementTechSupportStatus.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this TechsupportmanagementTechSupportStatus.
The time when this managed object was last modified.
:return: The mod_time of this TechsupportmanagementTechSupportStatus.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this TechsupportmanagementTechSupportStatus.
The time when this managed object was last modified.
:param mod_time: The mod_time of this TechsupportmanagementTechSupportStatus.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this TechsupportmanagementTechSupportStatus.
A unique identifier of this Managed Object instance.
:return: The moid of this TechsupportmanagementTechSupportStatus.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this TechsupportmanagementTechSupportStatus.
A unique identifier of this Managed Object instance.
:param moid: The moid of this TechsupportmanagementTechSupportStatus.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this TechsupportmanagementTechSupportStatus.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this TechsupportmanagementTechSupportStatus.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this TechsupportmanagementTechSupportStatus.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this TechsupportmanagementTechSupportStatus.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this TechsupportmanagementTechSupportStatus.
An array of owners which represent effective ownership of this object.
:return: The owners of this TechsupportmanagementTechSupportStatus.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this TechsupportmanagementTechSupportStatus.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this TechsupportmanagementTechSupportStatus.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this TechsupportmanagementTechSupportStatus.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this TechsupportmanagementTechSupportStatus.
:rtype: MoMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this TechsupportmanagementTechSupportStatus.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this TechsupportmanagementTechSupportStatus.
:type: MoMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this TechsupportmanagementTechSupportStatus.
        An array of tags, which allow adding key/value metadata to managed objects.
:return: The tags of this TechsupportmanagementTechSupportStatus.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this TechsupportmanagementTechSupportStatus.
        An array of tags, which allow adding key/value metadata to managed objects.
:param tags: The tags of this TechsupportmanagementTechSupportStatus.
:type: list[MoTag]
"""
self._tags = tags
@property
def device_registration(self):
"""
Gets the device_registration of this TechsupportmanagementTechSupportStatus.
:return: The device_registration of this TechsupportmanagementTechSupportStatus.
:rtype: MoMoRef
"""
return self._device_registration
@device_registration.setter
def device_registration(self, device_registration):
"""
Sets the device_registration of this TechsupportmanagementTechSupportStatus.
:param device_registration: The device_registration of this TechsupportmanagementTechSupportStatus.
:type: MoMoRef
"""
self._device_registration = device_registration
@property
def file_name(self):
"""
Gets the file_name of this TechsupportmanagementTechSupportStatus.
Techsupport file name
:return: The file_name of this TechsupportmanagementTechSupportStatus.
:rtype: str
"""
return self._file_name
@file_name.setter
def file_name(self, file_name):
"""
Sets the file_name of this TechsupportmanagementTechSupportStatus.
Techsupport file name
:param file_name: The file_name of this TechsupportmanagementTechSupportStatus.
:type: str
"""
self._file_name = file_name
@property
def reason(self):
"""
Gets the reason of this TechsupportmanagementTechSupportStatus.
Reason for techsupport failure, if any
:return: The reason of this TechsupportmanagementTechSupportStatus.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this TechsupportmanagementTechSupportStatus.
Reason for techsupport failure, if any
:param reason: The reason of this TechsupportmanagementTechSupportStatus.
:type: str
"""
self._reason = reason
@property
def request_ts(self):
"""
Gets the request_ts of this TechsupportmanagementTechSupportStatus.
Indicates the time at which the techsupport request was initiated
:return: The request_ts of this TechsupportmanagementTechSupportStatus.
:rtype: datetime
"""
return self._request_ts
@request_ts.setter
def request_ts(self, request_ts):
"""
Sets the request_ts of this TechsupportmanagementTechSupportStatus.
Indicates the time at which the techsupport request was initiated
:param request_ts: The request_ts of this TechsupportmanagementTechSupportStatus.
:type: datetime
"""
self._request_ts = request_ts
@property
def status(self):
"""
Gets the status of this TechsupportmanagementTechSupportStatus.
        Status of techsupport collection. Valid values are Pending, CollectionInProgress, CollectionFailed, CollectionComplete, UploadInProgress, UploadPartsComplete, UploadFailed and Completed. The final status will be either CollectionFailed or UploadFailed if there is a failure and Completed if the request completed successfully and the file was uploaded to S3. All the remaining status values indicate the progress of techsupport collection.
:return: The status of this TechsupportmanagementTechSupportStatus.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this TechsupportmanagementTechSupportStatus.
        Status of techsupport collection. Valid values are Pending, CollectionInProgress, CollectionFailed, CollectionComplete, UploadInProgress, UploadPartsComplete, UploadFailed and Completed. The final status will be either CollectionFailed or UploadFailed if there is a failure and Completed if the request completed successfully and the file was uploaded to S3. All the remaining status values indicate the progress of techsupport collection.
:param status: The status of this TechsupportmanagementTechSupportStatus.
:type: str
"""
self._status = status
@property
def tech_support_request(self):
"""
Gets the tech_support_request of this TechsupportmanagementTechSupportStatus.
:return: The tech_support_request of this TechsupportmanagementTechSupportStatus.
:rtype: MoMoRef
"""
return self._tech_support_request
@tech_support_request.setter
def tech_support_request(self, tech_support_request):
"""
Sets the tech_support_request of this TechsupportmanagementTechSupportStatus.
:param tech_support_request: The tech_support_request of this TechsupportmanagementTechSupportStatus.
:type: MoMoRef
"""
self._tech_support_request = tech_support_request
@property
def techsupport_download_url(self):
"""
Gets the techsupport_download_url of this TechsupportmanagementTechSupportStatus.
The Url to download the techsupport file
:return: The techsupport_download_url of this TechsupportmanagementTechSupportStatus.
:rtype: str
"""
return self._techsupport_download_url
@techsupport_download_url.setter
def techsupport_download_url(self, techsupport_download_url):
"""
Sets the techsupport_download_url of this TechsupportmanagementTechSupportStatus.
The Url to download the techsupport file
:param techsupport_download_url: The techsupport_download_url of this TechsupportmanagementTechSupportStatus.
:type: str
"""
self._techsupport_download_url = techsupport_download_url
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, TechsupportmanagementTechSupportStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
#
# @lc app=leetcode id=72 lang=python3
#
# [72] Edit Distance
#
# https://leetcode.com/problems/edit-distance/description/
#
# algorithms
# Hard (40.50%)
# Likes: 2795
# Dislikes: 44
# Total Accepted: 213.4K
# Total Submissions: 526.3K
# Testcase Example: '"horse"\n"ros"'
#
# Given two words word1 and word2, find the minimum number of operations
# required to convert word1 to word2.
#
# You have the following 3 operations permitted on a word:
#
#
# Insert a character
# Delete a character
# Replace a character
#
#
# Example 1:
#
#
# Input: word1 = "horse", word2 = "ros"
# Output: 3
# Explanation:
# horse -> rorse (replace 'h' with 'r')
# rorse -> rose (remove 'r')
# rose -> ros (remove 'e')
#
#
# Example 2:
#
#
# Input: word1 = "intention", word2 = "execution"
# Output: 5
# Explanation:
# intention -> inention (remove 't')
# inention -> enention (replace 'i' with 'e')
# enention -> exention (replace 'n' with 'x')
# exention -> exection (replace 'n' with 'c')
# exection -> execution (insert 'u')
#
#
#
# @lc code=start
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
return self.dp_bottom_up(word1, word2)
def dp_bottom_up(self, word1: str, word2: str) -> int:
"""
dp bottom up solution
"""
memo = [[0] * (len(word2) + 1) for _ in range(len(word1) + 1)]
for i in range(len(word1) + 1):
memo[i][0] = i
for j in range(len(word2) + 1):
memo[0][j] = j
for i in range(1, len(word1) + 1):
for j in range(1, len(word2) + 1):
if word1[i - 1] == word2[j - 1]:
memo[i][j] = memo[i - 1][j - 1]
else:
memo[i][j] = 1 + min(memo[i - 1][j],
memo[i][j - 1],
memo[i - 1][j - 1])
return memo[-1][-1]
def dp_memoize(self, word1: str, word2: str) -> int:
"""
dp memoized solution
"""
memo = [[None] * len(word2) for _ in range(len(word1))]
def helper(i: int, j: int) -> int:
if i >= len(word1) and j >= len(word2):
return 0
if i >= len(word1) or j >= len(word2):
return max(len(word1) - i, len(word2) - j)
if word1[i:] == word2[j:]:
return 0
if memo[i][j] is None:
if word1[i] == word2[j]:
return helper(i + 1, j + 1)
insert = helper(i + 1, j)
remove = helper(i, j + 1)
replace = helper(i + 1, j + 1)
res = 1 + min(insert, remove, replace)
memo[i][j] = res
return memo[i][j]
return helper(0, 0)
def dp_recursive(self, word1: str, word2: str) -> int:
"""
DP recursive Solution
Let P(word1, word2) := min number of moves to match word 1 and word 2
P(word1, word2) =
1. 0 if word1 == word2
2. 1 + min(A, B, C)
where A = insert a char, B = remove a char, C = replace a char
"""
def helper(word1: str, word2: str) -> int:
if word1 == word2:
return 0
if word1 == "" or word2 == "":
return max(len(word1), len(word2))
if word1[0] == word2[0]:
return helper(word1[1:], word2[1:])
insert = helper(word1[1:], word2)
remove = helper(word1, word2[1:])
replace = helper(word1[1:], word2[1:])
return 1 + min(insert, remove, replace)
return helper(word1, word2)
# @lc code=end
if __name__ == "__main__":
print(Solution().minDistance("horse", "ros"), 3)
|
from django.test import TestCase
from django.db import IntegrityError
from django.utils import timezone
from django.contrib.auth.models import User
from ostinato.blog.models import BlogEntryBase
from ..models import Entry
from .utils import create_objects
class BlogEntryBaseTestCase(TestCase):
def test_model_exists(self):
BlogEntryBase
def test_model_is_abstract(self):
self.assertTrue(BlogEntryBase._meta.abstract)
def test_model_instance(self):
u = User.objects.create(
username='user1', password='', email='test@example.com')
Entry.objects.create(
title='Entry Title 1',
slug='entry-title-1',
content='Entry Content 1',
author=u,
)
def test_slug_is_unique(self):
create_objects()
with self.assertRaises(IntegrityError):
Entry.objects.create(
title='Invalid', slug='entry-title-1',
author=User.objects.all()[0])
def test_unicode_name(self):
create_objects()
self.assertEqual('Entry Title 1', str(Entry.objects.get(id=1)))
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
image = models.ImageField(upload_to='images/', null=True, blank=True)
bio = models.TextField(blank=True, null=True)
def __str__(self):
return self.user.username
def create_user_profile(sender, **kwargs):
if kwargs['created']:
UserProfile.objects.create(user=kwargs['instance'])
post_save.connect(create_user_profile, sender=User)
|
"""
Plan pieces that may be useful for assembling plans.
This is the LCLS counterpart to `bluesky.plan_stubs`.
The plans in this module are not meant to be run individually, instead these
are intended as building blocks for other complete plans.
"""
import logging
from bluesky.plan_stubs import subscribe
from bluesky.plans import count
from bluesky.preprocessors import stub_wrapper
import yaml
from nabs.streams import AverageStream
logger = logging.getLogger(__name__)
def measure_average(detectors, num, delay=None, stream=None):
"""
Measure an average over a number of shots from a set of detectors.
Parameters
----------
detectors : list
List of detectors to read
num : int
Number of shots to average together
delay : iterable or scalar, optional
Time delay between successive readings. See `bluesky.plans.count`
for more details
stream : `nabs.streams.AverageStream`, optional
If a plan will call `measure_average` multiple times, a single
``AverageStream`` instance can be created and then passed in on each
call. This allows other callbacks to subscribe to the averaged data
stream. If no ``AverageStream`` is provided then one is created for the
purpose of this function.
Returns
-------
averaged_event : dict
A dictionary of all the measurements taken from the list of detectors
averaged for ``num`` shots. The keys follow the same naming convention
        as they will appear in the event documents, i.e. "{name}_{field}".
Notes
-----
The returned average dictionary will only contain keys for 'number' or
'array' fields. Field types that can not be averaged such as 'string' will
be ignored, do not expect them in the output.
"""
# Create a stream and subscribe if not given one
if not stream:
stream = AverageStream(num=num)
yield from subscribe('all', stream)
# Manually kick the LiveDispatcher to emit a start document because we
# will not see the original one since this is subscribed after open_run
stream.start({'uid': None})
# Ensure we sync our stream with request if using a prior one
else:
stream.num = num
# Measure our detectors
yield from stub_wrapper(count(detectors, num=num, delay=delay))
# Return the measured average as a dictionary for use in adaptive plans
return stream.last_event
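# Hedged usage sketch (not part of this module): inside a larger plan, `measure_average`
# is yielded from like any other plan stub. `dets` is assumed to be a list of readable
# devices supplied by the caller.
#
#   def my_plan(dets):
#       avg = yield from measure_average(dets, num=10)
#       logger.info('averaged readings: %s', avg)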
def update_sample(sample_name, path, n_shots):
"""
Update the current sample information after a run.
Updates the `status` values of each target in the sample,
from `False` to `True` to indicate that it is shot.
Parameters
----------
sample_name : str
A name to identify the sample grid, should be snake_case style.
path : str
Path to the `.yml` file. Defaults to the path defined when
creating this object.
n_shots : int
Indicates how many targets have been shot.
"""
info = get_sample_targets(sample_name, path)
data = {}
# list of dictionaries
xx = info[0]
yy = info[1]
# find the index of the targets that is next to be shot
x_index = next((index for (index, d) in enumerate(xx)
if d["status"] is False), None)
    if x_index is None:
        raise IndexError('Could not find a target that has not been shot yet; '
                         'all targets in this sample may already have been shot.')
temp_x, temp_y = [], []
for i in range(n_shots):
# update the status for the next target where the status is False
x_target = next((item for item in xx if item['status'] is False), None)
y_target = next((item for item in yy if item["status"] is False), None)
        # should not get here, but just in case:
        if x_target is None:
            raise IndexError('Could not update the target status; all targets '
                             'in this sample may already have been shot.')
x_target['status'] = True
y_target['status'] = True
temp_x.append(x_target)
temp_y.append(y_target)
    # update the original list of targets
xx[x_index:(x_index + len(temp_x))] = temp_x
yy[x_index:(x_index + len(temp_y))] = temp_y
data['xx'] = xx
data['yy'] = yy
with open(path) as sample_file:
yaml_dict = yaml.safe_load(sample_file) or {}
yaml_dict[sample_name].update(data)
with open(path, 'w') as sample_file:
yaml.safe_dump(yaml_dict, sample_file,
sort_keys=False, default_flow_style=False)
def get_sample_targets(sample_name, path):
"""
Get the `xx` and `yy` target information from a saved sample.
Given a sample name, get the x, y grid points that are mapped for that
sample.
Parameters
----------
sample_name : str
The name of the sample to get the mapped points from. To see the
available mapped samples call the `mapped_samples()` method.
path : str, optional
Path to the samples yaml file.
Returns
-------
`xx`, `yy` : tuple
Returns two lists of dictionaries, with information about the targets.
"""
data = None
with open(path) as sample_file:
try:
data = yaml.safe_load(sample_file)
except yaml.YAMLError as err:
logger.error('Error when loading the samples yaml file: %s',
err)
raise err
if data is None:
raise Exception('The file is empty, no sample grid yet. '
'Please use `save_presets` to insert grids '
'in the file.')
try:
sample = data[str(sample_name)]
xx = sample['xx']
yy = sample['yy']
return xx, yy
except Exception:
err_msg = (f'This sample {sample_name} might not exist in the file.')
raise Exception(err_msg)
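# Hedged sketch of the yaml layout these helpers assume, inferred from the code above
# (the sample name and any keys besides `status` are illustrative only):
#
#   sample_1:
#     xx:
#     - {pos: 0.10, status: false}
#     - {pos: 0.12, status: false}
#     yy:
#     - {pos: 1.00, status: false}
#     - {pos: 1.02, status: false}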
|
def area(larg, comp):
a = larg * comp
    print('The area of a plot measuring {:.2f} m x {:.2f} m is {:.2f} m².'.format(larg, comp, a))
# Main program
print('{:=^55}'.format(' Land Plot Control '))
l = float(input('WIDTH (m): '))
c = float(input('LENGTH (m): '))
area(l, c)
print('=' * 55)
|
class Solution:
# @param A : list of list of integers
# @param B : list of integers
# @param C : list of integers
# @param D : list of integers
# @param E : list of integers
# @return a list of integers
def solve(self, A, B, C, D, E):
sum = []
print(A)
for i in range(len(A)):
for j in range(len(A[0])):
if i + j == 0:
continue
elif i == 0:
A[i][j] += A[i][j - 1]
elif j == 0:
A[i][j] += A[i - 1][j]
else:
A[i][j] += A[i][j - 1] + A[i - 1][j] - A[i - 1][j - 1]
print(A)
for i in range(len(B)):
x, y, a, b, temp = B[i] - 1, C[i] - 1, D[i] - 1, E[i] - 1, 0
if x + y == 0 or a + b == 0:
temp = A[a][b]
elif a == 0 or x == 0:
temp = A[a][b] - A[a][y - 1]
elif y == 0 or b == 0:
temp = A[a][b] - A[x - 1][b]
else:
temp = A[a][b] - A[a][y - 1] - A[x - 1][b] + A[x - 1][y - 1]
sum.append(temp)
return sum
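# Hedged mini-example (separate from the large driver below): prefix sums over a 2x2
# matrix. The single query (1,1)-(2,2) in 1-based coordinates covers the whole matrix,
# so the expected answer is 1 + 2 + 3 + 4 = 10.
print(Solution().solve([[1, 2], [3, 4]], [1], [1], [2], [2]))  # -> [10]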
a = "40 46 56 78 -72 -2 98 -21 94 19 23 76 -70 73 -86 52 36 22 74 -55 31 -16 23 14 88 -16 51 63 -63 -33 -72 -59 20 26 -4 -68 -10 -61 80 51 24 -67 32 57 -16 9 13 -81 76 29 83 83 -47 -6 -45 -13 -88 -13 67 -30 39 -63 -61 -49 -7 0 55 12 41 37 -67 73 60 -57 -82 -44 -92 69 -58 94 -29 41 41 -50 52 32 -97 100 -66 -23 -54 66 23 -17 8 -85 86 68 -97 28 -34 99 -24 78 62 -76 54 -3 71 25 30 56 17 -67 40 -69 -68 62 -15 89 13 73 -36 74 -38 -70 -100 -3 -54 -100 -37 8 39 -24 8 23 30 -64 -75 -92 42 69 70 -11 -88 -46 -52 -69 -21 -83 -26 100 -62 -67 -62 88 28 7 36 -7 -70 61 69 21 -95 -60 80 65 100 51 7 -43 0 95 -96 -38 -25 69 96 -77 -35 -93 100 -20 52 13 -75 20 -83 -40 89 -47 -69 -38 -20 -56 31 96 -94 82 -14 32 79 33 -20 39 59 -30 -1 -54 10 -59 -83 56 82 32 -84 67 -24 -83 -70 10 49 38 68 91 -58 79 -16 59 -63 48 81 63 9 -25 -20 59 12 17 -43 4 7 -76 31 0 30 12 38 -100 -71 -91 97 59 -77 33 11 67 96 23 -22 79 77 65 -4 -77 -32 -44 -93 -31 -65 59 -35 96 19 10 36 -58 -21 -19 35 -24 61 19 -46 -45 90 -90 -28 -37 -60 -10 61 6 42 29 -39 21 -88 -86 -16 -72 68 -11 88 54 88 77 5 40 90 5 60 64 -18 20 -27 2 -97 44 -69 71 43 -23 -51 -79 -9 -38 69 65 99 34 -4 -29 -57 77 -5 81 100 -57 25 61 83 -94 67 -53 -8 -61 32 -88 -47 70 -74 -88 2 -20 -74 -1 -15 -14 -32 -1 63 75 -1 -5 80 54 72 -12 -35 6 -92 72 -65 60 -63 -26 71 16 -54 23 -37 0 -19 -90 57 71 -25 38 -96 -56 13 -22 45 -43 66 -10 100 52 -24 88 67 -64 -45 -9 25 48 47 62 -69 -92 49 -46 -50 -17 88 -97 -53 -92 -33 26 18 82 56 -4 56 -8 89 -69 -88 -9 47 -66 -67 26 -29 -16 -82 -42 86 -50 -82 -50 8 -85 28 -79 45 -91 29 -88 -93 -33 2 -34 93 -38 9 -40 35 -93 -51 20 -41 -86 -17 -22 -72 -88 16 -67 -70 -98 -45 55 -89 71 60 -72 -65 -57 30 81 16 -49 5 66 -40 35 90 48 45 -95 37 -2 39 89 -91 -50 84 25 44 -27 -36 65 -56 73 -9 -59 -31 47 64 83 65 58 74 78 -34 -27 80 -6 -23 24 -16 48 -72 39 13 81 -87 54 -62 -88 -4 -18 46 60 14 2 -2 71 88 5 -67 -16 95 54 9 25 -19 83 56 74 -77 76 47 78 88 92 22 18 -7 92 56 99 -74 74 -66 52 18 39 30 -67 -36 36 87 46 39 15 -65 76 -77 -90 -83 40 73 86 6 89 -87 -15 79 -74 80 -45 -76 92 -8 -85 -47 38 84 85 12 92 19 -65 -18 28 -65 41 44 -14 -93 78 15 -87 37 -34 -84 -86 23 -88 -28 -32 21 71 72 78 -56 -67 -82 12 13 -92 16 7 49 -22 -70 -84 -48 53 -60 -91 8 -73 -32 -42 -84 41 65 -40 -21 -31 30 -70 21 88 29 70 74 -41 -99 -40 -73 -54 45 -100 -21 76 77 38 50 -83 35 -31 27 5 59 100 -21 75 -26 91 75 -61 -77 -30 -78 27 -55 -95 0 -68 56 -81 -32 -38 53 62 75 -5 35 27 89 8 -18 79 -88 -18 78 9 -20 -44 52 -41 -44 -64 -74 3 -54 73 53 93 -87 50 83 78 47 -49 28 8 -12 18 -35 42 -5 30 -30 14 23 32 19 29 -67 27 95 -2 -90 88 25 -55 76 17 -21 -94 72 -34 -45 -73 41 99 -86 92 69 53 -84 27 -30 87 58 -64 48 52 26 -45 -58 68 40 48 -11 75 72 3 93 -46 -87 -62 -62 44 87 73 43 -14 -10 -35 -86 -1 0 35 -53 0 -35 -35 -27 -57 -12 61 67 -76 -35 48 -12 -82 -3 14 65 26 19 91 -26 54 -47 76 63 47 98 87 -18 -39 29 -12 -89 77 -83 -2 99 -67 -16 -24 48 -6 -57 -35 86 20 67 38 81 11 99 24 86 -78 -37 -34 -31 73 -87 80 36 -93 -3 95 54 -70 -76 -16 -95 -48 -19 18 87 -48 -60 -17 -90 45 -12 -1 37 -28 -92 58 -91 -61 -26 -6 97 -31 84 -56 56 -15 17 -87 -18 -51 -72 48 -24 57 -25 -34 62 -63 -28 -69 81 64 69 -27 73 -47 -77 -90 -37 -3 72 27 87 -35 -9 -87 -9 85 95 76 -11 14 -11 55 -42 -95 -10 94 4 -18 100 -22 -20 -62 28 -68 57 -85 -50 -95 30 -99 -20 11 -6 68 -10 -31 -32 50 -28 44 -44 68 -74 43 -18 85 -78 45 24 93 87 -46 -11 -40 48 -66 -76 -62 -11 10 5 -85 -59 71 26 -34 79 -5 -61 -60 4 -72 -83 -64 4 87 -69 85 38 -39 82 -16 99 -51 -37 -65 89 -37 62 59 -45 -63 -12 -39 31 62 -50 -8 29 -12 -4 -55 92 -41 72 -7 74 24 -44 -78 -86 64 10 55 
57 -65 -37 -73 41 20 -55 -30 36 92 24 70 -73 -25 -64 -75 -41 -79 -84 -53 69 -6 42 84 -36 24 45 20 39 78 -7 27 35 60 36 42 -63 64 23 -40 -93 -35 96 83 -62 -11 61 46 -36 48 -28 -67 -70 92 -14 90 -85 14 38 -76 53 -88 84 21 -84 -24 45 62 8 3 56 -91 -1 -13 -30 1 65 -23 48 49 -10 67 -13 58 -79 -81 -38 -50 77 -51 97 90 50 -11 -19 91 94 28 -53 -60 -7 -67 97 -67 10 18 83 -15 -74 55 77 -9 -37 -74 37 -45 63 27 -66 -69 35 -67 20 96 -84 21 53 1 20 85 -72 -65 94 36 -63 78 69 100 88 6 -73 -12 24 -94 -35 90 7 -97 -61 -43 36 -57 15 -6 15 -70 -68 39 10 66 12 27 -26 -2 -16 -93 -17 60 -49 49 48 -55 82 43 -36 55 60 38 -3 37 -68 14 82 -91 -90 -100 -91 -94 12 72 29 21 100 82 -90 10 74 -39 -4 65 -57 -94 73 -56 80 93 -82 36 -31 35 -25 6 -99 76 -73 -12 -23 40 21 44 3 73 -74 -36 -69 1 76 71 5 61 80 -10 -45 37 18 5 14 -33 55 -63 3 -64 83 96 -85 67 84 38 -29 -64 77 90 31 -99 -19 -71 -80 -95 53 38 20 15 -87 -65 87 70 24 43 -59 59 -66 -52 34 47 -10 -21 41 65 -29 -62 -61 27 -23 -1 95 -53 10 -76 54 -37 51 -91 -33 -83 42 -49 40 100 -93 -21 -89 -25 9 14 -78 29 -99 -57 -31 75 39 -41 55 79 16 55 -91 11 92 7 -64 -78 -69 70 -45 -65 7 32 -17 -65 5 -98 34 44 -26 -60 -4 68 9 -62 -95 40 -80 -23 -88 64 -53 44 8 33 -63 19 100 -96 94 -32 65 -46 -63 -44 70 -6 66 59 -3 61 -35 -51 -26 75 -2 99 97 -92 48 -51 62 81 -98 87 81 69 -85 -32 -53 28 -1 63 -96 66 -17 -14 -4 49 -4 -93 43 100 5 90 -51 -51 1 -98 15 -50 -90 27 0 -75 -44 -94 -27 -79 -72 57 -38 -99 -47 7 21 -92 0 6 20 26 45 19 -17 -39 -98 93 6 93 6 -13 -6 -78 -32 -72 12 -82 -54 -42 -68 17 -68 -31 -45 -45 -64 85 -45 62 93 -79 22 54 53 26 73 79 5 75 -95 -42 -95 8 -89 86 11 -46 55 -32 -61 3 -99 -67 -88 -6 -33 23 84 -98 -48 83 -10 100 24 -45 -56 24 -71 71 -7 96 7 71 8 -47 -84 67 -29 62 -98 -99 -51 -67 65 -21 -95 16 21 -18 12 -79 -86 0 92 62 -52 0 64 22 -97 -58 -72 -33 49 80 -13 -94 20 -21 -16 -23 -69 30 -96 47 43 23 77 -63 -76 32 5 50 18 53 -53 34 36 -100 -24 38 100 -57 -43 -93 -31 72 90 61 -76 -13 99 -31 -38 -26 63 -26 -58 -33 39 -32 -81 35 38 81 -38 82 -5 -66 -92 55 -98 57 45 66 55 50 -10 -73 -9 -23 -95 -81 39 69 -53 -62 83 52 45 84 29 -48 -14 97 60 -26 83 -29 -52 20 84 48 18 24 15 1 25 -8 -61 -65 98 -97 -53 -62 16 -68 -90 26 -60 -53 -44 -14 -71 -52 -6 0 -77 1 -92 -61 31 -20 -61 0 -13 100 -4 -58 -51 15 28 47 -98 -70 -75 51 -12 -33 -65 19 92 -63 3 59 -54 66 -96 93 -35 -70 -32 79 63 80 -74 -42 -34 -45 -87 39 87 -75 78 -11 -8 9 38 83 -28 97 8 -6 98 29 76 -62 -73 -57 20 21 35 60 52 -55 -1 -45 35 -53 36 -71 99 95 19 -25 -2 23 100 12 90 84 5 -21 -98 -2 86 -63 -76 41 -60 79 88 -87 -26 -86 -88 4 -89 57 -91 68 99 22 6 -72 53 7 -87 -48 9 47 26 3 -45 81 -80 -30 69 -89 -95 -64 92 14 51 15 -52 -27 31 -86 28 -58 7 39 21 -54 -7 -26 67 67 -62 -14 -89 16 -86 -2 -18 -12 83 -88 -87 7 -23 -17 5 -36 52 -27 27 68 21 75 19 23 22 0 25 32 -70 -69 9 62 3 44 11 33 -22 5 49 -18 -32 -89 -42 12 -57 1 -57 -99 82 -28 9 8 87 12 -70 -46 -63 -45 -70 -29 80 74 49 59 -65 -82 -24 84 79 -49 82 58 0 12 78 25 -49 4 -20 83 85 -85 -90 83 -69 22 -28 83 -48 71 47 24 9 -2 -28 52 -18 85 47 -63 50 25 -16 -14 -13 -61 2 77 36 -26 26 34 -96 65 87 -14 -86 62 -45 -61 36 22 -83 60 -32 31 31 -44 -12 -40 -10 -88 82 -15 96 41 -86 69 86 5 -86 95 -99 -20 95 -83 16 12 61 87 -24 -19 -53 -8 -89 69 -96 78 46 78 24 11 -95 89 -52 -66 -98 -36 -19 52 -79 -15 34 22 60 -72 -38 -50 -72 90 -81 48 -47 -11 97 -85 63 -98 -63 -67 -54 9 62 -85 75 -34 98 -71 2 -14 90 83 11 99 65 21 -39 -2 73 -3 90 71 70 -75 12 -92 62 -20 -31 79 77 26 -21 -10 100 -28 74 -18 -20 49 -92 91 -17 26 80 -39 49 -61 97 3 4 85 -78 69 79 26 -77 -55 -48 -31 -74 32 -7 28 -52 -86 -58 -42 -79 74 
34 -91 -80 -61 40 -14 46 -1 -9 37 61 82 -56 -81 -6 45 -88 -82 -11 -92 22 62 -71 12 39 -95 -80 -12 26 43 92 87 -84 33 25 39 -95 -72 37 -26 -92 62 78 -20 -54 20 45 47 -10 27 -24 4 -66 79 -94 34 -81 24 -31 29 96 -59 -41 41 -14 56 67 -43 -10 -48 60 -44 -75 -16 -29 -18 28 -98 64 98 96 73 -96 81 78 -19 18 79 14 99 -25 33 -70 -72 -89 46 41 4 -48 -4 1 15 -89 -50 17 -34 -9 92 -75 -85 24 -71 -68 -38 85 -78 -73 13 -6 -55 -59 1 76 -30 -79 98 100 57 94 4 -86 -94 -65 -100 -46 -28 5 1 87 65 93 -76 59 -25 -65 -69 0 -7 60 -97 -70 -52 -11 10 -38 51 -100 69 48 92 12 46 23 63 -83 44 -72 -27 5 -20 27 89 -46 45 94 6 -77 -67 46 28 22 -89 -70 70 -25 -84 27 25 11 -50 -97 94 -4 5 -56 76 90 2 20 16 -79 29 -20 -86 9 29 -77 14 13 79 -86 82 37 -76 -6 -44 -25 -26 -34 -60 44 -53 -58 45 -84 -96 -44 66 -48 -22 -19 97 -8 85 -69 42 -21 99 -54 -42 -28 -68 32 19 -25 1 -37 -25 88 -93 26 -47 3 19 -73 -77 93 31 4 -22 -51 -12 94 25 -18 -80 -39 -56 13 31 -60 -56 30 46 44 61 -32 -26 -85 -92 60 -40 -100 -23 -57 -6 -89 -76 -73 -98 18 -44 -89 92 -12 -56 36 27 -34 -44 -64 93 96 68 -64 -45 -49 -24 69 94 3 -94 -48 2 83 -2 40 -76 90 -86 -7 82 66 -43 -56 54 -89 -88 -35 -59 34 0 -45 24 91 100 66 18 96 -72 -34 -75 -55 89 65 80 59 22 -85 97 -92 -49 -42 43 -26 24 -34 77 44 -79 -2 21 64 -30 6 75 27 22 -21 -96 -1 -33 -44 97 -24 -11 -96 22 -37 89 22 95 18 43 -23 89 -21 -1 98 -35 68 -25 -27 78 47 -37 -39 88 5 21 32 -61 46 -28 -53 97 30 64 5 93 72 -76 62 -98 78 -41 -81 -86 48 99 -97 -48 -18 16 -11 54 -18 -37 74 32 -44 -82 29 93 21 51 -70 -81 -13 34 -1 93 -48 50 68 71 11 99 53 25 31 -71 80 -33 -27 98 95 -53 56 -35 86 -10 -77 64 -67 21 95 -24 91 -38 -33 93 -61 -87 64 -67 -72 -76 -90 -5 -24 -2 77 -43 -9 -37 -73 11 35 -5 64 -35 -21 -9 -12 71 27 -44 61 88 -28 -24 70 -53 66 -56 5 4 -33 18 -7 88 6 7 97 27 65 87 4 -40 53 99 80 -15 -28 97 52 5 -84 47 22 27 94 57 -34 -72 31 -83 12 100 -36 52 3 -72 5 -24 68 -69 26 97 50 86 -73 -98 -12 -40 -21 -68 67 20 -8 -48 -75 -60 9 97 -56 -67 -35 21 -94 -19 39 25 70 45 66 5 2 28 -11 41 68 42 72 -78 52 63 -96 78 -86 47 -66 80 -53 -83 27 80 0 29 2 46 97 83 -28 52 10 57 46 32 -48 -89 -4 -23 -71 22 81 -75 -66 92 1 93 -63 -67 73 74 -68 86 -29 29 -22 -98 46 -11 25 -78 -14 15 85 35 -49 -52 77 -70 5 -56 -50 32 -78 -45 -21 99 66 11 -85 26 -5 -36 80 -43 -6 4 -35 -40 -8 -88 64 -43 -31 79 2 -86 -36 -35 -53 -44 -69 -16 -99 16 -11 13 -71 72 98 68 -100 16 3 -52 75 18 47 -52 -66 78 0 40 69 90 -47 48 -24 -41 -66 29 -90 3 -63 14 80 32 -14 -14 -100 67 -6 91 91 19 24 40 81 -23 -14 -16 35 2 10 -87 -25 22 41 -22 26 75 -97 89 -97 96 -33 -52 22 77 84 22 9 -14 -93 -73 -34 -84 -37 -50 -29 55 -58 -39 -66 0 -81 -3 46 67 -18 33 -66 -34 96 10 -91 -11 -60 74 77 -38 -93 -36 40 15 40 -66 66 -56 -84 41 16 -67 -66 55 15 -42 -5 -72 -30 2 -1 -4 52 -42 -58 -59 71 -62 -8 66 19 38 -13 21 -25 99 -95 -89 16 -70 28 -97 22 -55 -69 12 11 -66 -48 24 15 -27 67 28 -47 52 -25 -68 52 -86 -45 64 -85 -30 51 -98 -37 77"
a = a.split()
a = list(map(int, a))
print(Solution().solve([a], [1], [1], [1], [1]))  # wrap the flat list as a single-row matrix; solve() expects a 2-D input
|
import json
import random
def random_color():
hex_characters = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"]
color_string = ""
return color_string.join(random.choices(hex_characters, k=6))
def get_dict_from_json(name):
    with open(name, "r") as json_file:
        json_string = json_file.read()
    if json_string:
        return json.loads(json_string)
    else:
        return {}
party = get_dict_from_json("party.json")
npcs = get_dict_from_json("npcs.json")
world = get_dict_from_json("world.json")
def dict_to_json(dict_i, json_i):
    with open(json_i, "w") as json_file:
        json_file.write(json.dumps(dict_i))
def backup_party():
dict_to_json(party, "party.json")
def backup_npcs():
dict_to_json(npcs, "npcs.json")
def backup_characters():
backup_party()
backup_npcs()
def backup_wizards(wizards):
dict_to_json(wizards, "wizards.json")
def backup_world(world):
dict_to_json(world, "world.json")
next_name = None
next_short_name = None
next_backstory = None
def find_character(name):
if name in party:
return party[name]
elif name in npcs:
return npcs[name]
else:
return None
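# Hedged usage sketch (not part of the original module): a round trip through
# the helpers above, using a hypothetical scratch file "example_party.json"
# and a made-up character so the real party.json is left untouched.
if __name__ == "__main__":
    party["Alia"] = {"short_name": "Al", "backstory": "A wandering bard."}
    dict_to_json(party, "example_party.json")
    print(get_dict_from_json("example_party.json").get("Alia"))
    print(find_character("Alia"))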
|
#! /usr/bin/python3
import os
from N4Tools import Design
commands = [
'sudo apt-get install python3-setuptools',
'apt list --upgradable',
'sudo apt install git',
'git clone https://github.com/kivy/buildozer.git',
'cd buildozer && sudo python3 setup.py install',
'sudo apt update',
'sudo apt install -y git zip openjdk-8-jdk python3-pip autoconf libtool pkg-config zlib1g-dev libncurses5-dev libncursesw5-dev libtinfo5 cmake libffi-dev libssl-dev',
'pip3 install --user --upgrade cython virtualenv',
'sudo apt-get install cython',
'python3 -m pip install cython',
]
os.chdir(os.environ['HOME'])
for i in commands:
os.system(i)
Error = False
try:
import Cython
except ImportError:
    Error = True
msg = '[$LGREEN]# Done...[$/]'
if Error:
    msg = '[$LRED]# Error...[$/]'
print(Design.Color().reader(msg))
|
from PIL import Image
from PIL.ExifTags import TAGS
import time
def get_date(exifinfo):
if 36867 in exifinfo: # 36867:DateTimeOriginal
exifdate = exifinfo[36867]
elif 36868 in exifinfo: # 36868:DateTimeDigitized
exifdate = exifinfo[36868]
    elif 306 in exifinfo:  # 306:DateTime
        exifdate = exifinfo[306]
    else:
        # No EXIF date tag present; let the caller decide how to handle it.
        return None, None
    print(exifdate)
    # Convert the EXIF date string into date and timestamp strings
if exifdate.isdigit():
t_array = time.localtime(int(exifdate)/1000)
else:
t_array = time.strptime(exifdate, "%Y:%m:%d %H:%M:%S")
date = time.strftime("%Y_%m_%d", t_array)
timestamp = time.strftime("%Y%m%d_%H%M%S", t_array)
print(date, timestamp)
return date, timestamp
def get_maker(exifinfo):
    # Default to empty strings so the function never returns unbound names
    # when the Make/Model tags are missing.
    maker = model = ''
    if 271 in exifinfo:  # 271:Make
        maker = exifinfo[271].replace(' ', '_')
    if 272 in exifinfo:  # 272:Model
        model = exifinfo[272].replace(' ', '_')
    print(maker, model)
    return maker, model
def get_exif_data(fname):
"""Get embedded EXIF data from image file."""
ret = {}
try:
img = Image.open(fname)
if hasattr(img, '_getexif'):
exifinfo = img._getexif()
            if exifinfo is not None:
date, timestamp = get_date(exifinfo)
maker, model = get_maker(exifinfo)
ret["date"] = date
ret["time"] = timestamp
ret["maker"] = maker
ret["model"] = model
for tag, value in exifinfo.items():
key = TAGS.get(tag, tag)
ret[key] = value
print(tag, value)
except IOError:
print('IOERROR ' + fname)
return ret
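# Hedged usage sketch (not part of the original script): a hypothetical helper
# that turns the extracted EXIF dict into a date-based filename. The "time"
# and "model" keys are only set when the matching EXIF tags exist, so .get()
# defaults are used.
def suggest_filename(path):
    info = get_exif_data(path)
    return "{}_{}.jpg".format(info.get("time", "unknown"),
                              info.get("model", "camera").lower())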
if __name__ == '__main__':
filename = '/Users/zcjl/IMG_1192.JPG'
exif = get_exif_data(filename)
print(exif)
|
with open("task2.txt") as file:
data = file.read()
print(data.replace("\n", ""))
|
from django.apps import AppConfig
class EventappConfig(AppConfig):
name = 'EventApp'
|
import json
import time
import boto3
from botocore.exceptions import (
ClientError, NoCredentialsError, ParamValidationError
)
from .exceptions import (
BucketNameAlreadyInUse, CannotGetCurrentUser, CannotListAccountAliases,
CredentialsNotFound, InvalidBucketName, InvalidUserName, UserNameTaken
)
POLICY_NAME_FORMAT = '{bucket_name}-owner-policy'
class BucketCreator:
def __init__(self, profile_name=None, region_name=None):
self.session = boto3.session.Session(profile_name=profile_name,
region_name=region_name)
self.s3 = self.session.resource('s3')
self.iam = self.session.resource('iam')
def commit(self, data):
bucket = self.create_bucket(data['bucket_name'], data['region'])
user = self.create_user(bucket, data['user_name'])
self.set_bucket_policy(
bucket,
user,
public_get_object_paths=data.get('public_get_object_paths')
)
if data.get('cors_origins'):
self.set_cors(bucket, data['cors_origins'])
if data.get('enable_versioning'):
self.enable_versioning(bucket)
def get_bucket_policy_statement_for_get_object(self, bucket,
public_get_object_paths):
"""
        Create a policy statement that allows the public to perform
        s3:GetObject on the specified paths.
"""
if public_get_object_paths:
def format_path(path):
if path.startswith('/'):
path = path[1:]
return "arn:aws:s3:::{bucket_name}/{path}".format(
bucket_name=bucket.name,
path=path,
)
paths_resources = []
for path in public_get_object_paths:
paths_resources.append(format_path(path))
return {
"Sid": "PublicGetObject",
"Effect": "Allow",
"Principal": "*",
"Action": ["s3:GetObject"],
"Resource": paths_resources,
}
def get_bucket_policy_statements_for_user_access(self, bucket, user):
# Create policy statement giving the created user access to
# non-destructive actions on the bucket.
yield {
"Sid": "AllowUserManageBucket",
"Effect": "Allow",
"Principal": {
"AWS": user.arn
},
"Action": [
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:ListBucketMultipartUploads",
"s3:ListBucketVersions"
],
"Resource": "arn:aws:s3:::{bucket_name}".format(
bucket_name=bucket.name
)
}
# Create policy statement giving the created user full access over the
# objects.
yield {
"Sid": "AllowUserManageBucketObjects",
"Effect": "Allow",
"Principal": {
"AWS": user.arn
},
"Action": "s3:*",
"Resource": "arn:aws:s3:::{bucket_name}/*".format(
bucket_name=bucket.name
)
}
def set_bucket_policy(self, bucket, user, public_get_object_paths=None):
policy_statement = []
if public_get_object_paths:
policy_statement.append(
self.get_bucket_policy_statement_for_get_object(
bucket, public_get_object_paths
)
)
policy_statement.extend(list(
self.get_bucket_policy_statements_for_user_access(bucket, user)
))
policy = json.dumps({
"Version": "2012-10-17",
"Statement": policy_statement,
})
while True:
try:
bucket.Policy().put(Policy=policy)
except ClientError as e:
if e.response['Error']['Code'] == 'MalformedPolicy':
print('Waiting for the user to be available to be '
'attached to the policy (wait 5s).')
time.sleep(5)
continue
raise e
else:
break
print('Bucket policy set.')
def create_bucket(self, name, region):
"""
Create bucket of name in the given region.
"""
create_bucket_kwargs = {}
create_bucket_config = {}
# us-east-1 does not work with location specified.
if region != 'us-east-1':
create_bucket_config['LocationConstraint'] = region
if create_bucket_config:
create_bucket_kwargs['CreateBucketConfiguration'] = (
create_bucket_config
)
bucket = self.s3.Bucket(name)
response = bucket.create(**create_bucket_kwargs)
msg = 'Created bucket "{bucket_name}" at "{bucket_location}" in ' \
'region "{region}".'
print(msg.format(
bucket_name=name,
bucket_location=response['Location'],
region=region,
))
print()
print('\tAWS_STORAGE_BUCKET_NAME', name)
print()
bucket.wait_until_exists()
return bucket
def enable_versioning(self, bucket):
bucket.Versioning().enable()
print('Enabled versioning for "{}".'.format(bucket.name))
def create_user(self, bucket, user_name):
user = self.iam.User(user_name).create()
self.iam.meta.client.get_waiter('user_exists').wait(UserName=user_name)
user.load()
print('Created IAM user "{user_name}".'.format(
user_name=user.arn
))
self.create_user_access_key_pair(user)
return user
def create_user_access_key_pair(self, user):
access_key_pair = user.create_access_key_pair()
print('Created access key pair for user "{user}".'.format(
user=user.arn,
))
print()
print('\tAWS_ACCESS_KEY_ID', access_key_pair.access_key_id)
print('\tAWS_SECRET_ACCESS_KEY', access_key_pair.secret_access_key)
print()
return access_key_pair
def set_cors(self, bucket, origins):
try:
            # Validate that `origins` is a non-empty iterable.
next(iter(origins))
except StopIteration:
raise ValueError("'origins' cannot be empty.")
config = {
'CORSRules': [
{
'AllowedMethods': ['GET'],
'AllowedOrigins': origins,
'MaxAgeSeconds': 3000,
'AllowedHeaders': ['Authorization'],
}
]
}
msg = "Set CORS for domains {domains} to bucket \"{bucket_name}\"."
print(msg.format(domains=', '.join(origins), bucket_name=bucket.name))
bucket.Cors().put(CORSConfiguration=config)
def validate_bucket_name(self, bucket_name):
try:
self.s3.meta.client.head_bucket(Bucket=bucket_name)
except ClientError as e:
# Bucket does not exist, proceed with creation.
if e.response['Error']['Code'] == '404':
return
# No access to the bucket means that it already exists but we
# cannot run head request on it.
elif e.response['Error']['Code'] == '403':
raise BucketNameAlreadyInUse
else:
raise e
except ParamValidationError as e:
raise InvalidBucketName(str(e)) from e
else:
raise BucketNameAlreadyInUse
def validate_user_name(self, user_name):
try:
self.iam.User(user_name).load()
except ClientError as e:
if e.response['Error']['Code'] == 'ValidationError':
raise InvalidUserName(str(e)) from e
if not e.response['Error']['Code'] == 'EntityAlreadyExists':
return
raise e
else:
raise UserNameTaken
def get_current_user(self):
try:
user = self.iam.CurrentUser()
user.load()
return user
except NoCredentialsError as e:
raise CredentialsNotFound from e
except ClientError as e:
if e.response['Error']['Code'] == 'AccessDenied':
raise CannotGetCurrentUser from e
raise e
def get_current_account_alias(self):
try:
response = self.iam.meta.client.list_account_aliases()
except NoCredentialsError as e:
raise CredentialsNotFound from e
except ClientError as e:
if e.response['Error']['Code'] == 'AccessDenied':
raise CannotListAccountAliases from e
raise e
try:
return response['AccountAliases'][0]
except IndexError:
return
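# Hedged usage sketch (not part of the original module), kept commented out
# because running it would create real AWS resources. The profile, region,
# bucket name and user name below are purely illustrative.
#
#     creator = BucketCreator(profile_name='default', region_name='eu-west-1')
#     creator.validate_bucket_name('example-media-bucket')
#     creator.validate_user_name('example-media-bucket-owner')
#     creator.commit({
#         'bucket_name': 'example-media-bucket',
#         'region': 'eu-west-1',
#         'user_name': 'example-media-bucket-owner',
#         'public_get_object_paths': ['media/*'],
#         'cors_origins': ['https://example.com'],
#         'enable_versioning': True,
#     })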
|
def already_existing1():
pass
def already_existing2():
pass
def already_existing3():
pass
|
import os
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
# Example
createFolder('./data/')
# Creates a folder in the current directory called data
|
DATA = [
{
'name': 'Facundo',
'age': 72,
'organization': 'Platzi',
'position': 'Technical Mentor',
'language': 'python',
},
{
'name': 'Luisana',
'age': 33,
'organization': 'Globant',
'position': 'UX Designer',
'language': 'javascript',
},
{
'name': 'Héctor',
'age': 19,
'organization': 'Platzi',
'position': 'Associate',
'language': 'ruby',
},
{
'name': 'Gabriel',
'age': 20,
'organization': 'Platzi',
'position': 'Associate',
'language': 'javascript',
},
{
'name': 'Mariandrea',
'age': 30,
'organization': 'Platzi',
'position': 'QA Manager',
'language': 'java',
},
{
'name': 'Karo',
'age': 23,
'organization': 'Everis',
'position': 'Backend Developer',
'language': 'python',
},
{
'name': 'Ariel',
'age': 32,
'organization': 'Rappi',
'position': 'Support',
'language': '',
},
{
'name': 'Juan',
'age': 17,
'organization': '',
'position': 'Student',
'language': 'go',
},
{
'name': 'Pablo',
'age': 32,
'organization': 'Master',
'position': 'Human Resources Manager',
'language': 'python',
},
{
'name': 'Lorena',
'age': 56,
'organization': 'Python Organization',
'position': 'Language Maker',
'language': 'python',
},
]
def run():
all_python_devs = filter(lambda dev: dev['language'] == 'python', DATA)
    all_Platzi_workers = filter(lambda platzi_worker: platzi_worker['organization'] == 'Platzi', DATA)
adults = filter(lambda adult: adult['age'] >= 18, DATA)
def homeless(workers):
n_worker = dict(workers)
n_worker['homeless'] = n_worker['organization'] == ''
return n_worker
    def old(workers):
        n_old = dict(workers)
        n_old['old'] = n_old['age'] >= 30
        return n_old
    workers = map(homeless, DATA)
    old_people = map(old, DATA)
    all_homeless = filter(lambda worker: worker['homeless'], workers)
    all_old_people = filter(lambda person: person['old'], old_people)
print('Python devs: ')
for dev in all_python_devs:
print(dev['name'])
print('\n')
print('Platzi workers: ')
for worker in all_Platzi_workers:
print(worker['name'])
print('\n')
print('Adults: ')
for adult in adults:
print(adult['name'])
print('\n')
print('Homeless: ')
for homeless in all_homeless:
print(homeless['name'])
print('\n')
print('Old people: ')
    for person in all_old_people:
        print(person['name'])
print('\n')
# Remember: when possible, use lambdas
if __name__ == '__main__':
run()
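# Hedged alternative sketch (not part of the original exercise): the same
# queries written as list comprehensions, which can be reused freely, unlike
# the one-shot iterators returned by filter() and map().
def run_with_comprehensions():
    python_devs = [d['name'] for d in DATA if d['language'] == 'python']
    platzi_workers = [d['name'] for d in DATA if d['organization'] == 'Platzi']
    adults = [d['name'] for d in DATA if d['age'] >= 18]
    print(python_devs, platzi_workers, adults, sep='\n')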
|
#!/usr/bin/env python
"""
This module contains functions to construct classifiers, get their param
distributions for a grid search, get top features and other
smaller utility functions.
"""
__all__ = [
"supported_estimators",
"publication_ensemble",
"get_parameter_distribution_for_model",
"make_classifier",
"make_gridsearch_clf"
]
import logging
import numpy as np
from numpy.random import RandomState
from operator import itemgetter
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.multioutput import ClassifierChain
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold, RandomizedSearchCV
from ..base.utilities import get_term_description, rename
from ..base.constants import MAX_SEED
from .classifier_chain import KRandomClassifierChains
from .binary_relevance import MixedBinaryRelevanceClassifier
logger = logging.getLogger("pyppi")
def publication_ensemble():
"""Returns a `dict` mapping labels to their the classifier used
in the publication experiments."""
label_model_map = {
'Acetylation': 'LogisticRegression',
'Activation': 'RandomForestClassifier',
'Binding/association': 'RandomForestClassifier',
'Carboxylation': 'LogisticRegression',
'Deacetylation': 'RandomForestClassifier',
'Dephosphorylation': 'RandomForestClassifier',
'Dissociation': 'RandomForestClassifier',
'Glycosylation': 'LogisticRegression',
'Inhibition': 'RandomForestClassifier',
'Methylation': 'LogisticRegression',
'Myristoylation': 'LogisticRegression',
'Phosphorylation': 'RandomForestClassifier',
'Prenylation': 'LogisticRegression',
'Proteolytic-cleavage': 'LogisticRegression',
'State-change': 'LogisticRegression',
'Sulfation': 'RandomForestClassifier',
'Sumoylation': 'RandomForestClassifier',
'Ubiquitination': 'LogisticRegression'
}
return label_model_map
def supported_estimators():
"""Return a `dict` of supported estimators."""
allowed = {
'LogisticRegression': LogisticRegression,
'RandomForestClassifier': RandomForestClassifier,
'DecisionTreeClassifier': DecisionTreeClassifier,
'KNeighborsClassifier': KNeighborsClassifier,
'MultinomialNB': MultinomialNB,
'GaussianNB': GaussianNB,
'BernoulliNB': BernoulliNB
}
return allowed
def get_parameter_distribution_for_model(model, step=None):
"""Returns the parameter distribution for a given `Scikit-learn` estimator
Parameters
----------
model: str
String class name of the `SciKit-Learn` model
step: str, optional, default: None
If the model is placed inside a parent classifier such as a `Pipeline`
then supply the step suffix to prepend to the parameter keys.
Returns
-------
`dict`
Dictionary of parameters for the model that can be used in a
grid search estimator.
See Also
--------
    `Randomized Grid Search <http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html>`_
"""
if model not in supported_estimators():
raise ValueError("{} is not a supported model".format(model))
params = {}
if model == 'LogisticRegression':
params['C'] = list(np.arange(0.0001, 0.001, step=0.0001)) + \
list(np.arange(0.001, 0.01, step=0.001)) + \
list(np.arange(0.01, 0.1, step=0.01)) + \
list(np.arange(0.1, 1.0, step=0.1)) + \
list(np.arange(1.0, 10.5, step=0.5))
params['penalty'] = ['l1', 'l2']
elif model == 'DecisionTreeClassifier':
params["criterion"] = ["gini", "entropy"]
params['max_features'] = ['auto', 'log2'] +\
list(np.arange(0, 1.0, step=0.02))
params["min_samples_leaf"] = list(np.arange(2, 21, step=1))
elif model == 'RandomForestClassifier':
params["n_estimators"] = list(np.arange(10, 250, step=10))
params["criterion"] = ["gini", "entropy"]
params['max_features'] = ['auto', 'log2'] +\
list(np.arange(0.001, 0.01, step=0.001)) + \
list(np.arange(0.01, 0.1, step=0.01))
params["min_samples_leaf"] = list(np.arange(2, 21, step=1))
params["class_weight"] = ['balanced', 'balanced_subsample']
params["bootstrap"] = [False, True]
elif model == "KNeighborsClassifier":
params["n_neighbors"] = list(np.arange(1, 50, step=1))
params["weights"] = ["uniform", "distance"]
params["algorithm"] = ['auto', 'ball_tree', 'kd_tree', 'brute']
params["leaf_size"] = list(np.arange(2, 100, step=2))
params['p'] = list(np.arange(1, 10, step=1))
elif model == "MultinomialNB":
params['alpha'] = list(np.arange(0.0001, 0.001, step=0.0001)) + \
list(np.arange(0.001, 0.01, step=0.001)) + \
list(np.arange(0.01, 0.1, step=0.01)) + \
list(np.arange(0.1, 1.0, step=0.1)) + \
list(np.arange(1.0, 10.5, step=0.5))
elif model == "BernoulliNB":
params['alpha'] = list(np.arange(0.0001, 0.001, step=0.0001)) + \
list(np.arange(0.001, 0.01, step=0.001)) + \
list(np.arange(0.01, 0.1, step=0.01)) + \
list(np.arange(0.1, 1.0, step=0.1)) + \
list(np.arange(1.0, 10.5, step=0.5))
elif model == "GaussianNB":
params = {}
if step:
keys = list(params.keys())
for key in keys:
params['{}__{}'.format(step, key)] = params[key]
params.pop(key)
return params
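# Hedged usage sketch (not part of the original module): how the `step`
# argument prefixes keys so they target a named Pipeline step, e.g.
#
#     params = get_parameter_distribution_for_model(
#         'LogisticRegression', step='estimator')
#     sorted(params)  # -> ['estimator__C', 'estimator__penalty']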
def make_classifier(algorithm, class_weight='balanced', random_state=None,
n_jobs=1):
"""Wrapper function for building a default classifier with the correct
parameters.
Parameters:
----------
algorithm: str
String class name of the `SciKit-Learn` model
class_weight : str, optional, default: 'balanced'
Sets the `class_weight` parameter if supported by the classifier.
random_state : int or :class:`RandomState` or None, optional, default: None
Sets the `random_state` parameter if supported by the classifier.
n_jobs : int, optional, default: 1
Sets the `n_jobs` parameter if supported by the classifier.
Returns
-------
`estimator`
Classifier with the supplied parameters set if applicable.
"""
supported = supported_estimators()
if algorithm not in supported:
raise ValueError(
"'{}' is not a support classifier. Choose from: {}".format(
algorithm, ', '.join(list(supported.keys()))
)
)
estimator = supported[algorithm]()
if hasattr(estimator, 'n_jobs'):
estimator.set_params(**{'n_jobs': n_jobs})
if hasattr(estimator, 'class_weight'):
estimator.set_params(**{'class_weight': class_weight})
if hasattr(estimator, 'random_state'):
estimator.set_params(**{'random_state': random_state})
if hasattr(estimator, 'probability'):
estimator.set_params(**{'probability': True})
return estimator
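# Hedged usage sketch (not part of the original module): the argument values
# below are illustrative only.
#
#     clf = make_classifier('RandomForestClassifier', random_state=42, n_jobs=2)
#     clf.get_params()['class_weight']  # -> 'balanced'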
def make_gridsearch_clf(model, rcv_splits=3, rcv_iter=30, scoring='f1',
binary=True, n_jobs_model=1, random_state=None,
search_vectorizer=True, n_jobs_gs=1, cv=None,
make_pipeline=True, multilabel=True):
"""Wrapper function to automate the mundane setup of a `Pipeline` classifier
within a `RandomGridSearchCV` estimator. See the links below for more
details on the parameters.
Parameters:
----------
model: str
String class name of the `SciKit-Learn` model which will be the
`estimator` within the `Pipeline`.
rcv_splits : int, optional, default: 3
The number of splits to use during hyper-parameter cross-validation.
rcv_iter : int, optional, default: 30
The number of grid search iterations to perform.
scoring : str, optional default: f1
Scoring method used during hyperparameter search.
binary : bool, optional, default: True
If True sets the `binary` attribute of the `CountVectorizer` to True.
n_jobs_model : int, optional, default: 1
Sets the `n_jobs` parameter of the Pipeline's estimator step.
random_state : int or None, optional, default: None
This is a seed used to generate random_states for all estimator
objects such as the base model and the grid search.
    search_vectorizer : bool, optional, default: True
        If True, adds the `binary` attribute of the `CountVectorizer` to the
        grid search parameter distribution dictionary. Ignored if
        `make_pipeline` is False.
n_jobs_gs : int, optional, default: 1
Sets the `n_jobs` parameter of the `RandomizedGridSearch` classifier.
    cv : cross-validation generator, str or an iterable, optional
        If None, then a :class:`StratifiedKFold` cv generator is used in the
        :class:`RandomizedSearchCV`.
make_pipeline : boolean, optional, default: True
Wrap the estimator defined in `model` in a pipeline with the first
step being a `CountVectorizer`. Useful if your features are textual.
Returns
-------
    `estimator`
        A :class:`RandomizedSearchCV` or :class:`KRandomClassifierChains`
        classifier.
See Also
--------
`Classifier Chain <http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.ClassifierChain.html#sklearn.multioutput.ClassifierChain>`_
`Randomized Grid Search <http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html>`_
`Pipeline <http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html>`_
"""
max_int = MAX_SEED
rng = RandomState(random_state)
cv_random_state = rng.randint(max_int)
model_random_state = rng.randint(max_int)
rcv_random_state = rng.randint(max_int)
base_estimator = make_classifier(
model, random_state=model_random_state, n_jobs=n_jobs_model)
if make_pipeline:
params = get_parameter_distribution_for_model(model, step="estimator")
vectorizer = CountVectorizer(lowercase=False, binary=binary)
base_estimator = Pipeline(
steps=[('vectorizer', vectorizer), ('estimator', base_estimator)])
if search_vectorizer:
params['vectorizer__binary'] = [False, True]
else:
params = get_parameter_distribution_for_model(model)
if cv is None:
cv = StratifiedKFold(
n_splits=rcv_splits, shuffle=True,
random_state=cv_random_state
)
clf = RandomizedSearchCV(
estimator=base_estimator,
cv=cv,
n_iter=rcv_iter,
n_jobs=n_jobs_gs,
refit=True,
random_state=rcv_random_state,
scoring=scoring,
error_score=0.0,
param_distributions=params
)
return clf
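# Hedged usage sketch (not part of the original module): `X_train` (an
# iterable of feature strings for the CountVectorizer) and `y_train` (binary
# labels) are placeholders, not data defined elsewhere in pyppi.
#
#     clf = make_gridsearch_clf('LogisticRegression', rcv_iter=10,
#                               random_state=0, n_jobs_gs=2)
#     clf.fit(X_train, y_train)
#     print(clf.best_params_, clf.best_score_)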
|
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope, add_arg_scope
from blocks.helpers import int_shape, get_name
# @add_arg_scope
# def conv2d(inputs, num_filters, kernel_size, strides=1, padding='SAME', nonlinearity=None, bn=True, kernel_initializer=None, kernel_regularizer=None, is_training=False, counters={}):
# outputs = tf.layers.conv2d(inputs, num_filters, kernel_size=kernel_size, strides=strides, padding=padding, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer)
# if bn:
# outputs = tf.layers.batch_normalization(outputs, training=is_training)
# if nonlinearity is not None:
# outputs = nonlinearity(outputs)
# print(" + conv2d", int_shape(inputs), int_shape(outputs), nonlinearity, bn)
# return outputs
#
# @add_arg_scope
# def deconv2d(inputs, num_filters, kernel_size, strides=1, padding='SAME', nonlinearity=None, bn=True, kernel_initializer=None, kernel_regularizer=None, is_training=False, counters={}):
# outputs = tf.layers.conv2d_transpose(inputs, num_filters, kernel_size=kernel_size, strides=strides, padding=padding, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer)
# if bn:
# outputs = tf.layers.batch_normalization(outputs, training=is_training)
# if nonlinearity is not None:
# outputs = nonlinearity(outputs)
# print(" + deconv2d", int_shape(inputs), int_shape(outputs), nonlinearity, bn)
# return outputs
def conv2d_openai(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, kernel_initializer=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
name = get_name("conv2d", counters)
with tf.variable_scope(name):
V = tf.get_variable('V', shape=filter_size+[int(x.get_shape()[-1]),num_filters], dtype=tf.float32,
initializer=kernel_initializer, trainable=True)
b = tf.get_variable('b', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(0.), trainable=True)
W = V
x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], pad), b)
return x
def deconv2d_openai(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, kernel_initializer=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
name = get_name("deconv2d", counters)
xs = int_shape(x)
if pad=='SAME':
target_shape = [xs[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
else:
target_shape = [xs[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
with tf.variable_scope(name):
V = tf.get_variable('V', shape=filter_size+[num_filters,int(x.get_shape()[-1])], dtype=tf.float32,
initializer=kernel_initializer, trainable=True)
b = tf.get_variable('b', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(0.), trainable=True)
W = V
x = tf.nn.conv2d_transpose(x, W, target_shape, [1] + stride + [1], padding=pad)
x = tf.nn.bias_add(x, b)
return x
@add_arg_scope
def conv2d(inputs, num_filters, kernel_size, strides=1, padding='SAME', nonlinearity=None, bn=True, kernel_initializer=None, kernel_regularizer=None, is_training=False, counters={}):
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
if isinstance(strides, int):
strides = [strides, strides]
outputs = conv2d_openai(inputs, num_filters, filter_size=kernel_size, stride=strides, pad=padding, counters=counters, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer)
if bn:
outputs = tf.layers.batch_normalization(outputs, training=is_training)
if nonlinearity is not None:
outputs = nonlinearity(outputs)
print(" + conv2d", int_shape(inputs), int_shape(outputs), nonlinearity, bn)
return outputs
@add_arg_scope
def deconv2d(inputs, num_filters, kernel_size, strides=1, padding='SAME', nonlinearity=None, bn=True, kernel_initializer=None, kernel_regularizer=None, is_training=False, counters={}):
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
if isinstance(strides, int):
strides = [strides, strides]
outputs = deconv2d_openai(inputs, num_filters, filter_size=kernel_size, stride=strides, pad=padding, counters=counters, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer)
if bn:
outputs = tf.layers.batch_normalization(outputs, training=is_training)
if nonlinearity is not None:
outputs = nonlinearity(outputs)
print(" + deconv2d", int_shape(inputs), int_shape(outputs), nonlinearity, bn)
return outputs
@add_arg_scope
def dense(inputs, num_outputs, nonlinearity=None, bn=True, kernel_initializer=None, kernel_regularizer=None, is_training=False, counters={}):
inputs_shape = int_shape(inputs)
assert len(inputs_shape)==2, "inputs should be flattened first"
outputs = tf.layers.dense(inputs, num_outputs, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer)
if bn:
outputs = tf.layers.batch_normalization(outputs, training=is_training)
if nonlinearity is not None:
outputs = nonlinearity(outputs)
print(" + dense", int_shape(inputs), int_shape(outputs), nonlinearity, bn)
return outputs
# @add_arg_scope
# def resnet_block(inputs, num_filters, kernel_size, strides=1, padding='SAME', nonlinearity=None, bn=True, kernel_initializer=None, kernel_regularizer=None, is_training=False, counters={}):
# print(nonlinearity)
# kwargs = {
# "bn": bn,
# "kernel_initializer": kernel_initializer,
# "kernel_regularizer": kernel_regularizer,
# "is_training": is_training,
# "counters": counters,
# }
# outputs = conv2d(inputs, num_filters, kernel_size, strides, padding, nonlinearity=nonlinearity, **kwargs)
# outputs = conv2d(outputs, num_filters, kernel_size, strides, padding, nonlinearity=None, **kwargs)
# outputs = nonlinearity(outputs + inputs)
# return outputs
@add_arg_scope
def residual_block(inputs, num_filters, kernel_size, strides=1, padding='SAME', nonlinearity=None, bn=True, kernel_initializer=None, kernel_regularizer=None, is_training=False, counters={}):
kwargs = {
"bn": bn,
"nonlinearity": nonlinearity,
"kernel_initializer": kernel_initializer,
"kernel_regularizer": kernel_regularizer,
"is_training": is_training,
"counters": counters,
}
outputs = conv2d(inputs, num_filters, 1, 1, 'SAME', **kwargs)
outputs = conv2d(outputs, num_filters, kernel_size, strides, 'SAME', **kwargs)
outputs = conv2d(outputs, num_filters, 1, 1, 'SAME', **kwargs)
return inputs + outputs
# shift
def down_shift(x):
xs = int_shape(x)
return tf.concat([tf.zeros([xs[0],1,xs[2],xs[3]]), x[:,:xs[1]-1,:,:]],1)
def right_shift(x):
xs = int_shape(x)
return tf.concat([tf.zeros([xs[0],xs[1],1,xs[3]]), x[:,:,:xs[2]-1,:]],2)
def up_shift(x):
xs = int_shape(x)
return tf.concat([x[:,1:xs[1],:,:], tf.zeros([xs[0],1,xs[2],xs[3]])],1)
def left_shift(x):
xs = int_shape(x)
return tf.concat([x[:,:,1:xs[2],:], tf.zeros([xs[0],xs[1],1,xs[3]])],2)
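# Hedged NumPy sketch (not part of the original module) of what down_shift
# does spatially, written without a TF graph so it is easy to check: content
# moves one row down, the top row becomes zeros and the last row is dropped.
#
#     import numpy as np
#     img = np.arange(9, dtype=np.float32).reshape(1, 3, 3, 1)
#     down = np.concatenate(
#         [np.zeros((1, 1, 3, 1), dtype=img.dtype), img[:, :2, :, :]], axis=1)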
# down, right
@add_arg_scope
def down_shifted_conv2d(x, num_filters, filter_size=[2,3], strides=[1,1], **kwargs):
x = tf.pad(x, [[0,0],[filter_size[0]-1,0], [int((filter_size[1]-1)/2),int((filter_size[1]-1)/2)],[0,0]])
return conv2d(x, num_filters, kernel_size=filter_size, strides=strides, padding='VALID', **kwargs)
@add_arg_scope
def down_shifted_deconv2d(x, num_filters, filter_size=[2,3], strides=[1,1], **kwargs):
x = deconv2d(x, num_filters, kernel_size=filter_size, strides=strides, padding='VALID', **kwargs)
xs = int_shape(x)
return x[:,:(xs[1]-filter_size[0]+1),int((filter_size[1]-1)/2):(xs[2]-int((filter_size[1]-1)/2)),:]
@add_arg_scope
def down_right_shifted_conv2d(x, num_filters, filter_size=[2,2], strides=[1,1], **kwargs):
x = tf.pad(x, [[0,0],[filter_size[0]-1, 0], [filter_size[1]-1, 0],[0,0]])
return conv2d(x, num_filters, kernel_size=filter_size, strides=strides, padding='VALID', **kwargs)
@add_arg_scope
def down_right_shifted_deconv2d(x, num_filters, filter_size=[2,2], strides=[1,1], **kwargs):
x = deconv2d(x, num_filters, kernel_size=filter_size, strides=strides, padding='VALID', **kwargs)
xs = int_shape(x)
return x[:,:(xs[1]-filter_size[0]+1):,:(xs[2]-filter_size[1]+1),:]
# up, left
@add_arg_scope
def up_shifted_conv2d(x, num_filters, filter_size=[2,3], strides=[1,1], **kwargs):
x = tf.pad(x, [[0,0],[0, filter_size[0]-1], [int((filter_size[1]-1)/2),int((filter_size[1]-1)/2)],[0,0]])
return conv2d(x, num_filters, kernel_size=filter_size, strides=strides, padding='VALID', **kwargs)
@add_arg_scope
def up_shifted_deconv2d(x, num_filters, filter_size=[2,3], strides=[1,1], **kwargs):
x = deconv2d(x, num_filters, kernel_size=filter_size, strides=strides, padding='VALID', **kwargs)
xs = int_shape(x)
return x[:,(xs[1]-filter_size[0]+1):,int((filter_size[1]-1)/2):(xs[2]-int((filter_size[1]-1)/2)),:]
@add_arg_scope
def up_left_shifted_conv2d(x, num_filters, filter_size=[2,2], strides=[1,1], **kwargs):
x = tf.pad(x, [[0,0],[0, filter_size[0]-1], [0, filter_size[1]-1],[0,0]])
return conv2d(x, num_filters, kernel_size=filter_size, strides=strides, padding='VALID', **kwargs)
@add_arg_scope
def up_left_shifted_deconv2d(x, num_filters, filter_size=[2,2], strides=[1,1], **kwargs):
x = deconv2d(x, num_filters, kernel_size=filter_size, strides=strides, padding='VALID', **kwargs)
xs = int_shape(x)
return x[:,(xs[1]-filter_size[0]+1):,(xs[2]-filter_size[1]+1):,:]
@add_arg_scope
def nin(x, num_units, **kwargs):
""" a network in network layer (1x1 CONV) """
s = int_shape(x)
x = tf.reshape(x, [np.prod(s[:-1]),s[-1]])
x = dense(x, num_units, **kwargs)
return tf.reshape(x, s[:-1]+[num_units])
@add_arg_scope
def gated_resnet(x, a=None, gh=None, sh=None, nonlinearity=tf.nn.elu, conv=conv2d, dropout_p=0.0, counters={}, **kwargs):
name = get_name("gated_resnet", counters)
print("construct", name, "...")
xs = int_shape(x)
num_filters = xs[-1]
kwargs["counters"] = counters
with arg_scope([conv], **kwargs):
c1 = conv(nonlinearity(x), num_filters)
if a is not None: # add short-cut connection if auxiliary input 'a' is given
c1 += nin(nonlinearity(a), num_filters)
c1 = nonlinearity(c1)
c1 = tf.nn.dropout(c1, keep_prob=1. - dropout_p)
c2 = conv(c1, num_filters * 2)
# add projection of h vector if included: conditional generation
if sh is not None:
c2 += nin(sh, 2*num_filters, nonlinearity=nonlinearity)
if gh is not None: # haven't finished this part
pass
a, b = tf.split(c2, 2, 3)
c3 = a * tf.nn.sigmoid(b)
return x + c3
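# Hedged usage sketch (not part of the original module): one plausible way the
# layers above are wired together under arg_scope; `images` and the keyword
# values are placeholders supplied by the calling model.
#
#     with arg_scope([conv2d, deconv2d, dense], bn=True, is_training=True,
#                    nonlinearity=tf.nn.elu, counters={}):
#         h = down_shifted_conv2d(images, num_filters=64)
#         h = gated_resnet(h, conv=down_shifted_conv2d, dropout_p=0.2)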
|