| blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 2-616) | content_id (string, 40-40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
52136ef79a2ff29b8dff5c2d42a403296fc0f539
|
3f2443dba35c42a296ffaef72c731f33c4892d07
|
/seas5/initial_s5_explore.py
|
cce51ad8b442b5104226f4bdf341466ba07cc3c7
|
[] |
no_license
|
tommylees112/esowc_notes
|
7cde6a373211647447084789fbee2597d81c0ce8
|
2a39c5c20ed50f3194ebb95aba720e48215c0dfd
|
refs/heads/master
| 2021-06-21T17:07:33.663693
| 2021-01-28T18:34:58
| 2021-01-28T18:34:58
| 184,609,893
| 3
| 0
| null | 2019-05-26T19:49:30
| 2019-05-02T15:54:31
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 9,781
|
py
|
from src.preprocess import S5Preprocessor
from pathlib import Path
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cfgrib
%load_ext autoreload
%autoreload 2
data_path = Path('data')
ds = xr.open_dataset(data_path / 'interim/s5_preprocessed/s5_tprate_kenya.nc', chunks={'number': 1})
d_min = xr.open_dataset(data_path / 'interim/s5_interim/tprate/Y2014_M01_12_tprate_kenya.nc')
d_raw = xr.open_dataset(
'data/raw/seasonal-monthly-single-levels/total_precipitation/2014/Y2014_M01_12.grib', engine='cfgrib'
)
## check how we have got 51 ensemble members?
# first 25 are mostly nan, second 25 are pretty much all nan
n_nulls = ds.isnull().mean(
dim=['lat', 'lon', 'time']
)
print("Proportion of nans in each number (ensemble member)", n_nulls.tprate.values)
# MEAN of ensemble
ds = ds.mean(dim='number')
# OR first 25 ensemble members
ds = ds.isel(number=slice(0, 25))
# --------------------------
# Oxford Server test
# --------------------------
from src.preprocess.seas5.ouce_s5 import OuceS5Data
# --------------------------
# stack the data
# --------------------------
# create the TRUE times
stacked = ds.stack(time=('initialisation_date', 'forecast_horizon'))
t = stacked.time.values
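# t is an array of (initialisation_date, forecast_horizon) tuples; unzip into two parallel arrays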
initialisation_dates = np.array(list(zip(*t))[0])
forecast_horizons = np.array(list(zip(*t))[1])
# time_2d = np.array([list(elem) for elem in t])
times = initialisation_dates + forecast_horizons
ds['valid_time'] = (
['initialisation_date', 'forecast_horizon'], times.reshape(312, 12)
)
ds.assign_coords(
time=(['initialisation_date', 'forecast_horizon'], times.reshape(312, 12))
)
stacked['time'] = times
stacked = stacked.assign_coords(
initialisation_date=('time', initialisation_dates))
stacked = stacked.assign_coords(forecast_horizon=('time', forecast_horizons))
# stacked = stacked.where(stacked['time.day'] == 1, drop=True)
stacked = stacked.dropna(dim='time', how='all')
# FROM THE MODULE
s = S5Preprocessor()
stacked = s.stack_time(ds)
# --------------------------
# map to n months ahead
# --------------------------
# map forecast horizons to months ahead
map_ = {
pd.Timedelta('28 days 00:00:00'): 1,
pd.Timedelta('29 days 00:00:00'): 1,
pd.Timedelta('30 days 00:00:00'): 1,
pd.Timedelta('31 days 00:00:00'): 1,
pd.Timedelta('59 days 00:00:00'): 2,
pd.Timedelta('60 days 00:00:00'): 2,
pd.Timedelta('61 days 00:00:00'): 2,
pd.Timedelta('62 days 00:00:00'): 2,
pd.Timedelta('89 days 00:00:00'): 3,
pd.Timedelta('90 days 00:00:00'): 3,
pd.Timedelta('91 days 00:00:00'): 3,
pd.Timedelta('92 days 00:00:00'): 3,
}
fhs = [pd.Timedelta(fh) for fh in stacked.forecast_horizon.values]
months = [map_[fh] for fh in fhs]
stacked = stacked.assign_coords(months_ahead=('time', months))
# SELECT ALL THE ONE MONTH FORECASTS
stacked.loc[dict(time=stacked.months_ahead == 1)]
stacked.loc[dict(time=stacked.months_ahead == 2)]
# select the forecasts n months ahead
var = 'tprate'
def get_n_timestep_ahead_data(ds: xr.Dataset, n_tstep: int,
tstep_coord_name: str = 'months_ahead') -> xr.Dataset:
""" Get the data for the n timesteps ahead """
assert tstep_coord_name in [c for c in ds.coords], \
'expect the number of timesteps ahead to have been calculated' \
f' already. Coords: {[c for c in ds.coords]}'
variables = [v for v in ds.data_vars]
all_nstep_list = []
for var in variables:
d_nstep = (
ds.loc[dict(time=ds[tstep_coord_name] == n_tstep)]
.rename({var: var + f'_{n_tstep}'})
)
all_nstep_list.append(d_nstep)
return xr.auto_combine(all_nstep_list)
def create_variables_for_n_timesteps_predictions(ds: xr.Dataset, tstep_coord_name: str = 'months_ahead') -> xr.Dataset:
assert all(
np.isin(
['initialisation_date', 'forecast_horizon', tstep_coord_name],
[c for c in ds.coords]
)), 'Expecting to have ' \
        f'initialisation_date, forecast_horizon, {tstep_coord_name} in ds.coords, ' \
f'currently: {[c for c in ds.coords]}'
timesteps = np.unique(ds[tstep_coord_name])
all_timesteps = []
for step in timesteps:
d = get_n_timestep_ahead_data(
ds, step, tstep_coord_name=tstep_coord_name)
d = d.drop(
['initialisation_date', 'forecast_horizon', tstep_coord_name])
all_timesteps.append(d)
return xr.auto_combine(all_timesteps)
# get variance and mean
import re
# check data_var ends with a digit ('tprate_1')
# ensure that prior preprocessing done!
match = re.compile(r'_\d')
variables = [v for v in ds.data_vars]
assert all([bool(match.search(v)) for v in variables]), 'Expect ' \
    'to have calculated the n month ahead for the variables in dataset. ' \
    f'Currently: {variables}'
def get_variance_and_mean_over_number(ds: xr.Dataset) -> xr.Dataset:
"""Collapse the 'number' dimension and return a Dataset with
(lat, lon, time) coords and two variables:
{var}_mean / {var}_std
"""
variables = [v for v in ds.data_vars]
# ensure that 'number' still exists in the coords
assert 'number' in [c for c in ds.coords], 'require `number` to '\
'be a coord in the Dataset object to collapse by mean/std'
# calculate mean and std collapsing the 'number' coordinate
predict_ds_list = []
for var in variables:
print(f"Calculating the mean / std for forecast variable: {var}")
mean_std = []
mean_std.append(
ds.mean(dim='number').rename({var: var + '_mean'})
)
mean_std.append(
ds.std(dim='number').rename({var: var + '_std'})
)
predict_ds_list.append(xr.auto_combine(mean_std))
return xr.auto_combine(predict_ds_list)
# --------------------------
# stack overflow question
# --------------------------
times = [
pd.to_datetime('2017-01-01'),
pd.to_datetime('2017-01-31'),
pd.to_datetime('2017-01-31'),
pd.to_datetime('2017-02-01'),
pd.to_datetime('2017-02-02'),
pd.to_datetime('2017-03-01'),
pd.to_datetime('2017-03-01'),
pd.to_datetime('2017-03-29'),
pd.to_datetime('2017-03-30'),
pd.to_datetime('2017-04-01'),
pd.to_datetime('2017-04-01'),
pd.to_datetime('2017-04-01'),
]
data = np.ones((12, 3, 3))
data[[1, 2, 4, 5, 7, 8], :, :] = np.nan
lat = [0, 1, 2]
lon = [0, 1, 2]
ds = xr.Dataset(
{'data': (['time', 'lat', 'lon'], data)},
coords={
'lon': lon,
'lat': lat,
'time': times,
}
)
ds.where(ds['time.day'] == 1, drop=True)
stacked.where(stacked['time.day'] == 1, drop=True)
# ------------------------------------------------------------------------------
# Select forecasts for a given month
# ------------------------------------------------------------------------------
###
# select all forecasts of a given time
# stack the `initialisation_date` and `forecast_horizon`
forecast_horizon = ds.forecast_horizon.values
initialisation_date = ds.initialisation_date.values
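# broadcast (n_init, 1) against (n_horizon,) to get the full grid of valid times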
valid_time = initialisation_date[:, np.newaxis] + forecast_horizon
stacked = ds.stack(time=('initialisation_date', 'forecast_horizon'))
stacked['time'] = stacked.valid_time
stacked = stacked.drop('valid_time')
# or
stacked = ds.stack(time=('initialisation_date', 'forecast_horizon'))
# stacked['time'] = stacked.valid_time
# select forecasts 28days ahead
stacked.sel(forecast_horizon=np.timedelta64(28, 'D'))
# select 'valid_time'
stacked.swap_dims({'time': 'valid_time'}).sel(valid_time='2018-04')
# MAM forecasts
mam = stacked.sel(time=np.isin(stacked['time.month'], [3, 4, 5]))
fig, ax = plt.subplots()
mam.tprate.mean(dim=['time', 'number']).plot(ax=ax)
# SON forecasts
son = stacked.sel(time=np.isin(stacked['time.month'], [9, 10, 11]))
fig, ax = plt.subplots()
son.tprate.mean(dim=['time', 'number']).plot(ax=ax)
#
#
ds.tprate.mean(dim=['number', 'time']).isel(step=0).plot()
timedeltas = [pd.to_timedelta(val) for val in d.step.values]
# select all forecasts of a given time
d.where(d.valid_time == '2018')
valid_time = np.array([pd.to_datetime(val) for val in d.valid_time.values])
# ------------------------------------------------------------------------------
# Testing preprocess
# ------------------------------------------------------------------------------
#####
s = S5Preprocessor()
dir_ = Path(
'data/raw/seasonal-monthly-single-levels/total_precipitation/2014/Y2014_M01_12.grib'
)
d = s.read_grib_file(dir_)
coords = [c for c in d.coords]
vars = [v for v in d.variables if v not in coords]
variable = '-'.join(vars)
subset_str = 'kenya'
output_path = s.create_filename(
dir_,
s.interim,
variable,
subset_name=subset_str if subset_str is not None else None
)
if 'latitude' in coords:
d = d.rename({'latitude': 'lat'})
if 'longitude' in coords:
d = d.rename({'longitude': 'lon'})
# 5. regrid (one variable at a time)
assert all(np.isin(['lat', 'lon'], [c for c in d.coords])), f"\
Expecting `lat` `lon` to be in d. dims : {[c for c in d.coords]}"
# regrid each variable individually
all_vars = []
for var in vars:
time = d[var].time
d_ = s.regrid(
d[var].to_dataset(name=var), regrid,
clean=False, reuse_weights=True,
)
d_ = d_.assign_coords(time=time)
all_vars.append(d_)
# merge the variables into one dataset
d2 = xr.merge(all_vars).sortby('initialisation_date')
##
#
s = S5Preprocessor()
var = 'tprate'
mfd = xr.open_mfdataset((s.interim / var).as_posix() + "/*.nc")
mfd = mfd.sortby('initialisation_date')
time = mfd[var].time
d3 = s.resample_time(
mfd, resample_length='M', upsampling=False,
time_coord='initialisation_date'
)
d3 = mfd.assign_coords(time=time)
d3.stack(time=['initialisation_date', 'forecast_horizon']).valid_time
|
[
"thomas.lees112@gmail.com"
] |
thomas.lees112@gmail.com
|
9469c7a4ab35d308fe7b7581530b40ccbad1382d
|
3763b55c2d801a15ab6b026dcd56d553bb5951e0
|
/signup/migrations/0001_initial.py
|
951e72aeabadec43455b10f144a5447df2d4604f
|
[] |
no_license
|
ankitgadewal/loginsignup
|
fb3d5fa4417ed4ba6ddee2951d8ce708f7dcf141
|
84401b1cf4180d4452f17f5b74b2a9d6dab156c6
|
refs/heads/master
| 2021-03-17T16:43:29.180147
| 2020-03-14T00:05:15
| 2020-03-14T00:05:15
| 247,003,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
# Generated by Django 3.0.3 on 2020-03-13 22:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Signup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=20)),
('password', models.CharField(max_length=20)),
],
),
]
|
[
"ankitgadewal@hotmail.com"
] |
ankitgadewal@hotmail.com
|
19febac08f14ba44a2f8758bd2bf7c7c1ad928ce
|
35ca4ddb7bcfd02137297c46fd1cc4fef9f50e03
|
/StackCharRec/net/CTCRecognizer_new.py
|
85fdb5fe42853e8ebdc2f6835ae3e5787e57a669
|
[] |
no_license
|
qzq2514/Patents
|
0ef0212088f04aa921821fbea360fccd042e9194
|
41abd6d3767e63ae3344c226bceeca1ff70ec206
|
refs/heads/master
| 2020-06-16T06:06:57.211129
| 2020-01-07T10:30:30
| 2020-01-07T10:30:30
| 195,497,168
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,435
|
py
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
class CTCRecognizer(object):
def __init__(self, is_training,keep_prop,num_classes,num_hidden):
self.is_training=is_training
self.num_classes=num_classes
self.num_hidden=num_hidden
self.keep_prop=keep_prop
def preprocess(self, inputs):
        # put the transpose inside preprocessing so inference doesn't need to transpose every time
# tran_inputs=tf.transpose(inputs,[0,2,1,3])
#
# NORMALIZER=0.017
# processed_inputs = tf.to_float(tran_inputs)
# red, green, blue = tf.split(processed_inputs, num_or_size_splits=3, axis=3)
# preprocessed_input = (tf.multiply(blue, 0.2989) +tf.multiply(green, 0.5870)+
# tf.multiply(red, 0.1140))* NORMALIZER
#preprocessed_input=tf.squeeze(preprocessed_input,axis=3)
return inputs
def block(self,inputs,output_channels,stride=1):
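        # residual-style block: a separable-conv path is concatenated with a 3x3 projection of the input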
        res_channels = output_channels - output_channels // 2
        block_net = slim.separable_convolution2d(inputs, None, [3, 3], stride=stride)
        block_net = slim.batch_norm(block_net)
        block_net = slim.convolution2d(block_net, output_channels // 2, [3, 3])
block_net = slim.batch_norm(block_net)
res_net=slim.convolution2d(inputs,res_channels,[3,3],stride=stride)
return tf.concat([block_net,res_net],axis=3)
    def LPR_small_block(self, inputs, out_channels):
        # channels = inputs.get_shape().as_list()[-1]
        small_block_net = slim.convolution2d(inputs,
                                             num_outputs=out_channels // 4, kernel_size=[1, 1], stride=1)
        small_block_net = slim.convolution2d(small_block_net,
                                             num_outputs=out_channels // 4, kernel_size=[3, 1], stride=1)
        small_block_net = slim.convolution2d(small_block_net,
                                             num_outputs=out_channels // 4, kernel_size=[1, 3], stride=1)
        small_block_net = slim.convolution2d(small_block_net,
                                             num_outputs=out_channels, kernel_size=[1, 1], stride=1)
        return small_block_net
def inference(self,inputs,seq_length):
print("CTCRecognizer_new")
with slim.arg_scope(self.CTC_arg_scope(is_training=self.is_training,
batch_norm_decay=0.8)):
print("input:",inputs)
net = slim.convolution2d(inputs, 64, kernel_size=[3,3], stride=1)
net = slim.max_pool2d(net, kernel_size=[3, 3], stride=1)
net = self.LPR_small_block(net,128)
net = slim.max_pool2d(net, kernel_size=[3, 3], stride=[2, 1])
net = self.LPR_small_block(net, 256)
net = self.LPR_small_block(net, 256)
net = slim.max_pool2d(net, kernel_size=[3, 3], stride=[1, 2])
net = slim.dropout(net,keep_prob=self.keep_prop)
net = slim.convolution2d(net,256,kernel_size=[4, 1],stride=1)
net = slim.dropout(net, keep_prob=self.keep_prop)
net = slim.convolution2d(net, self.num_classes, kernel_size=[4, 1], stride=1)
# logits = tf.reduce_mean(net, axis=2)
logits = slim.avg_pool2d(net, kernel_size=[1,15],stride=[1,15])
logits=tf.squeeze(logits, [2], name='SpatialSqueeze')
            # the final output must be shaped (max_time_step, batch_size, num_class) for the CTC loss downstream
logits = tf.transpose(logits, (1, 0, 2))
            # with max_step_downsampling_num=2, logits has shape (30, b, 38)
print("logits:", logits)
# input("Pause")
return logits
    def beam_search(self, logits, seq_len, is_merge=False):
decoded_logits,log_prob=tf.nn.ctc_beam_search_decoder(logits,seq_len,merge_repeated=is_merge)
return decoded_logits
def decode_a_seq(self,indexes, spars_tensor,chars):
decoded = []
for m in indexes:
# print("m:",m)
str_id = spars_tensor[1][m]
print(m,"---",str_id)
str = chars[str_id]
decoded.append(str)
        return decoded  # sparse_tensor[0] is the N*2 indices array
def decode_sparse_tensor(self,sparse_tensor,chars):
decoded_indexes = list()
current_i = 0
current_seq = []
# print(sparse_tensor)
        for offset, i_and_index in enumerate(sparse_tensor[0]):  # sparse_tensor[0] is the N*2 indices array
            i = i_and_index[0]  # each row belongs to one sample
            # print("i_and_index:", i_and_index)
            if i != current_i:  # current_i is the id of the current sample
                decoded_indexes.append(current_seq)
                current_i = i
                current_seq = list()  # current_seq collects the offsets of this sample's predictions in sparse_tensor's values
            current_seq.append(offset)  # the predicted values can then be looked up in sparse_tensor by these offsets
decoded_indexes.append(current_seq)
result = []
for index in decoded_indexes:
result.append(self.decode_a_seq(index, sparse_tensor,chars))
return result
def get_edit_distance_mean(self,decoded_logits_placeholder,sparse_labels_placeholder):
        # edit distance between the sequences encoded by the two sparse tensors; a useful metric when
        # prediction and label counts don't match, and safe to omit otherwise
edit_distance_mean = tf.reduce_mean(tf.edit_distance(tf.cast(decoded_logits_placeholder[0], tf.int32), sparse_labels_placeholder))
return edit_distance_mean
    # takes concrete values, not Tensors
    def get_accuracy(self, decoded_logits, sparse_labels, chars):
        # decode the sparse tensors to obtain the final prediction strings
        sparse_labels_list = self.decode_sparse_tensor(sparse_labels, chars)
        decoded_list = self.decode_sparse_tensor(decoded_logits, chars)
        true_number = 0
        if len(decoded_list) != len(sparse_labels_list):
            # print("len(decoded_list)", len(decoded_list), "len(sparse_labels_list)", len(sparse_labels_list),
            #       " prediction and label lengths don't match")
            return None  # the edit distance metric takes over in this case
        for idx, pred_number in enumerate(decoded_list):
            groundTruth_number = sparse_labels_list[idx]
            cur_correct = (pred_number == groundTruth_number)
            info_str = "{}:{}-({}) <-------> {}-({})".\
                format(cur_correct, groundTruth_number, len(groundTruth_number), pred_number, len(pred_number))
            print(info_str)
            if cur_correct:
                true_number = true_number + 1
        accuracy = true_number * 1.0 / len(decoded_list)
        return accuracy
# logits:(24, 50, 67)
    # sparse_ground_truth: a tf.SparseTensor, i.e. a triple of (indices, values, shape)
    def loss(self, logits, sparse_ground_truth, seq_len):
        loss_all = tf.nn.ctc_loss(labels=sparse_ground_truth, inputs=logits, sequence_length=seq_len)
        loss_mean = tf.reduce_mean(loss_all)
        # tf.add_to_collection("Loss", loss_mean)
        # loss_all = tf.add_n(tf.get_collection("Loss"), name="total_loss")
        # compute the regularization loss
        # regularization_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        # loss_all = loss_mean + regularization_loss
        return loss_mean
def CTC_arg_scope(self,is_training,weight_decay=0.0001,batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,batch_norm_scale=True):
batch_norm_params={
'is_training':is_training,
'decay':batch_norm_decay,
'epsilon':batch_norm_epsilon,
'scale':batch_norm_scale,
# 'updates_collections:':tf.GraphKeys.UPDATE_OPS
}
with slim.arg_scope(
[slim.convolution2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm],**batch_norm_params) :
with slim.arg_scope([slim.max_pool2d],padding="SAME") as arg_sc:
return arg_sc
|
[
"qzq2514@outlook.com"
] |
qzq2514@outlook.com
|
22fdd6c6999df03b964c1e4e91d87db591d5d103
|
a5296e51c4d017411484025652bc04e03b3dc1e5
|
/setup.py
|
587ed30b412b0db70efc46ca4f747ff1700261ca
|
[
"MIT"
] |
permissive
|
multiformats/py-multicodec
|
de3e53c2a54ad6c495af23159ce4fe8da16fcefb
|
4958443f185f6e85bd2c59f86749faceba446cf1
|
refs/heads/master
| 2023-05-14T00:58:36.171732
| 2023-05-09T09:09:35
| 2023-05-09T09:09:35
| 102,285,306
| 18
| 11
|
MIT
| 2023-05-09T09:08:26
| 2017-09-03T18:25:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'varint>=1.0.2,<2.0.0',
'six>=1.10.0,<2.0',
'morphys>=1.0,<2.0',
]
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest',
]
setup(
name='py-multicodec',
version='0.2.1',
description="Multicodec implementation in Python",
long_description=readme + '\n\n' + history,
author="Dhruv Baldawa",
author_email='dhruv@dhruvb.com',
url='https://github.com/multiformats/py-multicodec',
packages=find_packages(include=['multicodec']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='multicodec',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
|
[
"dhruvbaldawa@gmail.com"
] |
dhruvbaldawa@gmail.com
|
722d489f696393dd633f0788bf8c5dd68afecbff
|
a665d491684fc4db553c19a9e23d539555072b29
|
/apps/fb_chatbot_event_tdc/management/commands/bot_event_tdc_delete_persistent_menu.py
|
d8677d96ace755eff584eb364d7bfb5f22c636d4
|
[] |
no_license
|
chester-visualsquares/demo
|
33255ffbc9d04e31d15a3d6922ed14e74a2e72fb
|
06962b4eeaeba6a5ee9ba7435e6ae6aeff84a7e3
|
refs/heads/master
| 2020-03-23T08:05:32.356492
| 2018-06-03T10:02:39
| 2018-06-03T10:02:39
| 141,307,303
| 0
| 0
| null | 2018-07-17T15:15:12
| 2018-07-17T15:15:12
| null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
import logging
import requests
from django.core.management.base import BaseCommand
from fb_chatbot_event_tdc.models import FacebookPage
logger = logging.getLogger('django.command')
class Command(BaseCommand):
args = '<FacebookPage_pk>'
help = ('Delete the persistent menu of the Messenger. Reference:'
'https://developers.facebook.com/docs/messenger-platform/'
'thread-settings/persistent-menu')
def handle(self, fb_pk, **options):
access_token = FacebookPage.objects.get(pk=fb_pk).access_token
params = {
'setting_type': 'call_to_actions',
'thread_state': 'existing_thread',
}
url = ('https://graph.facebook.com/v2.8/me/'
'thread_settings?access_token=%s' % access_token)
response = requests.delete(url, data=params)
self.stdout.write(response.content)
|
[
"no001hk@gmail.com"
] |
no001hk@gmail.com
|
b946fc2f4af36735afe336f5f39229ecf95bfc36
|
99b834a7c289b282aefc8c8a03612540698103c4
|
/gift_app/manage.py
|
38e62ce26486a1fdd6b37966d973440bd4adf12e
|
[] |
no_license
|
nrdhm/ybs_second_task
|
5b84541f78c136ce46f7f94028b59571076f049e
|
7b2e17845fc6fda1d9ed71177eeb9cbc737fb9a4
|
refs/heads/master
| 2022-02-22T06:12:43.923659
| 2019-08-25T09:23:51
| 2019-08-25T09:23:51
| 202,640,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import asyncio
import click
from injector import Injector
import gift_app.storage as storage_module
from gift_app.providers import ApplicationModule
from gift_app.storage import Storage
@click.group()
def cli():
pass
@cli.command()
def init_db():
storage = _get_storage()
async def go():
await storage.initialize()
async with storage.pool.acquire() as conn:
await storage_module.create_tables(conn)
asyncio.run(go())
@cli.command()
def drop_db():
async def go():
storage = _get_storage()
await storage.initialize()
async with storage.pool.acquire() as conn:
await storage_module.drop_tables(conn)
asyncio.run(go())
def _get_storage() -> Storage:
injector = Injector(modules=[ApplicationModule])
storage = injector.get(Storage)
click.echo(storage.config)
return storage
if __name__ == "__main__":
cli()
|
[
"nortis@yandex.ru"
] |
nortis@yandex.ru
|
476daee09a7b317ae29bc564caa3ecfcaa7b8661
|
0d19301c70152d8fe734fde1c729f393438c8fcc
|
/input_data.py
|
695fcbdeedacc2c801c5ebc27d6bee82566894f4
|
[] |
no_license
|
artemZholus/ladder-tensorflow
|
1456feeff948050228c7240de9783cd26832a44b
|
c3b0a71975cfe2f62b13cdb750628c46d5fee6df
|
refs/heads/master
| 2021-03-30T16:09:02.264693
| 2017-07-23T16:38:13
| 2017-07-23T16:38:13
| 84,014,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,642
|
py
|
"""Functions for downloading and reading MNIST data."""
import gzip
import os
import urllib.request, urllib.parse, urllib.error
import numpy
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
def _read32(bytestream):
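    # MNIST headers store sizes as big-endian uint32 values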
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def extract_labels(filename, one_hot=False):
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels)
return labels
class DataSet(object):
def __init__(self, images, labels, fake_data=False):
if fake_data:
self._num_examples = 10000
else:
# assert images.shape[0] == labels.shape[0], (
# "images.shape: %s labels.shape: %s" % (images.shape,
# labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
# assert images.shape[3] == 1
# images = images.reshape(images.shape[0],
# 784)
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
# images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1.0 for _ in range(784)]
fake_label = 0
return [fake_image for _ in range(batch_size)], [
fake_label for _ in range(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
class SemiDataSet(object):
def __init__(self, x, y, n_labeled, n_classes):
self.n_labeled = n_labeled
        # Unlabeled DataSet
self.unlabeled_ds = DataSet(x, numpy.zeros((len(x),)))
# Labeled DataSet
self.num_examples = len(y)
# indices = numpy.arange(self.num_examples)
# shuffled_indices = numpy.random.permutation(indices)
# x = x[shuffled_indices]
# y = y[shuffled_indices]
# y_ = numpy.array([numpy.arange(n_classes)[l == 1][0] for l in y])
# idx = indices[y_ == 0][:5]
# n_classes = y_.max() + 1
# n_from_each_class = n_labeled // n_classes
# i_labeled = []
# for c in range(n_classes):
# i = indices[y_ == c][:n_from_each_class]
# i_labeled += list(i)
# l_images = x[i_labeled]
# l_labels = y[i_labeled]
l_images = x[:len(y)]
l_labels = y
self.labeled_ds = DataSet(l_images, l_labels)
def next_batch(self, batch_size):
unlabeled_images, _ = self.unlabeled_ds.next_batch(batch_size)
if batch_size > self.n_labeled:
labeled_images, labels = self.labeled_ds.next_batch(self.n_labeled)
else:
labeled_images, labels = self.labeled_ds.next_batch(batch_size)
images = numpy.vstack([labeled_images, unlabeled_images])
return images, labels
def read_data_sets(train_dir, n_labeled=100, fake_data=False, one_hot=False):
class DataSets(object):
pass
data_sets = DataSets()
if fake_data:
data_sets.train = DataSet([], [], fake_data=True)
data_sets.validation = DataSet([], [], fake_data=True)
data_sets.test = DataSet([], [], fake_data=True)
return data_sets
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 0
local_file = maybe_download(TRAIN_IMAGES, train_dir)
train_images = extract_images(local_file)
local_file = maybe_download(TRAIN_LABELS, train_dir)
train_labels = extract_labels(local_file, one_hot=one_hot)
local_file = maybe_download(TEST_IMAGES, train_dir)
test_images = extract_images(local_file)
local_file = maybe_download(TEST_LABELS, train_dir)
test_labels = extract_labels(local_file, one_hot=one_hot)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
data_sets.train = SemiDataSet(train_images, train_labels, n_labeled, 10)
data_sets.validation = DataSet(validation_images, validation_labels)
data_sets.test = DataSet(test_images, test_labels)
return data_sets
|
[
"zholus@rain.ifmo.ru"
] |
zholus@rain.ifmo.ru
|
66aa66882e164b7ba5a2e59dffbc38c3a2de5e88
|
364e81cb0c01136ac179ff42e33b2449c491b7e5
|
/spell/branches/1.5/spell/spell/utils/getch.py
|
5c987c69b5a5b76082ee96984d748f708592c769
|
[] |
no_license
|
unnch/spell-sat
|
2b06d9ed62b002e02d219bd0784f0a6477e365b4
|
fb11a6800316b93e22ee8c777fe4733032004a4a
|
refs/heads/master
| 2021-01-23T11:49:25.452995
| 2014-10-14T13:04:18
| 2014-10-14T13:04:18
| 42,499,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,330
|
py
|
"""
FILE: getch.py
PACKAGE: spell.utils.getch
PROJECT: SPELL
Copyright (C) 2008, 2010 SES ENGINEERING, Luxembourg S.A.R.L.
This file is part of SPELL.
This library is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License and GNU General Public License (to which the GNU Lesser
General Public License refers) along with this library.
If not, see <http://www.gnu.org/licenses/>.
"""
import sys
UP = '-up-'
DOWN = '-down-'
LEFT = '-left-'
RIGHT = '-right-'
ESC = '-esc-'
ENTER = '-enter-'
TAB = '-tab-'
################################################################################
class _Getch:
"""
Gets a single character from standard input. Does not echo to the
screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
################################################################################
class _GetchCommon:
scanCode = False
def echo(self, ch):
o = ord(ch)
if self.scanCode:
if o==75:
result = LEFT
elif o==77:
result = RIGHT
elif o==72:
result = UP
elif o==80:
result = DOWN
else:
result = ch
else:
if o==13 or o==10:
sys.stdout.write('\n')
result = ENTER
elif o==9:
sys.stdout.write('\t')
result = TAB
elif o==27:
result = ESC
else:
sys.stdout.write(ch)
result = ch
self.scanCode = False
return result
################################################################################
class _GetchUnix(_GetchCommon):
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
o = ord(ch)
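            # 0x00 / 0xE0 prefix an extended key (arrows etc.); the real scan code follows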
if (o == 0) or (o == 224):
self.scanCode = True
ch = sys.stdin.read(1)
ch = self.echo(ch)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
################################################################################
class _GetchWindows(_GetchCommon):
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
ch = msvcrt.getch()
o = ord(ch)
if (o == 0) or (o == 224):
self.scanCode = True
ch = msvcrt.getch()
ch = self.echo(ch)
return ch
getch = _Getch()
|
[
"rafael.chinchilla@gmail.com"
] |
rafael.chinchilla@gmail.com
|
89db16bf4175545696646d4676ff3cf6f5996ac7
|
cd83397433ee524204b3848f526d7811f3dc44ea
|
/dbdiff/serializers/base.py
|
ed4f8abaea274231d9f39fb72000b6ee690f5f15
|
[] |
no_license
|
yourlabs/django-dbdiff
|
64a141ad7fe0bf4dc80a8865afeb0c2eb8001ac5
|
e94b4e9fde75cc742efe6177ab5a6beb51b80413
|
refs/heads/master
| 2020-12-29T02:42:39.861973
| 2019-01-31T03:02:33
| 2019-01-31T03:02:33
| 48,894,459
| 9
| 4
| null | 2018-11-18T01:38:39
| 2016-01-02T01:02:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,654
|
py
|
"""Shared code for serializers."""
import collections
import datetime
import decimal
class BaseSerializerMixin(object):
"""Serializer mixin for predictible and cross-db dumps."""
@classmethod
def recursive_dict_sort(cls, data):
"""
Return a recursive OrderedDict for a dict.
Django's default model-to-dict logic - implemented in
django.core.serializers.python.Serializer.get_dump_object() - returns a
        dict; this app registers a slightly modified version of the default
json serializer which returns OrderedDicts instead.
"""
ordered_data = collections.OrderedDict(sorted(data.items()))
for key, value in ordered_data.items():
if isinstance(value, dict):
ordered_data[key] = cls.recursive_dict_sort(value)
return ordered_data
@classmethod
def remove_microseconds(cls, data):
"""
Strip microseconds from datetimes for mysql.
MySQL doesn't have microseconds in datetimes, so dbdiff's serializer
removes microseconds from datetimes so that fixtures are cross-database
compatible which make them usable for cross-database testing.
"""
for key, value in data['fields'].items():
if not isinstance(value, datetime.datetime):
continue
data['fields'][key] = datetime.datetime(
year=value.year,
month=value.month,
day=value.day,
hour=value.hour,
minute=value.minute,
second=value.second,
tzinfo=value.tzinfo
)
@classmethod
def normalize_decimals(cls, data):
"""
        Strip trailing zeros for consistency.
In addition, dbdiff serialization forces Decimal normalization, because
trailing zeros could happen in inconsistent ways.
"""
for key, value in data['fields'].items():
if not isinstance(value, decimal.Decimal):
continue
if value % 1 == 0:
data['fields'][key] = int(value)
else:
data['fields'][key] = value.normalize()
def get_dump_object(self, obj):
"""
Actual method used by Django serializers to dump dicts.
        By overriding this method, we're able to run our various
data dump predictability methods.
"""
data = super(BaseSerializerMixin, self).get_dump_object(obj)
self.remove_microseconds(data)
self.normalize_decimals(data)
data = self.recursive_dict_sort(data)
return data
|
[
"jamespic@gmail.com"
] |
jamespic@gmail.com
|
1f9ec68748b277619d402576f66813c50b96a210
|
905987ef54963ca45ec269e1aaf9bf52b32cf10d
|
/favor/apps.py
|
31d7e72a6ffbeb6223493934ad333a97227e2d3a
|
[] |
no_license
|
winglq/site
|
77fd0d94145cc542c162b766aa51765276bff721
|
48337832c9a1b096ab37f1838510f1678d25bc47
|
refs/heads/master
| 2021-01-20T21:12:46.721860
| 2017-04-15T13:21:27
| 2017-04-15T13:21:27
| 64,014,668
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class FavorConfig(AppConfig):
name = 'favor'
|
[
"winglq@gmail.com"
] |
winglq@gmail.com
|
4b0abd262747d285c524c151ba6c1d6e5fda3e59
|
b557945a0979a5a94c0298ca037d590ef7c5ff69
|
/business-logic-server/icarus_backend/user/userViewSchemas.py
|
69d6b46798c85887767f36e8cd63a73660624d2a
|
[
"MIT"
] |
permissive
|
samcrane8/FlyRight
|
2147a8d230fbd86c9ca3e5b2c74381a4edc752ef
|
afa056045899859107dddbbd21a89d31dbce74c4
|
refs/heads/master
| 2022-12-19T00:13:57.468534
| 2019-11-25T19:27:43
| 2019-11-25T19:27:43
| 185,258,527
| 21
| 5
|
MIT
| 2022-12-07T21:04:13
| 2019-05-06T19:26:04
|
Vue
|
UTF-8
|
Python
| false
| false
| 528
|
py
|
from schema import And, Optional
register_user_schema = {
"username": And(str),
"password": And(str),
"email": And(str),
"first_name": And(str),
"last_name": And(str)
}
update_user_info_schema = {
Optional("username"): And(str),
Optional("password"): And(str),
Optional("email"): And(str),
Optional("picture_url"): And(str),
Optional("first_name"): And(str),
Optional("last_name"): And(str)
}
change_password_schema = {
"old_password": And(str),
"new_password": And(str)
}
|
[
"samcrane8@gmail.com"
] |
samcrane8@gmail.com
|
80ba4626df8b1704b0a47f7c2210a1d81421a852
|
7c6b70a535823998322d9166fa4e07f6a078dd58
|
/find_uap.py
|
c3b66a20bd310e960f8c774c91031979dd891ee4
|
[] |
no_license
|
zhanzheng8585/universal_pytorch
|
fe1c76a20be7c530932dc334af7ad3d883349eda
|
6ae4dbb9795026a895b187063a9d6bd1e2842dd7
|
refs/heads/master
| 2022-01-26T10:38:28.835035
| 2019-07-15T13:41:38
| 2019-07-15T13:41:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,598
|
py
|
#############################################################
import utils
import torch.backends.cudnn as cudnn
cudnn.enabled = False
##############################################################
from docopt import docopt
import time
import torch
import torchvision
import numpy as np
import torch.optim as optim
docstr = """Find Universal Adverserial Perturbation for Image Classification models trained in pytorch.
Usage:
find_uap.py <model> <im_path> <im_list> [options]
find_uap.py (-h | --help)
find_uap.py --version
Options:
-h --help Show this screen.
--version Show version.
--data_dep=<bool> Use data for finding UAP or not.[default: False]
--save_loc=<str> Location for saving the UAP as FloatTensor[default: same_dir]
--batch_size=<int> batch_size for processing while forming UAP in gpu[default: 25]
--gpu=<bool> Which GPU to use[default: 3]
--max_iter_uni=<int> maximum epochs to train for[default: 10]
--xi=<float> controls the l_p magnitude of the perturbation[default: 0.1866]
--delta=<float> controls the desired fooling rate[default: 0.2]
--p=<float> norm to be used for the UAP[default: inf]
--num_classes=<int> For deepfool: num_classes (limits the number of classes to test against)[default: 10]
--overshoot=<float> For deepfool: used as a termination criterion to prevent vanishing updates[default: 0.02]
--max_iter_df=<int> For deepfool: maximum number of iterations for deepfool[default: 10]
    --t_p=<float>                     For batch deepfool: truth percentage, i.e. the minimum fraction of flipped labels in a batch.[default: 0.2]
"""
if __name__ == '__main__':
start_time = time.time()
args = docopt(docstr, version='v1.0')
torch.cuda.set_device(int(args['--gpu']))
net = utils.get_model(args['<model>'])
location_img = args['<im_path>']
img_list = args['<im_list>']
max_iter_uni=int(args['--max_iter_uni'])
xi=float(args['--xi'])
delta=float(args['--delta'])
if(args['--p'] == 'inf'):
p = np.inf
else:
p=int(args['--p'])
if(args['--save_loc'] == 'same_dir'):
save_loc = '.'
else:
save_loc = args['--save_loc']
num_classes=int(args['--num_classes'])
overshoot=float(args['--overshoot'])
max_iter_df=int(args['--max_iter_df'])
t_p=float(args['--t_p'])
file = open(img_list)
img_names = []
for f in file:
img_names.append(f.split(' ')[0])
img_names = [location_img +x for x in img_names]
st = time.time()
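    # NOTE: eval() on a CLI string is fragile; this expects the literal 'True' or 'False'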
if(eval(args['--data_dep'])):
batch_size = 1
uap = utils.universal_perturbation_data_dependant(img_names, net, xi=xi, delta=delta, max_iter_uni =max_iter_uni,
p=p, num_classes=num_classes, overshoot=overshoot,
max_iter_df=max_iter_df,init_batch_size = batch_size,t_p = t_p)
else:
batch_size = int(args['--batch_size'])
uap = utils.universal_perturbation_data_independant(img_names, net,delta=delta, max_iter_uni = max_iter_uni, xi=xi,
p=p, num_classes=num_classes, overshoot=overshoot,
max_iter_df=max_iter_df,init_batch_size=batch_size)
    print('Found UAP. Total time:', time.time() - st)
uap = uap.data.cpu()
torch.save(uap,save_loc+'perturbation_'+args['<model>']+'.pth')
|
[
"adityaganeshan@gmail.com"
] |
adityaganeshan@gmail.com
|
3f00c8d560d64f2325e632791586c0e1b4eb96b2
|
c67a53ad4a550cba17b5826d91baf8ea61d32168
|
/main.py
|
557ae68f951911e441957039106640bd419a960c
|
[] |
no_license
|
aiueoa/examplebase_bot
|
b826a3ddaa3522d86a458327bf7873ec18837af1
|
73d5816a15c12b50b31da859209958b40a4a7653
|
refs/heads/master
| 2022-04-13T08:06:19.332221
| 2020-03-08T02:49:35
| 2020-03-08T02:49:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
# coding: utf-8
import pandas as pd
import MeCab
from gensim.models.word2vec import Word2Vec
from gensim.models.keyedvectors import KeyedVectors
import item  # custom module
mecab = MeCab.Tagger('-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd')
print("学習済みモデル読み込み")
model_path = 'model/word2vec.gensim.model'
model = Word2Vec.load(model_path)
print("読み込み終わり")
df = pd.read_csv('data.csv')
df = df.dropna()
def ans(text, df):
    text_wakati = item.morpheme_list(text)  # morphological analysis of the input sentence
wmd = lambda x: model.wv.wmdistance(text_wakati, x)
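    # pick the stored question whose tokens are closest to the input by Word Mover's Distance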
result = df['input_wakati'].map(wmd).idxmin()
return df['output'].iloc[result]
while True:
text = input(">>>")
if text == "quit":
break
r = ans(text, df)
print(r)
|
[
"kosekitau@gmail.com"
] |
kosekitau@gmail.com
|
557fea3c9fe7c4d56775ecc15fa1458ff3c3c11d
|
f260d477a80eb0baeae9bb7721a2bd12414ffe44
|
/dictionary_practice.py
|
a16c6893f803605466b19e373897ee5eef0a527a
|
[] |
no_license
|
emilymorgado/dictionary_practice.py
|
752e0e029b722ed33640d8e1d3bb5e3dc8e39cd5
|
c1a3eb3b80022c6e4ea3c96c563f5df44693bb6e
|
refs/heads/master
| 2021-01-10T12:38:29.905706
| 2015-09-30T03:37:18
| 2015-09-30T03:37:18
| 43,409,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
colors = {
"green": {
"pink": ["red", "white"],
"maroon": ["red", "purple"]
},
"grey": {
"sky": ["blue", "white"],
"purple": ["blue", "red"]
}
}
def print_colors(input_dict):
    """Iterates over both the dictionaries and the lists and prints logical sentences.
    dict.items() yields (key, value) pairs, making looping possible"""
    for lead_color, secondary_colors in input_dict.items():
        # print lead_color, "is unrelated to the others"
        # the above line runs, but the next print is even better!
        printable_secondary_colors = secondary_colors.keys()
        print lead_color, "is unrelated to {} and {}".format(printable_secondary_colors[0], printable_secondary_colors[1])
        for secondary_color, color_mixes in secondary_colors.items():
            print "{} is made of {} and {}".format(
                secondary_color,
                color_mixes[0],
                color_mixes[1]
            )
print_colors(colors)
# print colors["red"]
# {'pink': ['red', 'white'], 'maroon': ['red', 'purple']}
# print colors["red"]["pink"]
# ['red', 'white']
# print colors["red"]["pink"][1]
# 'white'
# print colors["red"]["pink"][1][0]
# 'w'
|
[
"emilymorgado@Emily-the-elusives-MacBook-Pro.local"
] |
emilymorgado@Emily-the-elusives-MacBook-Pro.local
|
951177eca75ca3f5c9f4e40a2263151f8faf76c0
|
d05c946e345baa67e7894ee33ca21e24b8d26028
|
/ethical-hacking/get-wifi-passwords/get_wifi_passwords.py
|
0afd70caf1d0365dd1e562a2de798e75af6facc2
|
[
"MIT"
] |
permissive
|
x4nth055/pythoncode-tutorials
|
327255550812f84149841d56f2d13eaa84efd42e
|
d6ba5d672f7060ba88384db5910efab1768c7230
|
refs/heads/master
| 2023-09-01T02:36:58.442748
| 2023-08-19T14:04:34
| 2023-08-19T14:04:34
| 199,449,624
| 1,858
| 2,055
|
MIT
| 2023-08-25T20:41:56
| 2019-07-29T12:35:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,167
|
py
|
import subprocess
import os
import re
from collections import namedtuple
import configparser
def get_windows_saved_ssids():
"""Returns a list of saved SSIDs in a Windows machine using netsh command"""
# get all saved profiles in the PC
output = subprocess.check_output("netsh wlan show profiles").decode()
ssids = []
profiles = re.findall(r"All User Profile\s(.*)", output)
for profile in profiles:
# for each SSID, remove spaces and colon
ssid = profile.strip().strip(":").strip()
# add to the list
ssids.append(ssid)
return ssids
def get_windows_saved_wifi_passwords(verbose=1):
"""Extracts saved Wi-Fi passwords saved in a Windows machine, this function extracts data using netsh
command in Windows
Args:
verbose (int, optional): whether to print saved profiles real-time. Defaults to 1.
Returns:
[list]: list of extracted profiles, a profile has the fields ["ssid", "ciphers", "key"]
"""
ssids = get_windows_saved_ssids()
Profile = namedtuple("Profile", ["ssid", "ciphers", "key"])
profiles = []
for ssid in ssids:
ssid_details = subprocess.check_output(f"""netsh wlan show profile "{ssid}" key=clear""").decode()
# get the ciphers
ciphers = re.findall(r"Cipher\s(.*)", ssid_details)
# clear spaces and colon
ciphers = "/".join([c.strip().strip(":").strip() for c in ciphers])
# get the Wi-Fi password
key = re.findall(r"Key Content\s(.*)", ssid_details)
# clear spaces and colon
try:
key = key[0].strip().strip(":").strip()
except IndexError:
key = "None"
profile = Profile(ssid=ssid, ciphers=ciphers, key=key)
if verbose >= 1:
print_windows_profile(profile)
profiles.append(profile)
return profiles
def print_windows_profile(profile):
"""Prints a single profile on Windows"""
print(f"{profile.ssid:25}{profile.ciphers:15}{profile.key:50}")
def print_windows_profiles(verbose):
"""Prints all extracted SSIDs along with Key on Windows"""
print("SSID CIPHER(S) KEY")
print("-"*50)
get_windows_saved_wifi_passwords(verbose)
def get_linux_saved_wifi_passwords(verbose=1):
"""Extracts saved Wi-Fi passwords saved in a Linux machine, this function extracts data in the
`/etc/NetworkManager/system-connections/` directory
Args:
verbose (int, optional): whether to print saved profiles real-time. Defaults to 1.
Returns:
[list]: list of extracted profiles, a profile has the fields ["ssid", "auth-alg", "key-mgmt", "psk"]
"""
network_connections_path = "/etc/NetworkManager/system-connections/"
fields = ["ssid", "auth-alg", "key-mgmt", "psk"]
Profile = namedtuple("Profile", [f.replace("-", "_") for f in fields])
profiles = []
for file in os.listdir(network_connections_path):
data = { k.replace("-", "_"): None for k in fields }
config = configparser.ConfigParser()
config.read(os.path.join(network_connections_path, file))
for _, section in config.items():
for k, v in section.items():
if k in fields:
data[k.replace("-", "_")] = v
profile = Profile(**data)
if verbose >= 1:
print_linux_profile(profile)
profiles.append(profile)
return profiles
def print_linux_profile(profile):
"""Prints a single profile on Linux"""
print(f"{str(profile.ssid):25}{str(profile.auth_alg):5}{str(profile.key_mgmt):10}{str(profile.psk):50}")
def print_linux_profiles(verbose):
"""Prints all extracted SSIDs along with Key (PSK) on Linux"""
print("SSID AUTH KEY-MGMT PSK")
print("-"*50)
get_linux_saved_wifi_passwords(verbose)
def print_profiles(verbose=1):
if os.name == "nt":
print_windows_profiles(verbose)
elif os.name == "posix":
print_linux_profiles(verbose)
else:
        raise NotImplementedError("Code only works for either Linux or Windows")
if __name__ == "__main__":
print_profiles()
|
[
"fullclip@protonmail.com"
] |
fullclip@protonmail.com
|
ebafaa62b947ba7156ff0a056e3ac8e2bdaac1ea
|
52398c45a9ee39022efe5f909ef2cd29042e7d84
|
/lambda-run_instance_handler/lambda_function.py
|
212ebb5007a73403fbf8de5951ca3077dfbc04ac
|
[
"Unlicense"
] |
permissive
|
hudl/Tyr
|
f3a42d84430619915f4d9a34193aa626e069a9fb
|
7a665e5f55a269ccfa2a001f79e4396e728aa39b
|
refs/heads/v2
| 2021-09-08T20:25:16.713802
| 2021-09-06T11:58:56
| 2021-09-06T11:58:56
| 31,391,696
| 10
| 4
|
Unlicense
| 2021-09-06T11:58:57
| 2015-02-26T22:15:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
import urllib.request
import json
import boto3
import os
ec2 = boto3.client('ec2', region_name='us-east-1')
s3 = boto3.client('s3')
def plant_flag(ticket):
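    # NOTE: this bare return makes the put_object below unreachable (flag planting is effectively disabled)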
return
s3.put_object(
Body='',
Bucket=os.environ['S3_OUTPUT_BUCKET'],
Key=ticket
)
def ticket_in_flight(ticket):
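    # NOTE: returning False here skips the S3 lookup below, so every ticket is treated as new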
return False
resp = s3.list_objects_v2(
Bucket=os.environ['S3_OUTPUT_BUCKET'],
MaxKeys=123,
Prefix=ticket
)
return len(resp.get('Contents', [])) > 0
def provision(event):
payload = json.loads(event['Records'][0]['body'])
if 'ticket' not in payload:
raise Exception('No ticket provided')
ticket = payload['ticket']
    print(f'Received ticket {ticket}')
#if ticket_in_flight(ticket):
# print('Ticket already being processed')
# return
#plant_flag(ticket)
input_ = payload['input']
input_['MinCount'] = 1
input_['MaxCount'] = 1
resp = {}
try:
r = ec2.run_instances(**input_)
print(f'Provisioned instance: {r["Instances"][0]["InstanceId"]}')
resp['instance_id'] = r['Instances'][0]['InstanceId']
if len(s3.list_objects_v2(
Bucket=os.environ['S3_OUTPUT_BUCKET'],
MaxKeys=123,
Prefix=ticket
).get('Contents', [])) > 0:
return
except Exception as ex:
resp['error'] = {
'message': str(ex),
'type': ex.__class__.__name__
}
s3.put_object(
Body=json.dumps(resp),
Bucket=os.environ['S3_OUTPUT_BUCKET'],
Key=payload['ticket']
)
def lambda_handler(event, context):
try:
provision(event)
except Exception as ex:
s3.put_object(
Body=json.dumps({
'error': {
'message': str(ex),
'type': ex.__class__.__name__
}
}),
Bucket=os.environ['S3_OUTPUT_BUCKET'],
Key='error'
)
|
[
"derek.nordgren@hudl.com"
] |
derek.nordgren@hudl.com
|
f7bac8c5536e898c08937724852e488e3150be07
|
b52987ca7ffe71f3f75f5271d86aca619191920e
|
/james_login/login/migrations/0006_products_mysku.py
|
59f74288fc0d7192e52178648498f16dd9429243
|
[] |
no_license
|
turpure/project
|
6688bbd481d60cba3bad8a2efd3e3a656820b752
|
782318b430bb886c3c537180f4f117a6021fe292
|
refs/heads/master
| 2020-06-21T05:54:48.214234
| 2017-06-21T03:36:50
| 2017-06-21T03:36:50
| 74,800,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-28 07:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0005_auto_20160928_1458'),
]
operations = [
migrations.AddField(
model_name='products',
name='mysku',
field=models.CharField(max_length=20, null=True),
),
]
|
[
"p812816627@163..com"
] |
p812816627@163..com
|
35aa5862cd45d61f373ae1e8f950f5f446d752e7
|
395c3fcf438a658339cac2c94eb84d81cc3ee173
|
/portal/migrations/0004_remove_datosclientes_comprobantep.py
|
2b1a4b8161f151f3243cfe5e626c8a3a2cfd8b96
|
[] |
no_license
|
diegocent07/appclient
|
a1015cc1fbb3b9c93978a0d51ea4b28e2b146217
|
42842b53ff6514d823998f8d550639e95ea44f8e
|
refs/heads/master
| 2023-02-15T06:42:35.560622
| 2021-01-13T14:59:38
| 2021-01-13T14:59:38
| 292,563,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
# Generated by Django 3.0.6 on 2020-09-07 13:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portal', '0003_datosclientes_comprobantep'),
]
operations = [
migrations.RemoveField(
model_name='datosclientes',
name='comprobantep',
),
]
|
[
"diego@topdek.com.br"
] |
diego@topdek.com.br
|
052528f4ac128b97f49e84e6f38cd025620a410f
|
82f989cd3e0dbf121e82daf70494ac51850bd63a
|
/app.py
|
7db186e2441e1b8a7f0645227b02ec4d554eed70
|
[
"MIT"
] |
permissive
|
PurpleGray/telegram-google-calendar-bot
|
264c7dfe938385211460852fb518356fa86f1a1b
|
165c0b5ccba3e81e19e79816ca2e53dd2c3ed530
|
refs/heads/master
| 2021-03-27T19:27:31.591373
| 2018-02-27T15:07:47
| 2018-02-27T15:07:47
| 112,594,779
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
import telebot
from telebot import types
from config import Config
import logging
from flask import Flask
from flask_peewee.db import Database
import uuid
# Create Flask microserver
app = Flask(__name__)
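# NOTE: a fresh random secret key per process invalidates existing sessions on every restart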
app.secret_key = str(uuid.uuid4())
app.config.from_object('config.Configuration')
# Create DB
db = Database(app)
# Instantiate config
config = Config.instance()
# Configure bot logger
logger = telebot.logger
telebot.logger.setLevel(logging.DEBUG)
# Instantiate bot
bot = telebot.TeleBot(Config.instance().telegram_api_token)
|
[
"ValterBishop@gmail.com"
] |
ValterBishop@gmail.com
|
14f2b7f291c06d0f06f8de1f1cf0f5f815e78c31
|
e9086da2bf28de83356bb08d4aafac48c463a80c
|
/tests/bench/test_aead.py
|
f93c4e8892eb62d81fb09b7d258b81eae7c643be
|
[
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
pyca/cryptography
|
e5797eeabae127bc4c6c306e74993aef24c059b3
|
d182176fbfb6f6aeb8856952d36d999c20f456ea
|
refs/heads/main
| 2023-09-03T08:25:47.396782
| 2023-09-03T00:17:25
| 2023-09-03T00:17:25
| 11,939,484
| 5,709
| 1,793
|
NOASSERTION
| 2023-09-14T19:50:49
| 2013-08-07T02:23:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,951
|
py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import pytest
from cryptography.hazmat.primitives.ciphers.aead import (
AESCCM,
AESGCM,
AESOCB3,
AESSIV,
ChaCha20Poly1305,
)
from ..hazmat.primitives.test_aead import _aead_supported
@pytest.mark.skipif(
not _aead_supported(ChaCha20Poly1305),
reason="Requires OpenSSL with ChaCha20Poly1305 support",
)
def test_chacha20poly1305_encrypt(benchmark):
chacha = ChaCha20Poly1305(b"\x00" * 32)
benchmark(chacha.encrypt, b"\x00" * 12, b"hello world plaintext", b"")
@pytest.mark.skipif(
not _aead_supported(ChaCha20Poly1305),
reason="Requires OpenSSL with ChaCha20Poly1305 support",
)
def test_chacha20poly1305_decrypt(benchmark):
chacha = ChaCha20Poly1305(b"\x00" * 32)
ct = chacha.encrypt(b"\x00" * 12, b"hello world plaintext", b"")
benchmark(chacha.decrypt, b"\x00" * 12, ct, b"")
def test_aesgcm_encrypt(benchmark):
aes = AESGCM(b"\x00" * 32)
benchmark(aes.encrypt, b"\x00" * 12, b"hello world plaintext", None)
def test_aesgcm_decrypt(benchmark):
aes = AESGCM(b"\x00" * 32)
ct = aes.encrypt(b"\x00" * 12, b"hello world plaintext", None)
benchmark(aes.decrypt, b"\x00" * 12, ct, None)
@pytest.mark.skipif(
not _aead_supported(AESSIV),
reason="Requires OpenSSL with AES-SIV support",
)
def test_aessiv_encrypt(benchmark):
aes = AESSIV(b"\x00" * 32)
benchmark(aes.encrypt, b"hello world plaintext", None)
@pytest.mark.skipif(
not _aead_supported(AESSIV),
reason="Requires OpenSSL with AES-SIV support",
)
def test_aessiv_decrypt(benchmark):
aes = AESSIV(b"\x00" * 32)
ct = aes.encrypt(b"hello world plaintext", None)
benchmark(aes.decrypt, ct, None)
@pytest.mark.skipif(
not _aead_supported(AESOCB3),
reason="Requires OpenSSL with AES-OCB3 support",
)
def test_aesocb3_encrypt(benchmark):
aes = AESOCB3(b"\x00" * 32)
benchmark(aes.encrypt, b"\x00" * 12, b"hello world plaintext", None)
@pytest.mark.skipif(
not _aead_supported(AESOCB3),
reason="Requires OpenSSL with AES-OCB3 support",
)
def test_aesocb3_decrypt(benchmark):
aes = AESOCB3(b"\x00" * 32)
ct = aes.encrypt(b"\x00" * 12, b"hello world plaintext", None)
benchmark(aes.decrypt, b"\x00" * 12, ct, None)
@pytest.mark.skipif(
not _aead_supported(AESCCM),
reason="Requires OpenSSL with AES-CCM support",
)
def test_aesccm_encrypt(benchmark):
aes = AESCCM(b"\x00" * 32)
benchmark(aes.encrypt, b"\x00" * 12, b"hello world plaintext", None)
@pytest.mark.skipif(
not _aead_supported(AESCCM),
reason="Requires OpenSSL with AES-CCM support",
)
def test_aesccm_decrypt(benchmark):
aes = AESCCM(b"\x00" * 32)
ct = aes.encrypt(b"\x00" * 12, b"hello world plaintext", None)
benchmark(aes.decrypt, b"\x00" * 12, ct, None)
|
[
"noreply@github.com"
] |
pyca.noreply@github.com
|
30a233993574c94b85769c43a11077769cd66e66
|
b426e963c20e65395796e0da7777b0f0f218a3a9
|
/auctions/migrations/0004_auto_20200713_1354.py
|
b31b14b7d69f1a1706ec1ea189c35b8e9c6c3414
|
[] |
no_license
|
viren-patel18/cs50_project2
|
836b50a452822d5dfb7ada6652c835d60390242f
|
9274c1aee03c80de389662b2e8cf1e04f33e7ead
|
refs/heads/master
| 2022-11-24T03:42:22.371266
| 2020-07-14T20:51:03
| 2020-07-14T20:51:03
| 279,124,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
# Generated by Django 3.0.8 on 2020-07-13 17:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('auctions', '0003_listing_active'),
]
operations = [
migrations.RemoveField(
model_name='bid',
name='listing',
),
migrations.RemoveField(
model_name='category',
name='listings',
),
migrations.RemoveField(
model_name='comment',
name='listing',
),
migrations.RemoveField(
model_name='watchlist',
name='listings',
),
]
# ----- file: /main.py | repo: mfkiwl/mcm-on-fpga | no license | Python | 1,654 bytes -----
# Copyright 2018
#
# Ahmet Can Mert <ahmetcanmert@sabanciuniv.edu>
# Hasan Azgin <hasanazgin@sabanciuniv.edu>
# Ercan Kalali <ercankalali@sabanciuniv.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mcm import *
# bit_size_signed: bit-size of the signed input variable
# constants      : list of UNSIGNED constant coefficients
# Input
bit_size_signed = 8
constants = [4,8,9,5748,25,22,87,974,11,44,86746874,212454575487857]
# MCM operation
[MAPPED, POWER2, SHIFTED, NOT_MAPPED, CONSTANTS] = dsp_mapping(bit_size_signed,constants)
# print information
print "--------------------------------------------"
print
print "Input bit size :",bit_size_signed
print "Input constants:",constants
print
print str(len(CONSTANTS))+" constants are generated using "+str(len(MAPPED))+" DSP blocks."
print
print "Constants generated:"
for i in range(len(MAPPED)):
print "* DSP #"+str(i)+":",MAPPED[i]
print
print "Constants as power-of-two:", [y[1] for y in POWER2]
print "Constants shifted :", [(str(z[1])+" from "+str(z[0])) for z in SHIFTED]
print "Constants not mapped :", NOT_MAPPED
print
print "--------------------------------------------"
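# Hedged aside (our illustration, not part of the mcm module): the shift-add
# identity that constant multiplication exploits, e.g. 9*x = (x << 3) + x,
# replacing a hardware multiplier with one shift and one add.
def times9(x):
    return (x << 3) + x
assert all(times9(x) == 9 * x for x in range(-128, 128))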
# ----- file: /KNN/ItemKNN_CFCBF_Hybrid_Recommender.py | repo: giovanni-bozzano/polimi-recsys-porting | no license | Python | 760 bytes -----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 23/10/17
@author: Maurizio Ferrari Dacrema
"""
from KNN.ItemKNNCBFRecommender import ItemKNNCBFRecommender
import scipy.sparse as sps
import numpy as np
class ItemKNN_CFCBF_Hybrid_Recommender(ItemKNNCBFRecommender):
""" ItemKNN_CFCBF_Hybrid_Recommender"""
RECOMMENDER_NAME = "ItemKNN_CFCBF_HybridRecommender"
def fit(self, ICM_weight = 1.0, **fit_args):
self.ICM_train = self.ICM_train*ICM_weight
self.ICM_train = sps.hstack([self.ICM_train, self.URM_train.T], format='csr')
super(ItemKNN_CFCBF_Hybrid_Recommender, self).fit(**fit_args)
def _get_cold_item_mask(self):
return np.logical_and(self._cold_item_CBF_mask, self._cold_item_mask)
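# Hedged sketch of the weighted hstack performed in fit() above, on toy data
# (matrix names and sizes here are illustrative, not from this repo).
if __name__ == '__main__':
    URM = sps.csr_matrix(np.random.randint(0, 2, size=(5, 4)))  # users x items
    ICM = sps.csr_matrix(np.random.rand(4, 3))                  # items x features
    hybrid = sps.hstack([ICM * 0.5, URM.T], format='csr')       # weight, then stack
    print(hybrid.shape)  # (4, 8): 3 content features plus 5 per-user columns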
# ----- file: /services/face_detection/detection_opencv.py | repo: nikitos9000/VideoMoji | no license | Python | 430 bytes -----
import cv2
faceCascade = cv2.CascadeClassifier('data/face_detection/haarcascade_frontalface_default.xml')
def detect(frame):
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # drop the stray third argument, which was being passed as dst
faces = faceCascade.detectMultiScale(
img_gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
return [dict(rect=list(face_rect)) for face_rect in faces]
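# Hedged usage sketch (ours); 'group.jpg' is a hypothetical input image path.
if __name__ == '__main__':
    frame = cv2.imread('group.jpg')
    if frame is not None:
        for face in detect(frame):
            print(face['rect'])  # [x, y, w, h] of each detected face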
# ----- file: /python3-virtualenv/bin/pyrsa-decrypt | repo: ariceeee/mlh-portfolio | MIT license | Python | 253 bytes -----
#!/home/centos/mlh-portfolio/python3-virtualenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import decrypt
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(decrypt())
# ----- file: /PythonFullStack/000Basic/Day01-基础/day01/09-格式化输出.py | repo: softwarefaith/PythonFullStack | no license | Python | 448 bytes -----
# Format specifiers: %s %d %f %x
# %s: formats a string
# %d: formats an int
# %f: formats a float
# %x: formats an integer as hexadecimal
name = "张三丰"
print("My name is %s" % name)
score = 100
print("Python exam score: %d" % score)
# %f keeps 6 decimal places by default, with rounding
pi = 3.1415926
print("Pi: %f" % pi)
# binary, octal, decimal, hexadecimal
num = 16
print("%x" % num)
s = 'abcdef'
s = s.replace('abc', 'eeeeee')  # str.replace returns a new string; reassign to keep the result
print(s)
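# Hedged extras (our additions): precision control and prefixed hex with the
# same %-formatting machinery shown above.
print("pi to 2 decimal places: %.2f" % pi)  # 3.14
print("num as prefixed hex: %#x" % num)     # 0x10
print("%s scored %d" % (name, score))       # several values via a tuple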
# ----- file: /Graphs/random.py | repo: redixhumayun/ctci | no license | Python | 301 bytes -----
import os
path = os.path.abspath(os.sep) + '/Users/zaidhumayun/Downloads'
if os.path.exists(path):
os.chdir(path)
    with open('./Game.of.Thrones.S07E05.Eastwatch.1080p.10bit.WEBRip.6CH.x265.HEVC-PSA.mkv', 'rb') as f:
        byte = f.read(32)
        while byte:  # an empty read signals EOF
            print(byte)
            byte = f.read(32)  # advance; the original never re-read, so the loop could not terminate
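# Hedged alternative (our sketch): a generator that yields fixed-size chunks
# lazily, so callers can iterate instead of managing the read loop themselves.
def read_chunks(file_path, size=32):
    with open(file_path, 'rb') as fh:
        chunk = fh.read(size)
        while chunk:
            yield chunk
            chunk = fh.read(size)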
# ----- file: /team_builder/accounts/migrations/0004_auto_20180308_1834.py | repo: charlesseymour/Treehouse-Python-Techdegree-Project-12 | no license | Python | 479 bytes -----
# Generated by Django 2.0.2 on 2018-03-09 00:34
# flake8: noqa
import accounts.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20180303_1744'),
]
operations = [
migrations.AlterField(
model_name='user',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to=accounts.models.user_directory_path),
),
]
# ----- file: /calculadora.py | repo: caiositta/Ac3ArquiteturaCalcTest | no license | Python | 2,191 bytes -----
from unittest import main
import unittest
from abc import ABCMeta, abstractmethod
class Calculadora (object):
def calcular(self, valor1, valor2, operador):
operacaoFabrica = OperacaoFabrica()
operacao = operacaoFabrica.criar(operador)
if(operacao == None):
return 0
else:
resultado = operacao.executar(valor1, valor2)
return resultado
class OperacaoFabrica (object):
def criar(self, operador):
if(operador == 'soma'):
return Soma()
elif (operador == 'subtracao'):
return Subtracao()
elif (operador == 'divisao'):
return Divisao()
elif (operador == 'multiplicacao'):
return Multiplicacao()
class Operacao(metaclass=ABCMeta):
@abstractmethod
def executar(self, valor1, valor2):
pass
class Soma(Operacao):
def executar(self, valor1, valor2):
resultado = valor1 + valor2
return resultado
class Subtracao(Operacao):
def executar(self, valor1, valor2):
resultado = valor1 - valor2
return resultado
class Divisao(Operacao):
def executar(self, valor1, valor2):
resultado = valor1 / valor2
return resultado
class Multiplicacao(Operacao):
def executar(self, valor1, valor2):
resultado = valor1 * valor2
return resultado
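# Hedged sketch (our addition, not in the original): the factory makes new
# operations cheap to add; a hypothetical Potencia only needs a criar() branch.
class Potencia(Operacao):
    def executar(self, valor1, valor2):
        return valor1 ** valor2
# Wiring it in means one more elif in OperacaoFabrica.criar returning
# Potencia(); Calculadora.calcular itself stays unchanged.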
class Testes(unittest.TestCase):
def test_somar(self):
calculador = Calculadora()
result = calculador.calcular(2,3, 'soma')
self.assertEqual(result, 5)
def test_subtracao(self):
calculador = Calculadora()
result = calculador.calcular(2,3, 'subtracao')
self.assertEqual(result, -1)
def test_divisao(self):
calculador = Calculadora()
result = calculador.calcular(2,4, 'divisao')
self.assertEqual(result, 0.5)
def test_multiplicacao(self):
calculador = Calculadora()
result = calculador.calcular(2,3, 'multiplicacao')
self.assertEqual(result, 6)
if __name__ == '__main__':
main()
# ----- file: /bg.py | repo: april9288/firestorm | no license | Python | 1,072 bytes -----
import pygame
from settings import Settings
class BG():
def __init__(self, fs_settings, screen):
self.screen = screen
self.fs_settings = fs_settings
self.image1 = pygame.image.load('images/bg1.jpg')
self.image2 = pygame.image.load('images/bg1.jpg')
self.rect1 = self.image1.get_rect()
self.rect2 = self.image2.get_rect()
self.screen_rect = screen.get_rect()
self.rect1.left = 0
self.rect2.left = fs_settings.screen_width
self.bg1x = float(self.rect1.left)
self.bg2x = float(self.rect2.left)
def update(self):
self.bg1x -= 2
self.bg2x -= 2
        if self.bg1x <= -self.fs_settings.screen_width:  # <= guards against skipping the exact value
            self.bg1x = self.fs_settings.screen_width
        if self.bg2x <= -self.fs_settings.screen_width:
            self.bg2x = self.fs_settings.screen_width
self.rect1.left = self.bg1x
self.rect2.left = self.bg2x
def blitme(self):
self.screen.blit(self.image1, self.rect1)
self.screen.blit(self.image2, self.rect2)
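# Hedged usage sketch (ours): driving the scrolling background for a few
# frames; assumes Settings exposes screen_width and screen_height.
if __name__ == '__main__':
    pygame.init()
    fs_settings = Settings()
    screen = pygame.display.set_mode(
        (fs_settings.screen_width, fs_settings.screen_height))
    bg = BG(fs_settings, screen)
    for _ in range(60):  # a real game would loop until a quit event
        bg.update()
        bg.blitme()
        pygame.display.flip()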
# ----- file: /regular_expression.py | repo: picksmania/personal | no license | Python | 6,477 bytes -----
import re
# compile() creates regular expression character class [a-e],
# which is equivalent to [abcde].
# class [abcde] will match with string with 'a', 'b', 'c', 'd', 'e'.
# q = re.compile('[a-e]')
# r = re.compile('\d')
# r1 = re.compile('\d+')
# s = re.compile('\D')
# t = re.compile('\s')
# u = re.compile('\S')
# v = re.compile('\w')
# w = re.compile('\W+')
# print(q.findall("I went to him at 11 A.M. on 4th July 1886.He is a #@$%***"))
# print(r.findall("I went to him at 11 A.M. on 4th July 1886.He is a #@$%***"))
# print(r1.findall("I went to him at 11 A.M. on 4th July 1886. He is a #@$%***"))
# print(s.findall("I went to him at 11 A.M. on 4th July 1886.He is a #@$%***"))
# print(t.findall("I went to him at 11 A.M. on 4th July 1886.He is a #@$%***"))
# print(u.findall("I went to him at 11 A.M. on 4th July 1886.He is a #@$%***"))
# print(v.findall("I went to him at 11 A.M. on 4th July 1886.He is a #@$%***"))
# print(w.findall("I went to him at 11 A.M. on 4th July 1886.He is a #@$%***"))
# print type(q), type(r)
# print re.split('[a-f]\d+','Hi HE10llo, hurry come here Watson. his Bday 11th Jan 2001. I know him from 10th Dec 2010',flags=re.IGNORECASE)
str = r'I am Prasit Bagal. I live in Bangalore @ Myhna heights, A-804 appartment.Gunjur 560087. ' \
r'Prasit is a good boy. Prasit does not own any car now but he wish to have one good CAR in future. ' \
r'PRASIT BagaL is married'
'''
# a = re.compile('Prasit(?=Bagal)')
# (?iLmsux word/name) --> finds the word/name with IGNORECASE,MULTILINE,LOCALE DEPENDENT,DOT MATCHES ALL,UNICODE, VERBOSE
print "****** 1 ******"
pat = '(?iLmsux)Bagal'
print re.match(pat,str)
print re.search(pat,str)
print re.findall(pat, str)
#m = re.search(pat,str)
#print m.group(0)
#print m.groups(1)
print '===================================='
# This is called a lookahead assertion.
print "****** 2 ******"
pat1 = 'Prasit (?=Bagal)'
print "MATCH:::", re.match(pat1,str)
print "SEARCH:::", re.search(pat1,str)
print "FINDALL:::", re.findall(pat1, str)
print "FINDITER:::", re.finditer(pat1, str)
m = re.compile(pat1,re.I)
print m
print m.sub('RINKI ', str)
#m = re.search(pat1,str)
#print m.group(0)
#print m.groups(1)
print '===================================='
print "****** 3 ******"
# This is called a positive lookbehind assertion.
pat2 = '(?<=Prasit) Bagal'
print "MATCH:::",re.match(pat2, str)
print "SEARCH:::",re.search(pat2, str)
print "FINDALL:::",re.findall(pat2, str)
prasit_iter = re.finditer(pat2, str)
print "FINDITER:::", prasit_iter
m = re.compile(pat2, re.I)
print m
print m.sub(' RINKI',str)
for i in prasit_iter:
print "starting position:::", i.start()
print "ending position:::", i.end()
print "GROUP:::", i.group()
print '===================================='
print "****** 4 ******"
pat3 = 'it?'
print "MATCH:::",re.match(pat3, str)
print "SEARCH:::",re.search(pat3, str)
print "FINDALL:::",re.findall(pat3, str)
print "FINDITER:::",re.finditer(pat3, str)
print '===================================='
print "****** 5 ******"
pat4 = '(?P<Prasit>) Bagal'
print re.match(pat4, str)
print re.search(pat4, str)
print re.findall(pat4, str)
print re.finditer(pat4, str)
print '===================================='
print "****** 6 ******"
str1= r'I am Prasit Bagal. I live in Bangalore @#! Myhna heights, A-804 apartment, Gunjur @#!560087.' \
r'Prasit is a good boy. Prasit does not own any car now but he wish to have one good CAR in future' \
r'PRASIT BagaL native is Kolkata'
pat5 = r'([\w-]+\d)'
pat6 = r'([\w-]+) apartment'
print re.match(pat5, str1)
print re.search(pat5, str1)
print re.findall(pat5, str1)
print re.findall(pat6, str1)
'''
str1 = 'Bangalore: 560087, Bangalore:560089,Bangalore:560090,Bangalore:560088 ,' \
'Bangalore:660099,Bangalore:660090,Delhi:560087,Delhi:560088,Delhi:560089,' \
'bangalore:560078,bangalore:560098'
pat1 = r'\b((?:b|B)angalore):(?=(5(?:\d+)))\b'
c = re.findall(pat1,str1)
#print c
str2 = "Hello my Number is +91-1234567889 and my friend's number is +91-9876543212" \
"my uncle Number is 080-44441234 and my aunt mumber is +1-1234598765 " \
"my wife number is +92-8197601520 "
pat2 =r'(\W9[1-3])-(?=(\d{10}))'
d = re.findall(pat2,str2)
#print d
str3 = 'hi hello 832-472-0660 how are you 8197601520 am fine 4444-778700' \
'done done 281-222-3212 good bad cool woollo 916-332-1110 great man 1234567891' \
'hhime can dan man'
pat3 = r'\b(([a-zA-z]{2})+)\b'
e = re.findall(pat3,str3)
#print e
str4 = 'Hello shubhamg199630@gmail.com , Rohitneeraj@gmail.com ,' \
'123prasit@yahoo.com abc 123 dey.rinki_123@hotmail.com'
pat4 = r'\S+@\S+'
f = re.findall(pat4,str4)
#print f
# password must meet four conditions:
#
# 1. The password must have between six and ten word characters \w
# 2. It must include at least one lowercase character [a-z]
# 3. It must include at least three uppercase characters [A-Z]
# 4. It must include at least one digit \d
str5 = 'hi hello Iam23FiNe 832-472-0660 how are you 8197601520. LkEWood1 am fine 4444-778700' \
'done done 281-222-3212 good bad cool PRASIT woollo 916-332-1110 KAYAL great man 1234567891' \
'ABCe1 can dan#!~54S?> man rhtdM_@123 and ABC123ac'
# The password length must be greater than or equal to 8
# The password must contain one or more uppercase characters
# The password must contain one or more lowercase characters
# The password must contain one or more numeric values
# The password must contain one or more special characters
#pat5 = r'([(a-zA-z0-9\S)]{6,})'
#pat5 = r'[^a-z]*[a-z]'
#pat5 = r'\A(?=[^a-z]*[a-z])(?=(?:[^A-Z]*[A-Z]){3})(?=\D*\d)\w{6,10}\z'
#pat5 = r'\A(?=\w{6,10}\z)(?=[^a-z]*[a-z])(?=(?:[^A-Z]*[A-Z]){3})\D*\d.*\z'
#pat5 = r'([A-Z]{3,})'
#pat5 = r'([a-z]{1,})'
#pat5 = r'(?=^.{6,}$)(?=.*\d)(?=.*[!@#$%^&*]+)(?![.\n])(?=.*[A-Z])(?=.*[a-z]).*$'
pat5 = r'((?=.*\d{1,})(?=.*[a-z]{1,})(?=.*[A-Z]{3,})(?=.*[@#$%]).{6})'
pat6 = r'^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[\W]).*'
pwd = re.findall(pat6,str5)
print pwd
'''
fh = open(r'C:\Component_logs\TEMP\nvme.txt','r')
data = fh.read()
#print type(data)
#print data
pat = r'NVMe \S+'
c = re.findall(pat,data)
print list(set(c))
'''
# Fetching the FILE NAME from a FILE PATH
'''
str5 = r'C:\test_project\SKP_MH_Project\regular_expression.py ' \
r'C:\test_project\SKP_MH_Project\regular.log'
pat2 = r'((?:[^\\]*).(?:py|log))'
print str5
#s = re.findall(pat,str5)
m = re.findall(pat2,str5,re.M)
#print s
print m
'''
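# Hedged, self-contained recap (our addition) of the lookaround patterns
# exercised above, on a tiny input.
demo = "Prasit Bagal"
print(re.findall(r'Prasit (?=Bagal)', demo))   # lookahead -> ['Prasit ']
print(re.findall(r'(?<=Prasit) Bagal', demo))  # lookbehind -> [' Bagal']
print(re.findall(pat6, 'Abcdef1!'))            # password rule -> ['Abcdef1!']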
# ----- file: /algo/tests.py | repo: lion416/intemass_old | no license | Python | 42,625 bytes -----
#! /usr/bin/python
#
# tests.py: System tests for essay grading algorithm.
#
from django.test import TestCase
from django.utils import unittest
import logging
from standard import Standard
from markscheme import MarkScheme
## OLD: from algo.answer import Answer
from answer import Answer
import os
import re
import nltk
import math
import pprint
import random
from django.conf import settings
# Note: following is used for invoking tests directly. This requires unittest2 in order
# to be compatible with Python versions less than 2.7
import unittest2
from common import *
debug_print("algo/tests.py start: " + debug_timestamp())
#------------------------------------------------------------------------
# Globals
# Analyze correspondence annotations (e.g., [[s1c => k2a]]
CHECK_LINKAGES = getenv_boolean("CHECK_LINKAGES", False)
# Allow for correspondence annotations without square brackets (provided on line by itself)
ALLOW_LOOSE_ANNOTATIONS = getenv_boolean("ALLOW_LOOSE_ANNOTATIONS", False)
# Ratio of character offset overlap for lines to be considered a match
OVERLAP_THRESHOLD = getenv_number("OVERLAP_THRESHOLD", 0.5)
# Number of times to test each student response for question 1
NUM_TRIALS = getenv_int("NUM_TRIALS", 10)
# Maximum number of essays to process (n.b., use only to speed up debugging)
MAX_ESSAYS = getenv_int("MAX_ESSAYS", 32)
# Whether to exclude unit tests known to take a while (i.e., most of them!)
EXCLUDE_LONG_TESTS = __debug__ and getenv_boolean("EXCLUDE_LONG_TESTS", True)
# Skip the class overrides defined here for Standard and Answer (on by default)
SKIP_OVERRIDES = __debug__ and getenv_boolean("SKIP_OVERRIDES", False)
USE_OVERRIDES = (not SKIP_OVERRIDES)
# Whether test_Q1_all applies random sentence matching thresholds (on by default)
RANDOM_THRESHOLDS = getenv_boolean("RANDOM_THRESHOLDS", True)
# Random seed to use (if nonzero)
RANDOM_SEED = getenv_int("RANDOM_SEED", 0)
# Use the student frequent distribution for global documents counts (not teacher)
# Note: needed for term expansion to work properly
USE_STUDENT_TEXT_DIST = getenv_boolean("USE_STUDENT_TEXT_DIST", True)
#------------------------------------------------------------------------
# Defines overrides method for @overrides annotation
# TODO: Just define if not already defined
#
def overrides(interface_class):
def overrider(method):
assert(method.__name__ in dir(interface_class))
return method
return overrider
#------------------------------------------------------------------------
# Supporting classes
#
# Annotations: class for representing annotations on essays, such as the
# clause-level correspondence between the student answer and the teacher's
# key.
#
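# Example (ours, inferred from the code below): in
#    "[[S1A]] The cat sat. [[S1B]] ... [[S1A => K2B]]"
# each bracketed label opens the textual unit that follows it, and the arrow
# form links student unit S1A to standard/key unit K2B.
#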
class Annotations:
# constructor: initialize hash from labels to text units (e.g., clause)
# as well as hash giving correspondence of student textual units to key.
def __init__(self):
debug_print("Annotations.__init__(%s)" % self, level=6)
self.textual_units = dict()
self.start_offset = dict()
self.end_offset = dict()
self.correspondences = dict()
self.original_text = None
self.text_proper = None
self.last_label = None
# interpret_annotation(annotation): Analyze ANNOTATION which can either be a label
# or a correspodence section.
# Notes:
# - this maintained result a hash from each student label to linked standard label(s)
# - this also keeps track of starting offset for labels in start_offset, assuming
# text_proper is incrementally being updated elsewhere along with this invocation.
# - annotation labels are converted to uppercase
#
def interpret_annotation(self, annotation_text):
debug_print("interpret_annotation(_)", level=5)
debug_print("\tannotation_text=%s" % annotation_text, level=6)
assert((self.text_proper != None) and (len(self.text_proper) >= 0))
annotation_text = annotation_text.upper()
match = re.search("=>", annotation_text)
if not match:
# Handle text label
current_offset = len(self.text_proper)
self.start_offset[annotation_text] = current_offset
            if self.last_label is not None:
self.end_offset[self.last_label] = current_offset
self.last_label = annotation_text
else:
# Handle correspondence linkages
while (len(annotation_text) > 0):
# TODO: check for weight (e.g., "p1a => 1b (0.5)")
## OLD: match = re.search(r"(\S+)\s*=>\s*(\S+)", annotation_text)
## OLD: match = re.search(r"([^:; \t\]])\s*=>\s*([^:; \t\]]+)", annotation_text)
match = re.search("([^:;= \t\n\[\]]+)\s*=>\s*([^:;= \t\n\[\]]+)", annotation_text)
if (match):
student = match.group(1)
key = match.group(2)
                    if (key not in self.correspondences):
self.correspondences[key] = []
self.correspondences[key].append(student)
debug_print("Adding correspondence: '%s' => '%s'" % (student, key), level=4)
if (re.search("=\s*>", annotation_text[0: match.start(0)])):
# TODO: pass in filename to function and add to warning (likewise below)
print_stderr("Warning: missed annotation-like text in '%s'" % annotation_text[0: match.start(0)])
annotation_text = annotation_text[match.end(0) : ]
else:
if (re.search("=\s*>", annotation_text)):
print_stderr("Warning: missed annotation-like text in remainder '%s'" % annotation_text)
annotation_text = ""
# extract_annotations(text): Analyzes text to extract hashes mapping
# labels into textual units and for mapping student labels into key
# TODO: track down slight offset problem for annotations from new-Q1-Standard.docx
#
def extract_annotations(self, text):
debug_print("extract_annotations(_)", level=5)
debug_print("\ttext=%s" % text, level=7)
self.text_proper = ""
self.last_label = None
# Extract label offsets and then label correspondences
while (len(text) > 0):
# Check for annotations within double square brackets (e.g., [[p5a]]).
# As a fallback, this checks for lines consisting just of " label => label ".
# TODO: Have the annotator fix his annotations that lack the brackets.
match = re.search(r"\[\[([^[]+)\]\]", text)
if (not match) and ALLOW_LOOSE_ANNOTATIONS:
## OLD: match = re.search(r"\n\s*(([^:;= \t\n\[\]]+)\s*=>\s*([^:;= \t\n\[\]]+))\s*\n", text)
## TODO: match = re.search(r"\n\s*(([^:;= \t\n\[\]]+)\s*=>\s*([^:;= \t\n\[\]]+))", text)
match = re.search(r"(([^:;= \t\n\[\]]+)\s*=>\s*([^:;= \t\n\[\]]+))", text)
# Extract annotations if found
if (match):
## TODO: annotation = match.group(0)
annotation_text = match.group(1)
self.text_proper += text[0 : match.start(0)] + " "
text = text[match.end(0) : ]
self.interpret_annotation(annotation_text)
## OLD: replacement_spaces = " " * len(annotation)
## OLD: self.text_proper += " " + text[0 : match.start(0)] + replacement_spaces
            # Otherwise add the remainder of the text to the version sans annotations
else:
if not match:
self.text_proper += text + " "
text = ""
# Finalize the label tracking
if self.last_label:
self.end_offset[self.last_label] = len(self.text_proper)
# Convert start_offset/end_offset info into textual_unit's
for unit in self.end_offset.keys():
start = self.start_offset[unit]
end = self.end_offset[unit]
self.textual_units[unit] = self.text_proper[start : end]
# Display in order by starting offset
debug_print("Annotation textual units 'label\t[start, end): text', ...:", level=2)
units = self.textual_units.keys()
for unit in sorted(units, key=lambda k: self.start_offset[k]):
debug_print("%s\t[%s, %s): { %s }" % (unit, self.start_offset[unit], self.end_offset[unit], self.textual_units[unit]), level=2)
debug_print("text_proper={\n%s\n\t}" % self.text_proper, level=7)
# evaluate_linkages(answer_annotations, detailed_mark_list, good_points): Compares the manual linkage annotations
# with the result of the system.
# Returns: num_good, num_system, num_manual
#
def evaluate_linkages(answer_annotations, detailed_mark_list, good_points):
debug_print("evaluate_linkages%s" % str((answer_annotations, '_', good_points)), level=5)
debug_print("\tdetailed_mark_list=%s" % detailed_mark_list, level=7)
num_good = 0
# Initialize totals
## OLD: num_system = len(good_points)
num_system = 0
num_manual = len(answer_annotations.correspondences)
# Analyze annotation links
# Check matches established by system against annotations
# Note: Standard labels subsume the point names (e.g., labels 1.1a and 1.1b for point 1.1)
for point in good_points:
debug_print("Point: %s" % point, level=4)
point_label_prefix = re.sub("^P", "", point.upper())
# Check student answer annotations linkages that map into standard with corresponding (correct) key
## standard_point_labels = [label for label in answer_annotations.correspondences.keys() if (label.find(point_label_prefix) != -1)]
standard_point_labels = [label for label in answer_annotations.correspondences.keys() if (label.find(point_label_prefix) == 0)]
for std_label in standard_point_labels:
debug_print("Standard label: %s" % std_label, level=4)
try:
# Find out locations indicated by annotations
student_labels = answer_annotations.correspondences[std_label]
for stu_label in student_labels:
debug_print("Matching student label: %s" % stu_label, level=4)
# Get annotation offsets
annot_start = answer_annotations.start_offset[stu_label]
annot_end = answer_annotations.end_offset[stu_label]
annot_text = answer_annotations.textual_units[stu_label]
debug_print("annot: offsets=(%d, %d) text=%s" % (annot_start, annot_end, annot_text), level=4)
# Get system answer offsets
system_start = -1
system_end = -1
system_text = "n/a"
for mark in detailed_mark_list:
if (mark['Point_No'] == point):
match_sen = mark['Match_Sen']
system_start = match_sen['Start']
system_end = match_sen['End']
system_text = match_sen['StuS']
break
debug_print("system: offsets=(%d, %d) text=%s" % (system_start, system_end, system_text), level=4)
# Make sure offsets overlap significantly
## TODO: rework num_system so not dependent upon annotation labels
if (system_start != -1):
num_system += 1
                    # Measure interval overlap between the annotation span and the system span;
                    # the original asymmetric comparisons could misfire, so use the standard formula
                    overlap = min(annot_end, system_end) - max(annot_start, system_start)
                    overlap_ratio = float(max(0, overlap)) / (annot_end - annot_start)
                    is_overlap = (overlap_ratio >= OVERLAP_THRESHOLD)
if is_overlap:
debug_print("Correct student %s linkage with standard %s" % (stu_label, std_label), level=3)
num_good += 1
except KeyError:
print_stderr("Exception in evaluate_linkages: " + str(sys.exc_info()))
if (num_system != len(good_points)):
print_stderr("Warning: Discrepency in num_system (%d) vs. num good_points (%d)" % (num_system, len(good_points)))
debug_print("evaluate_linkages() => %s" % str((num_good, num_system, num_manual)), level=3)
return num_good, num_system, num_manual
# calculate_fscore (case, good, manual, system): Show F-Score for CASE given counts for number GOOD, MANUAL, and SYSTEM.
#
def calculate_fscore (case, num_good, num_manual, num_system):
recall = 0
precision = 0
f_score = 0
print "%s: Num good %d; num manual %d; num system %d" % (case, num_good, num_manual, num_system)
if num_manual > 0:
recall = float(num_good) / num_manual
if num_system > 0:
precision = float(num_good) / num_system
if (recall > 0) or (precision > 0):
f_score = 2 * (precision * recall) / (precision + recall)
print "Recall %.3f; Precision %.3f; F-Score %.3f" % (round(recall, 3), round(precision, 3), round(f_score, 3))
# WARNING: The following overrides the classes for no apparent testing purpose
# (e.g., abStandard for Standard from standard.py). However, this practice is extremely
# dangerous, as this no longer represents a test of the actual deployed code!!!
#
class abStandard(Standard):
# constructor: warn about using different version of code
def __init__(self):
debug_print("Warning: Using shameless hack (abStandard testing class): FIX ME!")
Standard.__init__(self)
# note: There doesn't seem to be any difference in this version and the one in standard.py,
# except for syntactic variant in using [result for item in list] here rather than
# list(result for item in list) there.
#
@overrides(Standard)
def CalVector(self, sentencelist):
debug_print("abStandard.CalVector(_)", level=5)
text_words = []
for id, sentence in enumerate(sentencelist):
raw = self.ParseKeyword(sentence['KeyS'])
text = nltk.word_tokenize(raw)
stopwords_list = nltk.corpus.stopwords.raw('english').split()
'''
words = list(nltk.corpus.wordnet.morphy(word.lower())
for word, tag in nltk.pos_tag(text)
if (tag.startswith('V') or tag == 'NN' or tag == 'NNS')
and word not in stopwords_list)
'''
words = [nltk.corpus.wordnet.morphy(word.lower())
for (word, tag) in nltk.pos_tag(text)
if (tag.startswith('V') or tag.startswith('NN') or tag == 'JJ' or tag == 'DET' or tag == 'RB')
and word not in stopwords_list]
sentence['SenWords'] = list(word for word in words if word)
text_words += sentence['SenWords']
textfdist = nltk.FreqDist(text_words)
return textfdist
    # There is no difference between this version and the one in standard.py, except for minor comment changes.
#
@overrides(Standard)
def SentenceCal(self, sentencelist, textfdist):
debug_print("abStandard.SentenceCal(_)", level=5)
for sentence in sentencelist:
#text = nltk.word_tokenize(sentence['KeyS'])
fdist = nltk.FreqDist(sentence['SenWords'])
senvec = {}
sen_len = len(sentencelist)
for word in sorted(textfdist):
if fdist[word]:
#the frequency of sentence which contains this word in all sentencelist
sentencefreq = sum(1 for senten in sentencelist if word in senten['SenWords'])
senvec[word] = (1 + math.log(2.0 * fdist[word])) * math.log(2.0 * sen_len / sentencefreq)
#senvec[word] = (1 + math.log(1.0 * fdist[word])) * math.log(1.0 * sen_len / sentencefreq)
else:
senvec[word] = 0
sentence['KeySVec'] = senvec
return sentencelist
class abAnswer(Answer):
# constructor: initializes default settings
# note: different thresholds are used here versus those in answer.py:
# dist_threshold: 0.30 (vs. 0.25)
# sen_threshold: 0.15 (vs. 0.33)
# multisen_matchrate and multisen_threshold are the same
# TODO: Use same settings!!!
#
@overrides(Answer)
def __init__(self, **kwargs):
debug_print("Warning: Using shameless hack (abAnswer testing class): FIX ME!")
Answer.__init__(self)
self.dist_threshold = kwargs.get('dist_threshold') or 0.3
self.multisen_matchrate = kwargs.get('multisen_matchrate') or 0.3
self.sen_threshold = kwargs.get('sen_threshold') or 0.15
self.multisen_threshold = kwargs.get('multisen_threshold') or 0.4
nltk.data.path = [settings.NLTKDATAPATH]
# note: No difference from answer.py version except for minor comment change
@overrides(Answer)
def SentenceAnalysis(self, fulltext, textfdist):
debug_print("abAnswer.SentenceAnalysis(_)", level=5)
ans_sentencelist = []
text = fulltext.replace('\n', ' ')
## OLD: p = re.compile(r'.+\.')
p = re.compile(r'([\w\"\'\<\(][\S ]+?[\.!?])[ \n\"]')
keysen = p.findall(text)
sen_no = 0
for sen in keysen:
sen_no += 1
text = nltk.word_tokenize(sen)
text_words = list(nltk.corpus.wordnet.morphy(word.lower()) for (word, tag) in nltk.pos_tag(text))
ans_sentencelist.append({'StuS': sen,
'StuWords': list(word for word in text_words if word),
'No': sen_no})
for sentence in ans_sentencelist:
fdist = nltk.FreqDist(sentence['StuWords'])
senvec = {}
for word in sorted(textfdist):
if fdist[word]:
wordfreq = sum(1 for senten in ans_sentencelist if word in senten['StuWords'])
senvec[word] = (1 + math.log(2.0 * fdist[word])) * math.log(2.0 * len(keysen) / wordfreq)
#senvec[word] = (1 + math.log(1.0 * fdist[word])) * math.log(1.0 * len(keysen) / wordfreq)
else:
senvec[word] = 0
sentence['StuSVec'] = senvec
return ans_sentencelist
    # note: no difference from the answer.py version except for the Pearson correlation trace here that was commented out
#
@overrides(Answer)
def CalCosDist(self, ans_sentencelist, std_sen):
debug_print("abAnswer.CalCosDist(_)", level=5)
match_sen = None
max_cos = 0
if __debug__:
# TODO: Move this elsewhere
def pearson(ans_sentencelist, std_sen):
n = len(std_sen['KeySVec'])
sum_stu = sum(stu_sen['StuSVec'][word] for word in std_sen['KeySVec'] for stu_sen in ans_sentencelist)
sum_std = sum(std_sen['KeySVec'][word] for word in std_sen['KeySVec'])
sum_stu_sq = sum(stu_sen['StuSVec'][word] ** 2 for word in std_sen['KeySVec'] for stu_sen in ans_sentencelist)
sum_std_sq = sum(std_sen['KeySVec'][word] ** 2 for word in std_sen['KeySVec'])
psum = sum(stu_sen['StuSVec'][word] * std_sen['KeySVec'][word] for word in std_sen['KeySVec'] for stu_sen in ans_sentencelist)
num = psum - (sum_stu * sum_std / n)
den = ((sum_stu_sq - math.pow(sum_stu, 2) / n) * (sum_std_sq - math.pow(sum_std, 2) / n)) ** .5
if den == 0:
return 1
r = num / den
return r
debug_print_without_newline("pearson = ")
try:
print pearson(ans_sentencelist, std_sen)
except:
print "n/a"
debug_print("Exception during pearson calculation: " + str(sys.exc_info()))
for stu_sen in ans_sentencelist:
q, s, qs = 0, 0, 0
for word in std_sen['KeySVec']:
q += std_sen['KeySVec'][word] * std_sen['KeySVec'][word]
s += stu_sen['StuSVec'][word] * stu_sen['StuSVec'][word]
qs += std_sen['KeySVec'][word] * stu_sen['StuSVec'][word]
if q == 0 or s == 0:
qs_cos = 0
else:
qs_cos = qs / (math.sqrt(q * s))
stu_words = [word for word in stu_sen['StuSVec'] if stu_sen['StuSVec'][word] > 0]
if qs_cos > max_cos and len(stu_words) > 0:
max_cos = qs_cos
match_sen = stu_sen
        # note: empty list returned for matching words as not used by overridden-class tests
return max_cos, match_sen, []
#------------------------------------------------------------------------
# Test Cases
#
# Note: These don't actually test for specific conditions (e.g., via assertTrue),
# hence they are more like examples than actual test cases.
#
# Note: The class itself shouldn't be excluded, just the specific method that might take too long.
## OLD: @unittest.skip("Too much time")
class AlgorithmTest(TestCase):
# # constructor: initialize instance variables
# def __init__(self):
# self.standard_annotations = None
# self.student_annotations = None
# Setup(): testing setup code
def setUp(self):
self.logger = logging.getLogger(__name__)
## OLD: @unittest.skipIf(EXCLUDE_LONG_TESTS, "Too much time")
def test_standard(self):
self.logger.info("Test Standard Answer Analysis")
testStandardAnswerFile = "ans_Q1.txt"
filePath = os.path.join("algo/testdata/raw/Q1", testStandardAnswerFile)
self.logger.info("filepath:%s" % filePath)
if not os.path.isfile(filePath):
self.logger.error("Test file doesn't exist:%s" % testStandardAnswerFile)
assert False
fh = file(filePath, "r")
filetext = fh.read()
fh.close()
sinst = Standard()
pointlist, textfdist, slist = sinst.Analysis(filetext)
if __debug__:
print "Word frequencies"
for word,freq in textfdist.items():
print "%s:%d" % (word,freq)
pprint.pprint(slist)
if __debug__ and sinst.apply_grammar_checking:
print("standard critique: %s" % sinst.critique_results)
self.logger.info("Test Standard Answer Analysis finished")
# Derives frequency distribution and shows old vs. new.
#
def get_student_text_distribution(anstext, std_textfdist):
debug_print("Note: Deriving alternative global frequency distribution (from student text) for use with Answer.Analysis()")
sinst = Standard()
stu_pointlist, stu_textfdist, stu_slist = sinst.Analysis(anstext)
debug_print("\tstandard dist: " + str(std_textfdist), level=4)
debug_print("\tstudent dist: " + str(stu_textfdist), level=4)
return stu_textfdist
# NOTE: MarkScheme is not used in the system, so this should be rewritten
# to use MarkingSchemeLang instead.
@unittest.skipIf(EXCLUDE_LONG_TESTS, "Too much time")
def test_markscheme(self):
self.logger.info("Test Marking Scheme Analysis/Rule Generation")
mockplist = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P3.4']
mocktemplates = ""
#Negative
mocktemplates += 'all except P3 and P22,8,'
mocktemplates += 'only P1 or Ps,1,'
mocktemplates += 'only some from all,8,'
mocktemplates += 'only 2 from all,8,'
#Positive
mocktemplates += 'all less two combination of p1 and p2 and p3 and p4\
and p5 and p6 and p7 and p8 and p9 and p10 and 11'
mocktemplates += 'all less P5,8,'
mocktemplates += 'all less P3 and P22 or P4 and P5 or P6 or P7,8,'
mocktemplates += 'all,10,'
mocktemplates += 'only P1 or P6 and P7 and P4 or P88 or P89 or P90 and P2,8,'
mocktemplates += 'only P1 or P3.4,8,'
mocktemplates += 'any 2 combinations of P1;P3;P5;P99;P7,8,'
mocktemplates += 'any 2 combinations of P1;P3;P5;P99;P7 and \
any 1 combinations of P4;P6 and any 3 combinations of P2;P3.4,8,'
mocktemplates += 'less 2 combinations of P1;P3;P5;P99;P7 and\
less 1 combinations of P4;P6 and less 3 combinations of P2;P3.4,8,'
mocktemplates += 'all less 2 combinations of P1;P3;P5;P9,1,'
mocktemplates += 'all less 0 combinations of P1;P3;P5;P9,1,'
mocktemplates += 'all less -1 combinations of P1;P3;P5;P9,1,'
#be careful, last case has no trailing comma
mocktemplates += 'all less 4 combinations of P1;P3;P5,1'
ms = MarkScheme(mockplist)
rulelist = ms.GetRules(mocktemplates)
pprint.pprint(rulelist)
self.logger.info("Test Marking Scheme Analysis/Rule Generation Finished")
@unittest.skipIf(EXCLUDE_LONG_TESTS, "Too much time")
def test_answer(self):
self.logger.info("Test Student Answer Analysis")
# Read in the correct answer to first question
# TODO: Create helper function for reading question info as same code sequence used elsewhere.
testStandardAnswerFile = "ans_Q1.txt"
stdFilePath = os.path.join("algo/testdata/raw/Q1", testStandardAnswerFile)
self.logger.info("stdanswer filepath:%s" % stdFilePath)
if not os.path.isfile(stdFilePath):
self.logger.error("Standard Test file doesn't exist:%s" % testStandardAnswerFile)
assert False
fh = file(stdFilePath, "r")
stdtext = fh.read()
fh.close()
# Perform text processing analysis over correct answer
sinst = Standard()
pointlist, textfdist, slist = sinst.Analysis(stdtext)
std_pointlist_no = [point['Point_No'] for point in pointlist]
self.logger.info("Points:%s" % std_pointlist_no)
        # Read in the standard text again as if it were a student answer
        # TODO: Just do an assignment for crying out loud! Such needless code repetition!
        # ex: anstext = stdtext
testAnswerFile = "ans_Q1.txt"
ansFilePath = os.path.join("algo/testdata/raw/Q1", testAnswerFile)
self.logger.info("answer filepath:%s" % ansFilePath)
if not os.path.isfile(ansFilePath):
self.logger.error("Answer file doesn't exist:%s" % testAnswerFile)
assert False
fh = file(ansFilePath, "r")
anstext = fh.read()
fh.close()
# Create some dummy grading rules
mockrulelist = [
{'Mark': 10, 'Point': ['P1.1', 'P1.2', 'P1.3', 'P2', 'P3', 'P4', 'P5']},
{'Mark': 7, 'Point': ['P1.1', 'P2', 'P3', 'P4', 'P5']},
{'Mark': 6, 'Point': ['P1.1', 'P2', 'P3', 'P4']},
{'Mark': 5, 'Point': ['P1.1', 'P2', 'P3']},
{'Mark': 3, 'Point': ['P1.1', 'P2']},
{'Mark': 2, 'Point': ['P1.1']}]
pprint.pprint(mockrulelist)
# Create the answer class instance and optionally override global frequency distribution from answer text.
# TODO: Always use freq dist for student text (not standard).
ans = Answer()
if (USE_STUDENT_TEXT_DIST):
textfdist = get_student_text_distribution(anstext, textfdist)
# Preprocess the student answer and then compare resulting vectors against standard
# TODO: Raise an exception if the result is not as expected
mark, marklist, ommited = ans.Analysis(anstext, textfdist, slist, pointlist, mockrulelist)
pprint.pprint(mark)
pprint.pprint(ommited)
self.logger.info("Test Student Answer Analysis Finished")
def __parsescheme(self, rawschemes):
rawschemelist = rawschemes.split(',')
txtschemelist = []
if len(rawschemelist) >= 2:
for i in range(0, len(rawschemelist), 2):
str1 = str(rawschemelist[i])
str2 = str(rawschemelist[i + 1])
txtschemelist.append([str1, str2])
txtschemelist.sort(key=lambda x: int(x[1]), reverse=True)
return txtschemelist
def __updaterulelist(self, scheme, pointlist):
txtplist = list(point['Point_No'] for point in pointlist if 'P0.' not in point['Point_No'])
txtrulelist = []
if txtplist:
try:
ms = MarkScheme(txtplist)
txtrulelist = list(rule for rule in ms.GetRules(scheme))
except:
debug_print("Exception during __updaterulelist: " + str(sys.exc_info()))
pass
return txtrulelist
# Helper function for testing Q1 sentences, returning result of text processing
# and term vector creation along with some grading rules for testing.
#
def parse_Q1(self):
debug_print("parse_Q1()", level=4)
# Read in the correct answer to first question
testStandardAnswerFile = "ans_Q1.txt"
filePath = os.path.join("algo/testdata/raw/Q1", testStandardAnswerFile)
self.logger.info("filepath:%s" % filePath)
if not os.path.isfile(filePath):
self.logger.error("Test file doesn't exist:%s" % testStandardAnswerFile)
assert False
debug_print("Processing standard file '%s'" % filePath, level=3)
fh = file(filePath, "r")
filetext = fh.read()
fh.close()
# Check the text for optional annotations and isolate
if CHECK_LINKAGES:
self.standard_annotations = Annotations()
self.standard_annotations.extract_annotations(filetext)
filetext = self.standard_annotations.text_proper
# Create the appropriate class instance for Standard
# TODO: Remove abAnswer method overrides altogether and do everything via proper subclassing (RTFM!!!).
## OLD: sinst = abStandard()
sinst = abStandard() if USE_OVERRIDES else Standard()
# Perform text processing analysis over sentence and return result along with some mocked up rules
pointlist, textfdist, slist = sinst.Analysis(filetext)
rulelist = [{'Mark': 10, 'Point': ['P1.1', 'P1.2', 'P2', 'P3', 'P4', 'P5', 'P6']},
{'Mark': 9, 'Point': ['P2', 'P1.1', 'P6', 'P4', 'P5']},
{'Mark': 9, 'Point': ['P2', 'P1.2', 'P6', 'P4', 'P5']},
{'Mark': 9, 'Point': ['P2', 'P3', 'P6', 'P4', 'P5']},
{'Mark': 9, 'Point': ['P2', 'P3', 'P6', 'P4', 'P5', 'P1.2']},
{'Mark': 9, 'Point': ['P2', 'P3', 'P6', 'P4', 'P5', 'P1.1']},
{'Mark': 9, 'Point': ['P3', 'P6', 'P4', 'P5', 'P1.2', 'P1.1']},
{'Mark': 8, 'Point': ['P3', 'P6', 'P4', 'P5']},
{'Mark': 7, 'Point': ['P2', 'P6', 'P4', 'P5', 'P1.2', 'P1.1']},
{'Mark': 6, 'Point': ['P6', 'P4', 'P5']},
{'Mark': 5, 'Point': ['P2', 'P3', 'P6', 'P5', 'P1.2', 'P1.1']},
{'Mark': 5, 'Point': ['P2', 'P3', 'P6', 'P4', 'P1.2', 'P1.1']},
{'Mark': 5, 'Point': ['P2', 'P3', 'P4', 'P5', 'P1.2', 'P1.1']},
{'Mark': 4, 'Point': ['P2', 'P3', 'P1.1', 'P4', 'P1.2']},
{'Mark': 4, 'Point': ['P2', 'P3', 'P1.1', 'P1.2', 'P5']},
{'Mark': 4, 'Point': ['P2', 'P3', 'P1.1', 'P6', 'P1.2']},
{'Mark': 3, 'Point': ['P2', 'P3', 'P1.1', 'P1.2']},
{'Mark': 2, 'Point': ['P3', 'P1.2']},
{'Mark': 2, 'Point': ['P3', 'P1.1']},
{'Mark': 2, 'Point': ['P1.2', 'P1.1']},
{'Mark': 1, 'Point': ['P1.1']},
{'Mark': 1, 'Point': ['P1.2']},
{'Mark': 1, 'Point': ['P2']},
{'Mark': 1, 'Point': ['P3']}]
return pointlist, textfdist, slist, rulelist
@unittest.skipIf(EXCLUDE_LONG_TESTS, "Too much time")
def test_Q1_single(self):
debug_print("test_Q1_single()", level=4)
pointlist, textfdist, slist, rulelist = self.parse_Q1()
## OLD: ans = abAnswer()
ansfile = 'algo/testdata/raw/Q1/Q1_SS16.docx.txt'
fh = file(ansfile, "r")
anstext = fh.read()
fh.close()
manualmark = 1
# Create the answer class instance and optionally override global frequency distribution from answer text.
# TODO: Always use freq dist for student text (not standard).
ans = abAnswer() if USE_OVERRIDES else Answer()
if (USE_STUDENT_TEXT_DIST):
textfdist = get_student_text_distribution(anstext, textfdist)
mark, marklist, ommited = ans.Analysis(anstext, textfdist, slist, pointlist, rulelist)
err = mark - manualmark
print("%s\t%d\t%s\t%d" % (ansfile, mark, marklist, err))
# test_Q1_all(): grade each of the 32 student papers against the standard key, comparing the system
# score to that of manual grading.
# In addition, a separate evaluation is done in terms of recall/precision of matching student answers sentences
# with those in the standard key by comparison against correspondence annotations (i.e., linkages).
#
## TODO: @unittest.skipIf(EXCLUDE_LONG_TESTS, "Too much time")
def test_Q1_all(self):
debug_print("test_Q1_all()", level=4)
pointlist, textfdist, slist, rulelist = self.parse_Q1()
manuallist = [9, 7, 9, 7, 7, 10, 7, 7, 7, 7, 7, 1, 2, 2, 0, 1, 2, 2, 1, 1, 8, 9, 4, 9, 7, 9, 0, 7, 4, 10, 7, 7]
minmaxerr = 0
minrd = 0
        minerrcount = float('inf')  # start at infinity so the first trial's stats are always recorded
total_good = 0
total_system = 0
total_manual = 0
        # Optionally initialize the random seed (useful for debugging)
if RANDOM_SEED > 0:
debug_print("Setting random seed to %d" % RANDOM_SEED)
random.seed(RANDOM_SEED)
        # Run NUM_TRIALS evaluations, each with a different random sentence matching threshold
for i in range(NUM_TRIALS):
trial_num = i + 1
debug_print("trial %d" % trial_num, level=4)
maxerr = 0
errcount = 0
var = 0
# Create the appropriate class instance for Answer
# Note: default thresholds: dist_threshold 0.25, multisen_matchrate 0.3, sen_threshold 0.33, multisen_threshold 0.4
# TODO: Remove abAnswer method overrides altogether and do everything via proper subclassing (RTFM!!!).
## OLD: ans = abAnswer(dist_threshold=rd, multisen_matchrate=0.3, sen_threshold=rd, multisen_threshold=0.4)
ans = None
if (USE_OVERRIDES):
## OLD: ans = abAnswer(dist_threshold=rd, multisen_matchrate=0.3, sen_threshold=rd, multisen_threshold=0.4)
ans = abAnswer()
else:
ans = Answer()
if RANDOM_THRESHOLDS:
rd = random.uniform(0.32, 0.36)
debug_print_without_newline("rd = ")
print rd
ans.dist_threshold=rd
ans.multisen_matchrate=0.3
ans.sen_threshold=rd
ans.multisen_threshold=0.4
# Test all cases in Q1 directory
# TODO: replace os.walk with directory read
Q1_base_dir = 'algo/testdata/raw/Q1'
for root, dirs, files in os.walk('algo/testdata/raw/Q1'):
if root != Q1_base_dir:
continue
if 'Standard' in dirs:
dirs.remove('Standard')
for idx in range(0, MAX_ESSAYS):
# Read student answer
## OLD: ansfile = 'Q1_SS' + str(idx + 1) + '.docx.txt'
ext = '.annot.txt' if CHECK_LINKAGES else '.docx.txt'
ansfile = 'Q1_SS' + str(idx + 1) + ext
debug_print("Processing student answer '%s'" % ansfile, level=3)
filePath = os.path.join(root, ansfile)
fh = file(filePath, "r")
anstext = fh.read()
debug_print("anstext: { %s }" % anstext, level=7)
fh.close()
# Check the text for optional annotations and isolate
if CHECK_LINKAGES:
self.answer_annotations = Annotations()
self.answer_annotations.extract_annotations(anstext)
## DUH: anstext = self.standard_annotations.text_proper
anstext = self.answer_annotations.text_proper
# Grade the essay and compare against manual
# TODO: Always use freq dist for student text (not standard)
if (USE_STUDENT_TEXT_DIST):
textfdist = get_student_text_distribution(anstext, textfdist)
mark, marklist, ommited = ans.Analysis(anstext, textfdist, slist, pointlist, rulelist)
err = mark - manuallist[idx]
# Check the system sentence linkages versus the annotations
if CHECK_LINKAGES:
num_good, num_system, num_manual = evaluate_linkages(self.answer_annotations, ans.detailedmarklist, marklist)
calculate_fscore("ss" + str(idx + 1), num_good, num_system, num_manual)
total_good += num_good
total_system += num_system
total_manual += num_manual
# Check grammar, etc.
if ans.apply_grammar_checking:
print("answer critique: %s" % ans.text_critique)
# Update statistics
maxerr += math.fabs(err)
var += err ** 2
errcount += 1 if math.fabs(err) > 3 else 0
print("%s\t%d\t%s\t%d" % (ansfile, mark, marklist, err))
if errcount < minerrcount:
minerrcount = errcount
minmaxerr = maxerr
minrd = rd
print "maxerr:%d, maxvar:%d, errcount:%d" % (maxerr, var, errcount)
print "minmaxerr:%d rd:%d count:%d" % (minmaxerr, minrd, minerrcount)
# Summarize answer/standard correspondences with respect to manual annotations
if CHECK_LINKAGES:
calculate_fscore("Overall", total_good, total_system, total_manual)
    # __traversal_process(testdir): Checks TESTDIR for the teacher's key (standard), marking
    # scheme, and one or more student answers, using the following naming convention:
    #    ans_qN.txt  scheme_qN.txt  studM_qN.txt
    # where N is the question number and M is the student number. For example (from testdata/raw/Q3):
    #    ans_q8.txt  scheme_q8.txt  stud1_q8.txt  stud7_q8.txt
    #
#
def __traversal_process(self, testdir):
ans = Answer()
for root, dirs, files in os.walk(testdir):
if 'Standard' in dirs:
dirs.remove('Standard')
for stdfile in files:
# Check for answer file (e.g., "ans_q8.txt")
if 'ans' in stdfile:
testno = stdfile[4:-4]
self.logger.info("no:%s" % testno)
stdPath = os.path.join(root, stdfile)
if not os.path.isfile(stdPath):
self.logger.error("Test file doesn't exist:%s" % stdfile)
assert False
fh = file(stdPath, "r")
filetext = fh.read()
fh.close()
sinst = Standard()
pointlist, textfdist, slist = sinst.Analysis(filetext)
# Check schema file (e.g., "scheme_q8.txt")
schemename = 'scheme_' + testno + '.txt'
schemepath = os.path.join(root, schemename)
fr = file(schemepath, 'r')
scheme = self.__parsescheme(fr.read())
fr.close()
rulelist = self.__updaterulelist(scheme, pointlist)
print("ansfile\tmark\tmarklist")
for idx in range(0, 10):
# Check student response file (e.g., "stud9_q8.txt")
ansfile = 'stud' + str(idx + 1) + '_' + testno + '.txt'
ansPath = os.path.join(root, ansfile)
if os.path.isfile(ansPath):
fa = file(ansPath, 'r')
anstext = fa.read()
fa.close()
if anstext:
# TODO: Always use freq dist for student text (not standard)
if (USE_STUDENT_TEXT_DIST):
textfdist = get_student_text_distribution(anstext, textfdist)
debug_print("Calling ans.Analysis%s" % str((anstext, textfdist, slist, pointlist, rulelist)), level=4)
mark, marklist, ommited = ans.Analysis(anstext, textfdist, slist, pointlist, rulelist)
else:
mark = 0
marklist = []
print("%s\t%d\t%s" % (ansfile, mark, marklist))
@unittest.skipIf(EXCLUDE_LONG_TESTS, "Too much time")
def test_Q2(self):
self.__traversal_process('algo/testdata/raw/Q2')
@unittest.skipIf(EXCLUDE_LONG_TESTS, "Too much time")
def test_Q3(self):
self.__traversal_process('algo/testdata/raw/Q3')
@unittest.skipIf(EXCLUDE_LONG_TESTS, "Too much time")
def test_Q4(self):
self.__traversal_process('algo/testdata/raw/Q4')
# Run the tests if executed directly (avoids Django testing overhead)
#
debug_print("__name__ = " + str(__name__), level=4)
if __name__ == '__main__':
force_console_logging(__name__)
debug_print("Invoking tests directly")
# Note: following based on Python unittest module description (underlies Django one)
unittest.main()
debug_print("stop algo/tests.py: " + debug_timestamp())
# ----- file: /dashboard/views.py | repo: fanout/scaledemo | no license | Python | 2,832 bytes -----
import json
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotAllowed
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
import gripcontrol
import grip
import redis_ops
db = redis_ops.RedisOps()
pub = grip.Publisher()
if hasattr(settings, 'REDIS_HOST'):
db.host = settings.REDIS_HOST
if hasattr(settings, 'REDIS_PORT'):
db.port = settings.REDIS_PORT
if hasattr(settings, 'REDIS_DB'):
db.db = settings.REDIS_DB
if hasattr(settings, 'GRIP_PROXIES'):
grip_proxies = settings.GRIP_PROXIES
else:
grip_proxies = list()
if hasattr(settings, 'DASHBOARD_REDIS_PREFIX'):
db.prefix = settings.DASHBOARD_REDIS_PREFIX
else:
db.prefix = ''
if hasattr(settings, 'DASHBOARD_GRIP_PREFIX'):
grip_prefix = settings.DASHBOARD_GRIP_PREFIX
else:
grip_prefix = 'dashboard-'
pub.proxies = grip_proxies
def _get_stats():
data = db.get_stats_data()
if data is None:
        data = {'id': 0}
out = dict()
out['id'] = data['id']
out['capacity'] = data.get('capacity', 0)
out['edge_up'] = data.get('edge-up', 0)
out['edge_total'] = data.get('edge-total', 0)
out['client_up'] = data.get('client-up', 0)
out['client_total'] = data.get('client-total', 0)
out['ping_min'] = data.get('ping-min', 0)
out['ping_max'] = data.get('ping-max', 0)
out['ping_avg'] = data.get('ping-avg', 0)
out['received'] = data.get('received', 0)
out['receive_min'] = data.get('receive-min', 0)
out['receive_max'] = data.get('receive-max', 0)
out['receive_avg'] = data.get('receive-avg', 0)
out['message'] = data.get('message', '')
return out
def home(req):
if req.method == 'GET':
return render_to_response('dashboard/home.html', {}, context_instance=RequestContext(req))
else:
return HttpResponseNotAllowed(['GET'])
def status(req):
if req.method == 'GET':
last_id = req.GET.get('last_id')
if last_id is not None:
try:
last_id = int(last_id)
except:
return HttpResponseBadRequest('Bad Request: last_id wrong type\n')
try:
data = _get_stats()
except:
return HttpResponse('Service Unavailable\n', status=503)
if last_id is None or last_id != data['id']:
return HttpResponse(json.dumps(data) + '\n', content_type='application/json')
else:
if not grip.is_proxied(req, grip_proxies):
return HttpResponse('Not Implemented\n', status=501)
channel = gripcontrol.Channel(grip_prefix + 'status', str(data['id']))
theaders = dict()
theaders['Content-Type'] = 'application/json'
tbody = dict()
tbody_raw = json.dumps(tbody) + '\n'
tresponse = gripcontrol.Response(headers=theaders, body=tbody_raw)
instruct = gripcontrol.create_hold_response(channel, tresponse)
return HttpResponse(instruct, content_type='application/grip-instruct')
else:
return HttpResponseNotAllowed(['GET'])
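# Hedged client-side sketch (ours) of the long-poll contract above; 'requests'
# is an external dependency and the URL is hypothetical.
#
#   import requests
#   last_id = None
#   while True:
#       params = {'last_id': last_id} if last_id is not None else {}
#       resp = requests.get('http://dashboard.example/status', params=params)
#       data = resp.json()
#       last_id = data['id']    # held responses return when the id changes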
# ----- file: /sentiment_practice/explore.py | repo: seanongrl/Data-Science-Fintech | no license | Python | 853 bytes -----
# EXPLORE DATA SET
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data_source_url = "https://raw.githubusercontent.com/kolaveridi/kaggle-Twitter-US-Airline-Sentiment-/master/Tweets.csv"
airline_tweets = pd.read_csv(data_source_url)
plot_size = plt.rcParams["figure.figsize"]
plot_size[0] = 8
plot_size[1] = 6
plt.rcParams["figure.figsize"] = plot_size
airline_tweets.airline.value_counts().plot(kind='pie', autopct='%1.0f%%')
airline_tweets.airline_sentiment.value_counts().plot(kind='pie', autopct='%1.0f%%', colors=["red", "yellow", "green"])
airline_sentiment = airline_tweets.groupby(['airline', 'airline_sentiment']).airline_sentiment.count().unstack()
airline_sentiment.plot(kind='bar')
plt.figure()  # give seaborn its own figure so it doesn't draw over the pandas bar-chart axes
sns.barplot(x='airline_sentiment', y='airline_sentiment_confidence', data=airline_tweets)
plt.show()
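# Quick sanity check of the loaded frame (illustrative; the column names below are
# the ones already used above, from the Kaggle airline-sentiment CSV).
print(airline_tweets.shape)
print(airline_tweets[['airline', 'airline_sentiment', 'airline_sentiment_confidence']].head())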
|
[
"noreply@github.com"
] |
seanongrl.noreply@github.com
|
7fa91757cb83b9c20e92624508e0d4b35a0f4186
|
83ff195640df2b10b697c482723992af950671df
|
/Homework/Homework 7/hw7Part1.py
|
a32fb34031cba26e43f23eb5081e02f3f76fb4d5
|
[] |
no_license
|
sriyuthsagi/CSCI-1100-Computer-Science-I
|
0c28c2d099b0ea811fe2dcfb8da0a925e6165b0e
|
ec8942a0fb7a085e7800d58aa0393f81576fcb8d
|
refs/heads/master
| 2020-05-30T12:29:38.426713
| 2019-06-01T13:28:49
| 2019-06-01T13:28:49
| 189,735,219
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,489
|
py
|
def read_file(file):
    # Use a context manager so the file handle is closed after reading.
    with open(file) as handle:
        return handle.read().split('\n')
def file_to_dict(file):
file = read_file(file)
i = 0
while i < len(file):
file[i] = file[i].split(',')
i += 1
file.pop()
words = dict()
for item in file:
words[item[0]] = item[1]
return (words)
def keyboard_file_to_dict(file):
file = read_file(file)
i = 0
while i < len(file):
file[i] = file[i].split(' ')
i += 1
replace = dict()
i = 0
while i < len(file):
j = 1
keys = []
while j < len(file[i]):
keys.append(file[i][j])
j += 1
replace[file[i][0]] = keys
i += 1
    return (replace)  # return the key -> replacement-keys mapping built above
def file_to_list(file):
file = read_file(file)
file = list(file)
if '' in file:
file.remove('')
return (file)
def found(listfile, dictionary, indexlist):
i = 0
while i < len(listfile):
indexlist.append(0)
if listfile[i] in dictionary:
indexlist[i] = 1
i += 1
return listfile, indexlist
def drop(listfile, dictionary, indexlist, wordfre):
i = 0
while i < len(listfile):
if indexlist[i] != 1:
j = 0
for charecter in listfile[i]:
newlistfile = listfile[i][:j] + listfile[i][j+1:]
if newlistfile in dictionary:
wordfre[listfile[i]].append((dictionary[newlistfile], newlistfile))
                    if indexlist[i] == 0:
indexlist[i] = 2
else:
indexlist[i] += 1
j += 1
i += 1
return listfile, indexlist
def swap(listfile, dictionary, indexlist, wordfre):
i = 0
while i < len(listfile):
if indexlist[i] != 1:
j = 0
for charecter in listfile[i]:
if j == 0:
newlistfile = listfile[i][1] + listfile[i][0] + listfile[i][2:]
                elif j == len(listfile[i]) - 1:
                    newlistfile = listfile[i][:j-1] + listfile[i][j] + listfile[i][j-1]
else:
newlistfile = listfile[i][:j-1] + listfile[i][j] + listfile[i][j-1] + listfile[i][j+1:]
if newlistfile in dictionary:
wordfre[listfile[i]].append((dictionary[newlistfile], newlistfile))
if indexlist[i] == 0:
indexlist[i] = 2
else:
indexlist[i] += 1
j += 1
i += 1
return listfile, indexlist
def replace(listfile, dictionary, indexlist, keyboard, wordfre):
i = 0
while i < len(listfile):
if indexlist[i] != 1:
j = 0
for charecter in listfile[i]:
a = 0
if charecter == ' ':
charecter = ''
while a < len(keyboard[charecter]):
newlistfile = listfile[i][:j] + keyboard[charecter][a] + listfile[i][j+1:]
if newlistfile in dictionary:
wordfre[listfile[i]].append((dictionary[newlistfile], newlistfile))
if indexlist[i] == 0:
                            indexlist[i] = 2
else:
indexlist[i] += 1
a += 1
j += 1
i += 1
return listfile, indexlist
"""
dictionary = input('Dictionary => ')
print(dictionary)
listfile = input('Input file => ')
print(listfile)
keyboard = input('Keyboard file =. ')
print(keyboard)
"""
dictionary = 'words_10percent.txt'
listfile = 'input_words.txt'
keyboard = 'keyboard.txt'
dictionary = file_to_dict(dictionary)
listfile = file_to_list(listfile)
keyboard = keyboard_file_to_dict(keyboard)
oldlistfile = list(listfile)
indexlist = []
wordfre = dict()
for i in range(len(listfile)):
wordfre[listfile[i]] = list()
listfile, indexlist = found(listfile, dictionary, indexlist)
listfile, indexlist = drop(listfile, dictionary, indexlist, wordfre)
listfile, indexlist = swap(listfile, dictionary, indexlist, wordfre)
listfile, indexlist = replace(listfile, dictionary, indexlist, keyboard, wordfre)
special = []
for word in wordfre:
wordfre[word] = set(wordfre[word])
wordfre[word] = list(wordfre[word])
wordfre[word] = sorted(wordfre[word])
special.append(len(wordfre[word]))
print('Spellcheck results:')
i = 0
while i < len(listfile):
if special[i] < indexlist[i]:
indexlist[i] = special[i]
if indexlist[i] == 0:
index = 'NO MATCH'
elif indexlist[i] == 1:
index = 'FOUND'
if indexlist[i] < 2:
print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], listfile[i], index))
elif indexlist[i] == 2:
print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-1][1], 'MATCH 1'))
elif indexlist[i] == 3:
print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-1][1], 'MATCH 1'))
print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-2][1], 'MATCH 2'))
elif indexlist[i] == 4:
print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-1][1], 'MATCH 1'))
print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-2][1], 'MATCH 2'))
print('{0:15} -> {1:15} :{2}'.format(oldlistfile[i], wordfre[oldlistfile[i]][-3][1], 'MATCH 3'))
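# Worked example (illustrative, not part of the assignment): the one-character
# "drop" and adjacent-pair "swap" candidates that the functions above enumerate
# for a single word, written as a standalone (hypothetical) helper.
def _example_candidates(word):
    drops = [word[:j] + word[j+1:] for j in range(len(word))]
    swaps = [word[:j] + word[j+1] + word[j] + word[j+2:] for j in range(len(word) - 1)]
    return drops, swaps

# _example_candidates('teh') -> (['eh', 'th', 'te'], ['eth', 'the'])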
|
[
"noreply@github.com"
] |
sriyuthsagi.noreply@github.com
|
e4730e9c78b1d7a9347ffbfd08bfea12b588f9dc
|
7112f6946c2ba8476d94678fa821fa346a2c4313
|
/lambda_service/app/generator/generators/will_generator/parser.py
|
5fadedc080dd46bb61e2ef430dc564da26087c9a
|
[] |
no_license
|
avgustinegorov/willcraftnow-backend
|
f69162a75e5b04ea60eb4b368557fac7601fcb25
|
fe8d4362544da58ec6eda0713dca77345468a63e
|
refs/heads/master
| 2023-06-01T18:40:40.686101
| 2021-06-16T22:00:23
| 2021-06-16T22:00:23
| 377,268,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,032
|
py
|
import json
import random
import string
from copy import deepcopy
from io import BytesIO
from html.parser import HTMLParser
import html
from draftjs_exporter.html import HTML
from draftjs_exporter.dom import DOM
from draftjs_exporter.wrapper_state import WrapperState
from .parser_utils import ParserUtils
from ..base_parser import BaseParser
from ..mark_safe import mark_safe
class DraftToHtmlBlocks(HTML):
def render(self, block):
"""
Starts the export process on a given piece of content state.
"""
wrapper_state = WrapperState(self.block_options, [block])
entity_map = {}
return self.render_block(block, entity_map, wrapper_state)
class MyHTMLParser(HTMLParser):
def __init__(self, block, *args, **kwargs):
self.block = block
self.parsed_data = []
self.styles = []
self.reconstructed_text = ""
self.current_position = 0
self.tag_type = {"b": "BOLD", "em": "ITALIC", "u": "UNDERLINE"}
super().__init__()
def feed(self, block):
super().feed(block["text"])
block["text"] = mark_safe(self.reconstructed_text)
block["inlineStyleRanges"] = self.styles
return block
def handle_starttag(self, tag, attrs):
self.styles.append(
{
"style": self.tag_type[tag],
"offset": self.current_position,
"length": None,
}
)
def handle_endtag(self, tag):
for style in self.styles:
if style["style"] == self.tag_type[tag] and style["length"] == None:
style["length"] = self.current_position - style["offset"]
def handle_data(self, data):
self.reconstructed_text += data
self.current_position += len(data)
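# Worked example (illustrative): given a block {'text': 'pay <b>now</b>'},
# MyHTMLParser.feed() rewrites the text to 'pay now' and records
#   inlineStyleRanges == [{'style': 'BOLD', 'offset': 4, 'length': 3}]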
class DraftJSMixin:
def generate_draftjs_blocks(self, will_object):
new_will_object = []
for block in will_object:
parser = MyHTMLParser(block)
updated_block = parser.feed(block)
new_will_object.append(updated_block)
return new_will_object
def generate_html_blocks(self, blocks):
updated_blocks = []
for block in blocks:
rendered_block = DraftToHtmlBlocks().render(block)
text = html.unescape(DOM.render(rendered_block))
block["text"] = text
updated_blocks.append(block)
        return updated_blocks
class WillParser(BaseParser, DraftJSMixin, ParserUtils):
"""A Base Class used to generate will PDFs"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.main_counter = 0
self.data = self.clean_data(self.data)
self.people = self.data["persons"]
self.executors = [
self.get_entity_details(person)
for person in self.people
if "EXECUTOR" in person["entity_roles"]
]
self.sub_executor = [
self.get_entity_details(person)
for person in self.people
if "SUB_EXECUTOR" in person["entity_roles"]
]
self.guardian = [
self.get_entity_details(person)
for person in self.people
if "GUARDIAN" in person["entity_roles"]
]
self.lastrites = self.data["last_rites"]
self.instructions = self.data["instructions"]
self.witnesses = [
self.get_entity_details(person)
for person in self.people
if "WITNESS" in person["entity_roles"]
]
self.allocations = self.get_allocation_categories(
self.reduce_allocations(self.data['allocations']))
        self.onlyResidualAsset = (len(self.allocations) == 1
                                  and next(iter(self.allocations)) == 'Residual')
self.WillObject = []
self.defined_persons = []
def parse_asset_details(self, asset):
asset_details = asset.pop("asset_details")
asset_details.update(asset)
return asset_details
def object_wrapper(self, text, depth=0, type="ordered-list-item", underline=False):
key = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(5)
)
block_object = {
"data": {},
"depth": depth,
"entityRanges": [],
"inlineStyleRanges": [],
"key": key,
"text": text,
"type": type,
}
self.WillObject.append(block_object)
return block_object
def definePersons(self, person, ownership=False):
entity_type = person["entity_type"]
if entity_type == "Person":
if person["id_number"] in self.defined_persons:
result = f'{person["name"].upper()}'
if ownership:
result += "'s"
else:
result = (
f'{person["name"]} (holder of {person["id_document"]} No.'
f' {person["id_number"].upper()}), residing at'
f' {person["address"]} ("{person["name"].upper()}")'
)
self.defined_persons = self.defined_persons + [
person["id_number"],
]
elif entity_type == "Charity":
if person["id"] in self.defined_persons:
result = f'{person["name"].upper()}'
if ownership:
result += "'s"
else:
result = f'{person["name"]} (UEN No. {person["UEN"].upper()})'
self.defined_persons = self.defined_persons + [
person["id"],
]
return result
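    # Illustrative example (all field values hypothetical): the first mention of a
    # person renders the full definition, e.g.
    #   'Jane Doe (holder of NRIC No. S1234567A), residing at 1 Example Road ("JANE DOE")'
    # while later mentions render just 'JANE DOE' (with ownership=True, "JANE DOE's").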
def ExecutorsHeader(self):
if len(self.executors) == 1:
result = "EXECUTOR"
else:
result = "EXECUTORS"
self.object_wrapper(result, type="header-four", underline=True)
def ExecutorPowersHeader(self):
if len(self.executors) == 1:
result = "EXECUTOR'S POWERS"
else:
result = "EXECUTORS' POWERS"
self.object_wrapper(result, type="header-four", underline=True)
def ExecutorsParagraph(self):
if len(self.executors) == 1:
result = (
f"I appoint {self.definePersons(self.executors[0])} to be the sole"
' Executor and Trustee of this Will (<b>"Executor"</b>).'
)
self.object_wrapper(result)
self.main_counter += 1
else:
result = (
f"I appoint the following persons as joint Executors and Trustees of"
f' this Will (each my <b>"Executor"</b> and collectively, my'
f' <b>"Executors"</b>):'
)
self.object_wrapper(result)
self.main_counter += 1
for index, executor in enumerate(self.executors):
result = (
f"{self.definePersons(executor)}{self.SemiColon(index, self.executors, False)}"
)
self.object_wrapper(result, depth=1)
def SubExecutorsParagraph(self):
        assert len(self.sub_executor) <= 1, "Only one sub-executor is allowed"
for sub_executor in self.sub_executor:
if len(self.executors) == 1:
result = (
"If for any reason, my Executor is unable or unwilling to act as"
" executor and trustee of this Will, I appoint"
f" {self.definePersons(sub_executor)} as the sole Executor and"
' Trustee of this Will (<b>"Executor"</b>).'
)
self.object_wrapper(result)
self.main_counter += 1
else:
result = (
"If for any reason, any one of my Executors is unable or unwilling"
" to act as executor and trustee of this Will, I appoint"
f" {self.definePersons(sub_executor)} as alternative Executor to"
" act jointly with the remaining Executors appointed above (<b>"
' "Executor" </b> and jointly with the Executors named above, <b>'
' "Executors" </b>).'
)
self.object_wrapper(result)
self.main_counter += 1
def ExecutorPowersParagraph(self):
result = (
f'My {self.renderPlural(self.executors, "Executor")} shall have full powers to'
" give, sell for cash or upon such terms as"
f" {self.heOrSheOrThey(self.executors)} may deem fit, call in and convert into"
" money, any of my Assets or such part or parts thereof as shall be of a"
" saleable or convertible nature, at such time or times and in such manner"
f' as my {self.renderPlural(self.executors, "Executor")} shall, in'
f" {self.hisOrHerOrTheir(self.executors)} absolute and uncontrolled discretion"
" think fit, with power to postpone such sale, calling in and conversion"
" of such property, or of such part or parts thereof for such period or"
f' periods as my {self.renderPlural(self.executors, "Executor")} in'
f" {self.hisOrHerOrTheir(self.executors)} absolute and uncontrolled discretion"
" think fit, and to the extent necessary, have full powers to pay all my"
" liabilities, debts, mortgages, funeral and testamentary expenses, and"
" any taxes payable by reason of my death from my Estate"
' (<b>"Expenses"</b>).'
)
self.object_wrapper(result)
self.main_counter += 1
result = (
f"The powers of my {self.renderPlural(self.executors, 'Executor')} named herein"
" shall be in addition and in modification of the Executor's powers under"
" the Trustees Act (Cap. 337) of Singapore, any re-enactment thereof and"
" general terms of the laws of Singapore. For the avoidance of doubt, part"
" IVA and IVB of the Trustees Act (Cap. 337) shall apply."
)
self.object_wrapper(result)
self.main_counter += 1
def StartingParagraph(self):
result = (
"<b>THIS IS THE LAST WILL AND TESTAMENT</b> of me,"
f' {self.definePersons(self.user)}'
" in respect of all my assets situated in the Republic of Singapore at the"
' time of my death (<b>"Assets"</b>).'
)
self.object_wrapper(result)
self.main_counter += 1
def RevocationParagraph(self):
result = (
f"I hereby revoke all former wills and testamentary dispositions made by"
f' me, and declare this to be my last will and testament (<b>"Will"</b>)'
f" and that this Will and any codicil to it shall be construed and take"
f" effect in accordance with the laws of the Republic of Singapore."
)
self.object_wrapper(result)
self.main_counter += 1
def GuardianParagraph(self):
        assert len(self.guardian) <= 1, "Only one guardian is allowed"
for guardian in self.guardian:
result = (
f"It is my wish that {self.definePersons(guardian)} be appointed as"
" guardian of my child/children"
)
if self.user["relationship_status"] == "Married":
result += (
", as the case may be jointly with my spouse, or solely if my"
" spouse is not able to act by reason of death or incapacity."
)
else:
result += "."
self.object_wrapper(result)
self.main_counter += 1
def InstructionsParagraph(self):
if self.instructions and self.instructions["instructions"]:
result = ""
if (
self.instructions["instructions"] == "Scattered in the Sea"
or self.instructions["instructions"] == "SCATTERED"
):
result = (
"It is my wish to be cremated and my ashes to be scattered at sea."
)
elif (
self.instructions["instructions"] == "Held at a crematorium"
or self.instructions["instructions"] == "CREMATED"
):
result = (
"It is my wish to be cremated and my ashes to be kept at the"
f" crematorium at {self.instructions['crematorium_location']}."
)
elif (
self.instructions["instructions"] == "Buried in Singapore"
or self.instructions["instructions"] == "BURIED"
):
result = "It is my wish to be buried in Singapore."
if result:
self.object_wrapper(result)
self.main_counter += 1
def LastRitesParagraph(self):
if self.lastrites and all(self.lastrites[key] for key in self.lastrites):
duration_unit = "days"
if int(self.lastrites["funeral_duration"]) == 1:
duration_unit = "day"
result = (
f"It is my wish that a {self.lastrites['funeral_religion'].lower()}"
f" funeral be held at {self.lastrites['funeral_location']} for"
f" {self.lastrites['funeral_duration']} {duration_unit}."
)
self.object_wrapper(result)
self.main_counter += 1
def AssetHeader(self, assetName):
if assetName == "RealEstate":
assetHeader = "REAL ESTATE ALLOCATIONS"
elif assetName == "BankAccount":
assetHeader = "BANK ACCOUNT ALLOCATIONS"
elif assetName == "Insurance":
assetHeader = "INSURANCE ALLOCATIONS"
elif assetName == "Investment":
assetHeader = "INVESTMENT ALLOCATIONS"
elif assetName == "Company":
assetHeader = "COMPANY SHARES ALLOCATIONS"
elif assetName == "Residual":
assetHeader = "RESIDUAL ALLOCATIONS"
else: # pragma: no cover
assetHeader = "" # pragma: no cover
self.object_wrapper(assetHeader, type="header-four", underline=True)
def AssetsParagraphs(self):
for assetName, asset_allocations in self.allocations.items():
self.AssetHeader(assetName)
if assetName == "BankAccount":
startRef = self.main_counter
self.BeneficiaryParagraph(
list(filter(
                        lambda allocation: allocation['allocation_amount'] is not None, asset_allocations)),
distributionType="Cash"
)
endRef = self.main_counter
self.BeneficiaryParagraph(
list(filter(
                        lambda allocation: allocation['allocation_percentage'] is not None, asset_allocations)),
subjectTo=[startRef + 1, endRef]
if startRef != endRef
else None,
distributionType="Percentage",
)
elif assetName != "Residual":
self.BeneficiaryParagraph(asset_allocations)
else:
self.ResidualBeneficiaryParagraph(asset_allocations)
def BeneficiaryParagraph(self, allocations, subjectTo=None, distributionType=None):
if len(allocations) == 1:
allocation = allocations[0]
result = (
f"{self.SubjectToExpensesAndClauses(subjectTo)}, I give"
f" {self.PercentageOrAmountOfInterestOrBenefitIn(allocations)}, to"
f" {self.definePersons(allocation['entity'])} absolutely and free from all"
f" encumbrances{self.ResidualStatement(allocation, distributionType)}"
)
self.object_wrapper(result)
self.main_counter += 1
self.SubBeneficiaryParagraph(
allocation['allocations'], allocation, allocations)
elif len(allocations) > 1:
result = (
f"{self.SubjectToExpensesAndClauses(subjectTo)}, I give"
f" {self.PercentageOrAmountOfInterestOrBenefitIn(allocations)}, to the"
" following persons in the corresponding proportions:"
)
self.object_wrapper(result)
self.main_counter += 1
hasResidual = (
sum([float(allocation['allocation_percentage'])
for allocation in allocations]) < 100
if distributionType != "Cash"
else None
)
for index, allocation in enumerate(allocations):
result = (
f"{self.PercentageOrAmount(allocation)} to"
f" {self.definePersons(allocation['entity'])} absolutely and free from all"
f" encumbrances{self.DefineMainSubstitute(index, allocations, self.main_counter)}{self.SemiColon(index, allocations, hasResidual)}"
)
self.object_wrapper(result, depth=1)
if hasResidual and distributionType != "Cash":
result = (
f"the remainder shall be distributed as part of my Residual Assets."
)
self.object_wrapper(result, depth=1)
for index, allocation in enumerate(allocations):
self.SubBeneficiaryParagraph(
allocation['allocations'], allocation, allocations)
def SubBeneficiaryParagraph(self, allocations, parent_allocation, parent_allocations):
hasResidual = (
sum([float(allocation['allocation_percentage'])
for allocation in allocations]) < 100
if parent_allocation["allocation_percentage"]
else None
)
if len(allocations) == 0:
result = (
f"If {self.definePersons(parent_allocation['entity'])} should die during my lifetime,"
" or fail to survive me for more than thirty (30) days, then the"
f" {self.interestOrBenefits(parent_allocation)} in"
f" {self.AssetLabel(parent_allocation['asset'])} which"
f" {self.definePersons(parent_allocation['entity'])} would otherwise have received"
" under this Will shall be distributed equally amongst"
f" {self.definePersons(parent_allocation['entity'], ownership=True)} surviving issues"
" absolutely and free from all encumbrances so long as they survive me"
" for more than thirty (30) days"
)
if len(parent_allocations) != 1:
result += (
f", and if {self.definePersons(parent_allocation['entity'])} does not have any"
" issues who have survived me for more than thirty (30) days, then"
" the same shall be distributed equally amongst all the Main"
" Substitutes who have survived me for more than thirty (30) days."
)
else:
result += "."
self.object_wrapper(result)
self.main_counter += 1
elif len(allocations) == 1:
allocation = allocations[0]
result = (
f"If {self.definePersons(parent_allocation['entity'])} should die during my lifetime,"
" or fail to survive me for more than thirty (30) days, I give"
f" {self.PercentageOrAmount(allocation)} of the"
f" {self.interestOrBenefits(allocation)} in"
f" {self.AssetLabel(allocation['asset'])} which"
f" {self.definePersons(parent_allocation['entity'])} would otherwise have received"
" under this Will"
f" {self.EffectivePercentageOrAmount(allocation)} to"
f" {self.definePersons(allocation['entity'])} absolutely and free from all"
" encumbrances, provided always that if"
f" {self.definePersons(allocation['entity'])} should die during my lifetime,"
" or fail to survive me for more than thirty (30) days, then the same"
" shall be distributed equally amongst"
f" {self.definePersons(allocation['entity'], ownership=True)} surviving"
" issues absolutely and free from all encumbrances so long as they"
" survive me for more than thirty (30) days"
)
if len(parent_allocations) != 1 and self.has_other_main_substitutes(
allocation, parent_allocation, parent_allocations
):
result += (
f", and if {self.definePersons(allocation['entity'])} does not have any"
" issues who have survived me for more than thirty (30) days, then"
" the same shall be distributed equally amongst all the Main"
" Substitutes who have survived me for more than thirty (30) days"
)
if hasResidual:
result += (
f", and the remainder shall be distributed as part of my Residual"
f" Assets"
)
result += "."
self.object_wrapper(result)
self.main_counter += 1
else:
result = (
f"If {self.definePersons(parent_allocation['entity'])} should die during my lifetime,"
" or fail to survive me for more than thirty (30) days, I give the"
f" {self.interestOrBenefits(parent_allocation)} in"
f" {self.AssetLabel(parent_allocation['asset'])} which"
f" {self.definePersons(parent_allocation['entity'])} would otherwise have received"
" under this Will, to the following persons in the corresponding"
" proportions:"
)
self.object_wrapper(result)
self.main_counter += 1
for index, allocation in enumerate(allocations):
temp_result = (
f"{self.PercentageOrAmount(allocation)}"
f" {self.EffectivePercentageOrAmount(allocation)}"
f" to {self.definePersons(allocation['entity'])} absolutely and free"
" from all"
f" encumbrances{self.DefineApplicableSubstitute(index, allocations)}{self.SemiColon(index, allocations, hasResidual)}"
)
self.object_wrapper(temp_result, depth=1)
if hasResidual:
temp_result = (
f"the remainder shall be distributed as part of my Residual Assets."
)
self.object_wrapper(temp_result, depth=1)
temp_result = (
"Provided Always that if any of the Applicable Substitutes, should die"
" during my lifetime, or fail to survive me for more than thirty (30)"
" days, then any gift devise or bequest of my"
f" {self.interestOrBenefits(parent_allocation)} in"
f" {self.AssetLabel(parent_allocation['asset'])} which that Applicable Substitute"
" would otherwise have received under this Will shall be distributed"
" equally amongst the issues of that Applicable Substitute so long as"
" they survive me for more than thirty (30) days, and if that"
" Applicable Substitute does not have any issues who have survived me"
" for more than thirty (30) days, then the same shall be distributed"
" equally amongst all the Applicable Substitutes who have survived me"
" for more than thirty (30) days."
)
# result += ParagraphWrapperNoBullet(temp_result, context)
self.object_wrapper(temp_result, depth=2)
def ResidualBeneficiaryParagraph(self, allocations):
clausesAbove = (
" not distributed in the clauses above "
if not self.onlyResidualAsset
else " "
)
if len(allocations) == 1:
allocation = allocations[0]
result = (
f"I give all my Assets{clausesAbove}{self.AssetDefinition(allocation['asset'])}"
f" to {self.definePersons(allocation['entity'])} absolutely and free from all"
" encumbrances."
)
self.object_wrapper(result)
self.main_counter += 1
self.ResidualSubBeneficiaryParagraph(
allocation['allocations'], allocation, allocations)
elif len(allocations) > 1:
result = (
f"I give all my Assets{clausesAbove}{self.AssetDefinition(allocations[0]['asset'])}"
" to the following persons in the corresponding proportions:"
)
self.object_wrapper(result)
self.main_counter += 1
for index, allocation in enumerate(allocations):
result = (
f"{self.PercentageOrAmount(allocation)} to"
f" {self.definePersons(allocation['entity'])} absolutely and free from all"
f" encumbrances{self.DefineMainSubstitute(index, allocations, self.main_counter)}{self.SemiColon(index, allocations, False)}"
)
self.object_wrapper(result, depth=1)
for index, allocation in enumerate(allocations):
self.ResidualSubBeneficiaryParagraph(
allocation['allocations'], allocation, allocations)
def ResidualSubBeneficiaryParagraph(self, allocations, parent_allocation, parent_allocations):
if len(allocations) == 0:
result = (
f"If {self.definePersons(parent_allocation['entity'])} should die during my lifetime,"
" or fail to survive me for more than thirty (30) days, then the"
f" proportion of my {self.AssetLabel(parent_allocation['asset'])} which"
f" {self.heOrShe(parent_allocation['entity'])} would otherwise have received under this Will"
f" shall be distributed equally amongst {self.hisOrHer(parent_allocation['entity'])}"
" surviving issues absolutely and free from all encumbrances so long"
" as they survive me for more than thirty (30) days"
)
if len(parent_allocations) != 1:
result += (
f", and if {self.definePersons(parent_allocation['entity'])} does not have any"
" issues who have survived me for more than thirty (30) days, then"
" the same shall be distributed equally amongst all the Main"
" Substitutes who have survived me for more than thirty (30) days."
)
else:
result += "."
self.object_wrapper(result)
self.main_counter += 1
elif len(allocations) == 1:
allocation = allocations[0]
result = (
f"If {self.definePersons(parent_allocation['entity'])} should die during my lifetime,"
" or fail to survive me for more than thirty (30) days, I give"
f" {self.PercentageOrAmount(allocation)}"
f" {self.EffectivePercentageOrAmount(allocation)} of"
f" the {self.AssetLabel(allocation['asset'])} which {self.heOrShe(parent_allocation['entity'])}"
" would otherwise have received under this Will to"
f" {self.definePersons(allocation['entity'])} absolutely and free from all"
" encumbrances, provided always that if"
f" {self.definePersons(allocation['entity'])} should die during my lifetime,"
" or fail to survive me for more than thirty (30) days, then the same"
f" shall be distributed equally amongst {self.hisOrHer(allocation['entity'])}"
" surviving issues absolutely and free from all encumbrances so long"
" as they survive me for more than thirty (30) days"
)
if len(parent_allocations) != 1:
result += (
f", and if {self.definePersons(allocation['entity'])} does not have any"
" issues who have survived me for more than thirty (30) days, then"
" the same shall be distributed equally amongst all the Main"
" Substitutes who have survived me for more than thirty (30) days."
)
else:
result += "."
self.object_wrapper(result)
self.main_counter += 1
else:
result = (
f"If {self.definePersons(parent_allocation['entity'])} should die during my lifetime,"
" or fail to survive me for more than thirty (30) days, I give the"
f" {self.interestOrBenefits(parent_allocation)} in"
f" {self.AssetLabel(parent_allocation['asset'])} which {self.heOrShe(parent_allocation['entity'])} would"
" otherwise have received under this Will, to the following persons in"
" the corresponding proportions:"
)
self.object_wrapper(result)
self.main_counter += 1
            hasResidual = sum([float(a['allocation_percentage'])
                               for a in allocations]) < 100  # constant for the whole loop, and avoids shadowing `allocation`
            for index, allocation in enumerate(allocations):
temp_result = (
f"{self.PercentageOrAmount(allocation)}"
f" {self.EffectivePercentageOrAmount(allocation)}"
f" to {self.definePersons(allocation['entity'])} absolutely and free"
" from all"
f" encumbrances{self.DefineApplicableSubstitute(index, allocations)}{self.SemiColon(index, allocations, hasResidual)}"
)
self.object_wrapper(temp_result, depth=1)
temp_result = (
"Provided Always that if any of the Applicable Substitutes, should die"
" during my lifetime, or fail to survive me for more than thirty (30)"
f" days, the proportion of my {self.AssetLabel(parent_allocation['asset'])} which that"
" Applicable Substitute would otherwise have received under this Will"
" shall be distributed equally amongst the issues of that Applicable"
" Substitute so long as they survive me for more than thirty (30)"
" days, and if that Applicable Substitute does not have any issues who"
" have survived me for more than thirty (30) days, then the same shall"
" be distributed equally amongst all the Applicable Substitutes who"
" have survived me for more than thirty (30) days."
)
# result += ParagraphWrapperNoBullet(temp_result, context)
self.object_wrapper(temp_result, depth=2)
def parse(self):
self.StartingParagraph()
self.RevocationParagraph()
self.ExecutorsHeader()
self.ExecutorsParagraph()
self.SubExecutorsParagraph()
self.ExecutorPowersHeader()
self.ExecutorPowersParagraph()
self.AssetsParagraphs()
self.GuardianParagraph()
self.InstructionsParagraph()
self.LastRitesParagraph()
return {
**self.data,
'witnesses': self.witnesses,
"blocks": self.generate_draftjs_blocks(self.WillObject)
}
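    # Illustrative note: every entry object_wrapper appends is a draft.js raw
    # block; a sketch of the shape (the key is random per block):
    #   {"data": {}, "depth": 0, "entityRanges": [], "inlineStyleRanges": [],
    #    "key": "A1B2C", "text": "...", "type": "ordered-list-item"}
    # parse() then returns the cleaned input data plus 'witnesses' and 'blocks'.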
|
[
"avgustinegorov@outlook.com"
] |
avgustinegorov@outlook.com
|
5f4faa5a99b512529973a47b04b211cca259e660
|
9c66ae49534862e838acc661c2909554f3c8acc3
|
/.gdbinit.d/python/qt5/printers.py
|
422901fdcee29bc72a88c758ef09e0713c133a44
|
[] |
no_license
|
oxidase/home
|
9f04404736c3c9f39c10c4917da3e377336b7c75
|
df0d6208c98318d99a2ecd87a426ee3d7b963e90
|
refs/heads/master
| 2023-08-13T20:58:12.310405
| 2023-07-23T11:26:24
| 2023-07-23T11:26:24
| 10,049,686
| 3
| 1
| null | 2016-08-02T17:44:45
| 2013-05-14T07:34:40
|
Emacs Lisp
|
UTF-8
|
Python
| false
| false
| 23,468
|
py
|
# -*- coding: iso-8859-1 -*-
# Pretty-printers for Qt5.
# Copyright (C) 2009 Niko Sams <niko.sams@gmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import itertools
import re
import struct
class QtPrivateRefCountPrinter:
def __init__(self, typename, val):
self.val = val
def to_string(self):
return '%d' % int(self.val['atomic']['_q_value'])
class QStringPrinter:
def __init__(self, typename, val):
self.val = val
def to_string(self):
size = self.val['d']['size']
ret = ""
qt5 = 0
try:
# Qt4 has d->data, Qt5 doesn't.
self.val['d']['data']
except Exception:
qt5 = 1
# The QString object might be not yet initialized. In this case size is a bogus value
# and the following 2 lines might throw memory access error. Hence the try/catch.
try:
if qt5:
dataAsCharPointer = (self.val['d'] + 1).cast(gdb.lookup_type("char").pointer())
else:
dataAsCharPointer = self.val['d']['data'].cast(gdb.lookup_type("char").pointer())
ret = dataAsCharPointer.string(encoding = 'UTF-16', length = size * 2)
except Exception:
# swallow the exception and return empty string
pass
return ret
def display_hint (self):
return 'string'
class QByteArrayPrinter:
def __init__(self, typename, val):
self.val = val
def to_string(self):
pointer = self.val['d'].cast(gdb.lookup_type("char").pointer())+self.val['d']['offset']
ret = map(lambda i: hex(int(pointer[i]) % 0x100), range(min(4096, int(self.val['d']['size']))))
return 'QByteArray of length %d' % (int(self.val['d']['size'])) + ' = {' + ', '.join(ret) + '}'
def display_hint (self):
return 'array'
class QListPrinter:
"Print a QList"
class _iterator:
def __init__(self, nodetype, d):
self.nodetype = nodetype
self.d = d
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.count >= self.d['end'] - self.d['begin']:
raise StopIteration
count = self.count
array = self.d['array'][self.d['begin'] + count]
#from QTypeInfo::isLarge
isLarge = self.nodetype.sizeof > gdb.lookup_type('void').pointer().sizeof
isPointer = self.nodetype.code == gdb.TYPE_CODE_PTR
#unfortunately we can't use QTypeInfo<T>::isStatic as it's all inlined, so use
#this list of types that use Q_DECLARE_TYPEINFO(T, Q_MOVABLE_TYPE)
#(obviously it won't work for custom types)
movableTypes = ['QRect', 'QRectF', 'QString', 'QMargins', 'QLocale', 'QChar', 'QDate', 'QTime', 'QDateTime', 'QVector',
                'QRegExp', 'QPoint', 'QPointF', 'QByteArray', 'QSize', 'QSizeF', 'QBitArray', 'QLine', 'QLineF', 'QModelIndex', 'QPersistentModelIndex',
'QVariant', 'QFileInfo', 'QUrl', 'QXmlStreamAttribute', 'QXmlStreamNamespaceDeclaration', 'QXmlStreamNotationDeclaration',
'QXmlStreamEntityDeclaration']
#this list of types that use Q_DECLARE_TYPEINFO(T, Q_PRIMITIVE_TYPE) (from qglobal.h)
primitiveTypes = ['bool', 'char', 'signed char', 'unsigned char', 'short', 'unsigned short', 'int', 'unsigned int', 'long', 'unsigned long', 'long long', 'unsigned long long', 'float', 'double']
if movableTypes.count(self.nodetype.tag) or primitiveTypes.count(str(self.nodetype)):
isStatic = False
else:
isStatic = not isPointer
if isLarge or isStatic: #see QList::Node::t()
node = array.cast(gdb.lookup_type('QList<%s>::Node' % self.nodetype).pointer())
else:
node = array.cast(gdb.lookup_type('QList<%s>::Node' % self.nodetype))
self.count = self.count + 1
return ('[%d]' % count, node['v'].cast(self.nodetype))
def __init__(self, typename, val):
self.val = val
self.typename = typename
try:
self.itype = self.val.type.template_argument(0)
except RuntimeError:
self.itype = None
def children(self):
if self.itype is not None:
itype = self.itype
elif self.typename == 'QStringList':
itype = gdb.lookup_type('QString')
else:
itype = None
return self._iterator(itype, self.val['d'])
def to_string(self):
empty = 'empty ' if self.val['d']['end'] == self.val['d']['begin'] else ''
type = self.typename if self.itype is None else '%s<%s>' % (self.typename, self.itype)
return empty + type
class QVectorPrinter:
"Print a QVector"
class _iterator:
def __init__(self, nodetype, d, size):
self.nodetype = nodetype
self.d = d
self.size = size
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.count >= self.size:
raise StopIteration
count = self.count
self.count = self.count + 1
return ('[%d]' % count, self.d[count])
def __init__(self, typename, val):
self.val = val
self.typename = typename
self.itype = self.val.type.template_argument(0)
def children(self):
pointer = self.val['d'].cast(gdb.lookup_type("char").pointer())+self.val['d']['offset']
size = int(self.val['d']['size'])
return self._iterator(self.itype, pointer.cast(self.itype.pointer()), size)
def to_string(self):
if self.val['d']['size'] == 0:
empty = "empty "
else:
empty = ""
return "%s%s<%s>" % ( empty, self.typename, self.itype )
class QLinkedListPrinter:
"Print a QLinkedList"
class _iterator:
def __init__(self, nodetype, begin, size):
self.nodetype = nodetype
self.it = begin
self.pos = 0
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.pos >= self.size:
raise StopIteration
pos = self.pos
val = self.it['t']
self.it = self.it['n']
self.pos = self.pos + 1
return ('[%d]' % pos, val)
def __init__(self, val):
self.val = val
self.itype = self.val.type.template_argument(0)
def children(self):
return self._iterator(self.itype, self.val['e']['n'], self.val['d']['size'])
def to_string(self):
if self.val['d']['size'] == 0:
empty = "empty "
else:
empty = ""
return "%sQLinkedList<%s>" % ( empty, self.itype )
class QMapPrinter:
"Print a QMap"
class _iterator:
def __init__(self, val):
self.val = val
self.ktype = self.val.type.template_argument(0)
self.vtype = self.val.type.template_argument(1)
self.data_node = self.val['e']['forward'][0]
self.count = 0
def __iter__(self):
return self
def payload (self):
if gdb.parse_and_eval:
ret = int(gdb.parse_and_eval('QMap<%s, %s>::payload()' % (self.ktype, self.vtype)))
if (ret): return ret;
#if the inferior function call didn't work, let's try to calculate ourselves
#we can't use QMapPayloadNode as it's inlined
#as a workaround take the sum of sizeof(members)
ret = self.ktype.sizeof
ret += self.vtype.sizeof
ret += gdb.lookup_type('void').pointer().sizeof
#but because of data alignment the value can be higher
#so guess it's aliged by sizeof(void*)
#TODO: find a real solution for this problem
ret += ret % gdb.lookup_type('void').pointer().sizeof
#for some reason booleans are different
if str(self.vtype) == 'bool':
ret += 2
ret -= gdb.lookup_type('void').pointer().sizeof
return ret
def concrete (self, data_node):
node_type = gdb.lookup_type('QMapNode<%s, %s>' % (self.ktype, self.vtype)).pointer()
return (data_node.cast(gdb.lookup_type('char').pointer()) - self.payload()).cast(node_type)
def __next__(self):
if self.data_node == self.val['e']:
raise StopIteration
node = self.concrete(self.data_node).dereference()
if self.count % 2 == 0:
item = node['key']
else:
item = node['value']
self.data_node = node['forward'][0]
result = ('[%d]' % self.count, item)
self.count = self.count + 1
return result
def __init__(self, val, container):
self.val = val
self.container = container
def children(self):
return self._iterator(self.val)
def to_string(self):
if self.val['d']['size'] == 0:
empty = "empty "
else:
empty = ""
return "%s%s<%s, %s>" % ( empty, self.container, self.val.type.template_argument(0), self.val.type.template_argument(1) )
def display_hint (self):
return 'map'
class QHashPrinter:
"Print a QHash"
class _iterator:
def __init__(self, val):
self.val = val
self.d = self.val['d']
self.ktype = self.val.type.template_argument(0)
self.vtype = self.val.type.template_argument(1)
self.end_node = self.d.cast(gdb.lookup_type('QHashData::Node').pointer())
self.data_node = self.firstNode()
self.count = 0
def __iter__(self):
return self
def hashNode (self):
"Casts the current QHashData::Node to a QHashNode and returns the result. See also QHash::concrete()"
return self.data_node.cast(gdb.lookup_type('QHashNode<%s, %s>' % (self.ktype, self.vtype)).pointer())
def firstNode (self):
"Get the first node, See QHashData::firstNode()."
e = self.d.cast(gdb.lookup_type('QHashData::Node').pointer())
#print "QHashData::firstNode() e %s" % e
bucketNum = 0
bucket = self.d['buckets'][bucketNum]
#print "QHashData::firstNode() *bucket %s" % bucket
n = self.d['numBuckets']
#print "QHashData::firstNode() n %s" % n
while n:
#print "QHashData::firstNode() in while, n %s" % n;
if bucket != e:
#print "QHashData::firstNode() in while, return *bucket %s" % bucket
return bucket
bucketNum += 1
bucket = self.d['buckets'][bucketNum]
#print "QHashData::firstNode() in while, new bucket %s" % bucket
n -= 1
#print "QHashData::firstNode() return e %s" % e
return e
def nextNode (self, node):
"Get the nextNode after the current, see also QHashData::nextNode()."
#print "******************************** nextNode"
#print "nextNode: node %s" % node
next = node['next'].cast(gdb.lookup_type('QHashData::Node').pointer())
e = next
#print "nextNode: next %s" % next
if next['next']:
#print "nextNode: return next"
return next
#print "nextNode: node->h %s" % node['h']
#print "nextNode: numBuckets %s" % self.d['numBuckets']
start = (node['h'] % self.d['numBuckets']) + 1
bucketNum = start
#print "nextNode: start %s" % start
bucket = self.d['buckets'][start]
#print "nextNode: bucket %s" % bucket
n = self.d['numBuckets'] - start
#print "nextNode: n %s" % n
while n:
#print "nextNode: in while; n %s" % n
#print "nextNode: in while; e %s" % e
#print "nextNode: in while; *bucket %s" % bucket
if bucket != e:
#print "nextNode: in while; return bucket %s" % bucket
return bucket
bucketNum += 1
bucket = self.d['buckets'][bucketNum]
n -= 1
#print "nextNode: return e %s" % e
return e
def __next__(self):
"GDB iteration, first call returns key, second value and then jumps to the next hash node."
if self.data_node == self.end_node:
raise StopIteration
node = self.hashNode()
if self.count % 2 == 0:
item = node['key']
else:
item = node['value']
self.data_node = self.nextNode(self.data_node)
self.count = self.count + 1
return ('[%d]' % self.count, item)
def __init__(self, val, container):
self.val = val
self.container = container
def children(self):
return self._iterator(self.val)
def to_string(self):
if self.val['d']['size'] == 0:
empty = "empty "
else:
empty = ""
return "%s%s<%s, %s>" % ( empty, self.container, self.val.type.template_argument(0), self.val.type.template_argument(1) )
def display_hint (self):
return 'map'
class QDatePrinter:
def __init__(self, typename, val):
self.val = val
def to_string(self):
        julianDay = int(self.val['jd'])
        if julianDay == 0:
            return "invalid QDate"
        # Copied from Qt sources; note that these algorithms need floor
        # division ('//'), since '/' would produce floats in Python 3.
        if julianDay >= 2299161:
            # Gregorian calendar starting from October 15, 1582
            # This algorithm is from Henry F. Fliegel and Thomas C. Van Flandern
            ell = julianDay + 68569
            n = (4 * ell) // 146097
            ell = ell - (146097 * n + 3) // 4
            i = (4000 * (ell + 1)) // 1461001
            ell = ell - (1461 * i) // 4 + 31
            j = (80 * ell) // 2447
            d = ell - (2447 * j) // 80
            ell = j // 11
            m = j + 2 - (12 * ell)
            y = 100 * (n - 49) + i + ell
        else:
            # Julian calendar until October 4, 1582
            # Algorithm from Frequently Asked Questions about Calendars by Claus Toendering
            julianDay += 32082
            dd = (4 * julianDay + 3) // 1461
            ee = julianDay - (1461 * dd) // 4
            mm = ((5 * ee) + 2) // 153
            d = ee - (153 * mm + 2) // 5 + 1
            m = mm + 3 - 12 * (mm // 10)
            y = dd - 4800 + (mm // 10)
        if y <= 0:
            y -= 1  # there is no year 0: 1 BC immediately precedes 1 AD
        return "%d-%02d-%02d" % (y, m, d)
class QTimePrinter:
def __init__(self, typename, val):
self.val = val
def to_string(self):
        ds = int(self.val['mds'])
        if ds == -1:
            return "invalid QTime"
        MSECS_PER_HOUR = 3600000
        SECS_PER_MIN = 60
        MSECS_PER_MIN = 60000
        # Floor division keeps these as integers for the %02d formatting below.
        hour = ds // MSECS_PER_HOUR
        minute = (ds % MSECS_PER_HOUR) // MSECS_PER_MIN
        second = (ds // 1000) % SECS_PER_MIN
        msec = ds % 1000
return "%02d:%02d:%02d.%03d" % (hour, minute, second, msec)
class QDateTimePrinter:
def __init__(self, typename, val):
self.val = val
def to_string(self):
from datetime import datetime as dt
from pytz import reference, utc
localtime = reference.LocalTimezone()
ms = int(self.val['d']['d']['m_msecs'])
ts = dt.fromtimestamp(ms // 1000, tz=utc)
return ts.strftime('%Y %h %d %H:%M:%S') + ('.%d %s (%d)'%(ms % 1000, localtime.tzname(ts), ms))
class QUrlPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
try:
return self.val['d']['encodedOriginal']
except RuntimeError:
#if no debug information is avaliable for Qt, try guessing the correct address for encodedOriginal
#problem with this is that if QUrlPrivate members get changed, this fails
offset = gdb.lookup_type('int').sizeof
offset += offset % gdb.lookup_type('void').pointer().sizeof #alignment
offset += gdb.lookup_type('QString').sizeof * 6
offset += gdb.lookup_type('QByteArray').sizeof
encodedOriginal = self.val['d'].cast(gdb.lookup_type('char').pointer());
encodedOriginal += offset
encodedOriginal = encodedOriginal.cast(gdb.lookup_type('QByteArray').pointer()).dereference();
encodedOriginal = encodedOriginal['d']['data'].string()
return encodedOriginal
class QSetPrinter:
"Print a QSet"
def __init__(self, val):
self.val = val
class _iterator:
def __init__(self, hashIterator):
self.hashIterator = hashIterator
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.hashIterator.data_node == self.hashIterator.end_node:
raise StopIteration
node = self.hashIterator.hashNode()
item = node['key']
self.hashIterator.data_node = self.hashIterator.nextNode(self.hashIterator.data_node)
self.count = self.count + 1
return ('[%d]' % (self.count-1), item)
def children(self):
hashPrinter = QHashPrinter(self.val['q_hash'], None)
hashIterator = hashPrinter._iterator(self.val['q_hash'])
return self._iterator(hashIterator)
def to_string(self):
if self.val['q_hash']['d']['size'] == 0:
empty = "empty "
else:
empty = ""
return "%sQSet<%s>" % ( empty , self.val.type.template_argument(0) )
class QCharPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
        return chr(int(self.val['ucs']))
def display_hint (self):
return 'string'
class QUuidPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
return "QUuid({%x-%x-%x-%x%x-%x%x%x%x%x%x})" % (self.val['data1'], self.val['data2'], self.val['data3'],
self.val['data4'][0], self.val['data4'][1],
self.val['data4'][2], self.val['data4'][3],
self.val['data4'][4], self.val['data4'][5],
self.val['data4'][6], self.val['data4'][7])
def display_hint (self):
return 'string'
# A "regular expression" printer which conforms to the
# "SubPrettyPrinter" protocol from gdb.printing.
class RxPrinter(object):
def __init__(self, name, function):
super(RxPrinter, self).__init__()
self.name = name
self.function = function
self.enabled = True
def invoke(self, value):
if not self.enabled:
return None
if value.type.code == gdb.TYPE_CODE_REF:
if hasattr(gdb.Value,"referenced_value"):
value = value.referenced_value()
return self.function(self.name, value)
# A pretty-printer that conforms to the "PrettyPrinter" protocol from
# gdb.printing. It can also be used directly as an old-style printer.
class Printer(object):
def __init__(self, name):
super(Printer, self).__init__()
self.name = name
self.subprinters = []
self.lookup = {}
self.enabled = True
self.compiled_rx = re.compile('^(Q[a-zA-Z0-9_:]+)(<.*>)?$')
def add(self, name, function):
# A small sanity check.
# FIXME
if not self.compiled_rx.match(name + '<>'):
raise ValueError('libstdc++ programming error: "%s" does not match' % name)
printer = RxPrinter(name, function)
self.subprinters.append(printer)
self.lookup[name] = printer
@staticmethod
def get_basic_type(type):
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Get the unqualified type, stripped of typedefs.
type = type.unqualified ().strip_typedefs ()
return type.tag
def __call__(self, val):
typename = self.get_basic_type(val.type)
if not typename:
return None
# All the types we match are template types, so we can use a
# dictionary.
match = self.compiled_rx.match(typename)
if not match:
return None
basename = match.group(1)
if val.type.code == gdb.TYPE_CODE_REF:
if hasattr(gdb.Value,"referenced_value"):
val = val.referenced_value()
if basename in self.lookup:
return self.lookup[basename].invoke(val)
# Cannot find a pretty printer. Return None.
return None
qt5_printer = None
def register_qt5_printers (obj):
global qt5_printer
if obj is None:
obj = gdb
obj.pretty_printers.append(qt5_printer)
def build_dictionary ():
global qt5_printer
qt5_printer = Printer("Qt5")
qt5_printer.add('QtPrivate::RefCount', QtPrivateRefCountPrinter)
qt5_printer.add('QString', QStringPrinter)
qt5_printer.add('QByteArray', QByteArrayPrinter)
qt5_printer.add('QStringList', QListPrinter)
qt5_printer.add('QList', QListPrinter)
qt5_printer.add('QVector', QVectorPrinter)
qt5_printer.add('QDate', QDatePrinter)
qt5_printer.add('QTime', QTimePrinter)
qt5_printer.add('QDateTime', QDateTimePrinter)
# TODO: remove
# pretty_printers_dict[re.compile('^QQueue')] = lambda val: QListPrinter(val, 'QQueue', None)
# pretty_printers_dict[re.compile('^QStack<.*>$')] = lambda val: QVectorPrinter(val, 'QStack')
# pretty_printers_dict[re.compile('^QLinkedList<.*>$')] = lambda val: QLinkedListPrinter(val)
# pretty_printers_dict[re.compile('^QMap<.*>$')] = lambda val: QMapPrinter(val, 'QMap')
# pretty_printers_dict[re.compile('^QMultiMap<.*>$')] = lambda val: QMapPrinter(val, 'QMultiMap')
# pretty_printers_dict[re.compile('^QHash<.*>$')] = lambda val: QHashPrinter(val, 'QHash')
# pretty_printers_dict[re.compile('^QMultiHash<.*>$')] = lambda val: QHashPrinter(val, 'QMultiHash')
# pretty_printers_dict[re.compile('^QUrl$')] = lambda val: QUrlPrinter(val)
# pretty_printers_dict[re.compile('^QSet<.*>$')] = lambda val: QSetPrinter(val)
# pretty_printers_dict[re.compile('^QChar$')] = lambda val: QCharPrinter(val)
# pretty_printers_dict[re.compile('^QUuid')] = lambda val: QUuidPrinter(val)
build_dictionary ()
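# Illustrative usage (a sketch, not part of the original file): from a gdb Python
# session or a .gdbinit script you could do, e.g.,
#   python
#   import printers            # this module; module name/path depends on your setup
#   printers.register_qt5_printers(None)                   # global registration
#   printers.register_qt5_printers(gdb.current_objfile())  # or per-objfile
#   end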
|
[
"michael.krasnyk@gmail.com"
] |
michael.krasnyk@gmail.com
|
d52c96d4227682abf104f5ca08c26c7324a23c7b
|
0d52600137490a0b4aa0eca41831d1f2cb0c65e4
|
/backend/users/migrations/0002_auto_20200923_0531.py
|
1192f7a3c17d474a694bcd54cd9778ebc0197b7e
|
[] |
no_license
|
crowdbotics-apps/golden-mapleleaf-20599
|
994bef57a8e56572f794f290c85e92c0112cc9da
|
3756ce96467cf9ec8b351e8bcfaf222dbc058723
|
refs/heads/master
| 2022-12-24T16:09:26.285202
| 2020-09-23T05:31:45
| 2020-09-23T05:31:45
| 297,862,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
# Generated by Django 2.2.16 on 2020-09-23 05:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
5ee65380bebb6699e1fa35149f2330cead284f78
|
55fdd6d73b245e0758ba1e76613b6b28d09c79c3
|
/src/save_rep_callback.py
|
822748dc14e74289bef2ea0e264c66fa2d9e0553
|
[] |
no_license
|
sumedharai12/semantic_cognition
|
f7c773231198a10e334853c04ed9bbda548d6a45
|
d0fefd60d52b5f8faa2eb736debd4b7ea405dd6e
|
refs/heads/main
| 2023-04-18T20:55:58.501796
| 2021-04-24T23:22:44
| 2021-04-24T23:22:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from pytorch_lightning.callbacks import Callback
class MyPrintingCallback(Callback):
def on_epoch_end(self, trainer, pl_module):
### TODO
pass
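# Illustrative sketch (not the author's implementation): such a callback is
# attached through the Trainer, e.g.
#   from pytorch_lightning import Trainer
#   trainer = Trainer(callbacks=[MyPrintingCallback()])
# and on_epoch_end could, for instance, print trainer.current_epoch once the
# TODO above is filled in.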
|
[
"annikabrundyn1@gmail.com"
] |
annikabrundyn1@gmail.com
|
efb07188716ca48860591a7eb35852c93b2a97ae
|
b220f91291af02c5416c3f92349472599096175d
|
/Project 2 - Genetic Algorithms/grid.py
|
c2b077b3f09f6f03bce78e9967750746c7cacb73
|
[] |
no_license
|
Kfirinb/Computational-Biology-Course
|
2dedb84111495d277a8cb067d7a8eb3c085f8f19
|
f6d217024ac51a59d77ba6fecaf0794439e7494f
|
refs/heads/main
| 2023-08-11T08:38:18.763638
| 2021-09-28T14:14:01
| 2021-09-28T14:14:01
| 411,067,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,972
|
py
|
"""
Computational Biology: Second Assignment.
Developed by CUCUMBER an OrSN Company and Kfir Inbal
May 2021.
UNAUTHORIZED REPLICATION OF THIS WORK IS STRICTLY PROHIBITED.
"""
from sys import argv

# Basic grid design, courtesy of Auctux: https://www.youtube.com/watch?v=d73z8U0iUYE
import pygame
import pygame.font
import numpy as np
import random
class Grid:
def __init__(self, width, height, offset, screen):
self.chuncks_flag = 0
self.columns = 33
self.rows = 33
self.screen = screen
self.scale = width / (self.columns)
self.size = (self.rows, self.columns)
        self.grid_array = np.zeros(self.size, dtype=int)  # zero-filled instead of uninitialized memory
self.offset = offset
self.conditionsArray = [] # Filled @ Def SetConditions
self.setConditions()
self.solutionsCollection = []
def random2d_array(self, mode):
if mode == 1:
matrixToModify = self.grid_array
else:
matrixToModify = np.ndarray(shape=(self.size)).astype(int)
for x in range(self.rows):
for y in range(self.columns):
if((x < 8) and (y < 8)):
matrixToModify[x][y] = 3
elif((x < 8) or (y < 8)):
matrixToModify[x][y] = 2
elif mode != 3:
matrixToModify[x][y] = random.randint(0, 1)
if mode == 1:
self.grid_array = matrixToModify
self.solutionsCollection.append(matrixToModify)
else:
return matrixToModify
def gridUpdater(self,off_color, on_color, surface):
for x in range(self.rows):
for y in range(self.columns):
y_pos = (y) * self.scale
yScale = self.scale - self.offset
x_pos = (x) * self.scale
xScale = self.scale - self.offset
if self.grid_array[x][y] == 3: # Corner cell
pygame.draw.rect(surface, (0, 0, 0), [x_pos, y_pos, xScale, yScale])
elif self.grid_array[x][y] == 2: # Conditional cell
pygame.draw.rect(surface, off_color, [x_pos, y_pos, xScale, yScale])
font = pygame.font.SysFont(None, 20)
if(y < 8): # Lines 0-24
try:
if(self.conditionsArray[x - 8][y] != 0):
img = font.render(str(self.conditionsArray[x - 8][y]), True, (0,0,0))
self.screen.blit(img,(x_pos, y_pos))
                                # If the condition is 0, we omit it and leave the cell empty.
                            except IndexError:  # Handles the case where the input file did not include a condition.
                                continue
else: # Lines 25-49
try:
if(self.conditionsArray[y + 17][x] != 0):
img = font.render(str(self.conditionsArray[y + 17][x]), True, (0, 0, 0))
self.screen.blit(img, (x_pos, y_pos))
                                # If the condition is 0, we omit it and leave the cell empty.
                            except IndexError:
                                continue
elif self.grid_array[x][y] == 1:
#rect = pygame.draw.rect(surface, on_color, [x_pos, y_pos, xScale, yScale])
pygame.draw.rect(surface, on_color, [x_pos, y_pos, xScale, yScale])
#else:
elif self.grid_array[x][y] == 0:
#rect = pygame.draw.rect(surface, off_color, [x_pos, y_pos, xScale, yScale])
pygame.draw.rect(surface, off_color, [x_pos, y_pos, xScale, yScale])
#self.comparePopulation() # Working on the general population.
#self.grid_array = next
if(len(self.solutionsCollection) == 1): # Generating second random solution
self.solutionGenerator(3, 100)
else:
mode = random.randint(1, 2) # Determining if crossover is going to be on column or row
            pivot = random.randint(0, 24) # Determining the point at which we perform the crossover.
self.solutionGenerator(mode, pivot)
def setConditions(self): # Reads the conditions from the input file.
counter = 0
file = open(argv[1], "r+")
for line in file:
if(counter == 50):
break
lineOfConditions = []
for condition in line.split():
lineOfConditions.append(int(condition))
self.conditionsArray.append(lineOfConditions)
counter += 1
file.close()
self.properSorter()
def properSorter(self):
for lineOfConditions in self.conditionsArray:
properFlag = 0 # This flag checks for the sanity of the conditions array. It gives a green light to proceed
# iff all 0's are before the non zero conditions.
while properFlag == 0:
somethingChangedFlag = 0
for index in range(len(lineOfConditions)):
if index < len(lineOfConditions) - 1:
if lineOfConditions[index] != 0 and lineOfConditions[index + 1] == 0:
lineOfConditions[index + 1] = lineOfConditions[index]
lineOfConditions[index] = 0
somethingChangedFlag = 1
if somethingChangedFlag == 0:
properFlag = 1
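    # Worked example (illustrative): properSorter keeps the relative order of the
    # non-zero conditions and shifts the zeros to the front, e.g.
    #   [3, 0, 2, 0]  ->  [0, 0, 3, 2]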
def solutionGenerator(self, mode, pivot):
solution1 = self.random2d_array(3)
solution2 = self.random2d_array(3)
if mode == 1: # Rows
for x in range(8, pivot + 8):
for y in range(8, self.rows):
solution1[x][y] = self.solutionsCollection[-2][x][y]
solution2[x][y] = self.solutionsCollection[-1][x][y]
for x in range(pivot + 8, self.columns):
for y in range(8, self.rows):
# if x == pivot + 8:
# solution1[x][y] = 10
# solution1[x][y] = 10
#else:
solution1[x][y] = self.solutionsCollection[-1][x][y]
solution2[x][y] = self.solutionsCollection[-2][x][y]
bestSolution = self.MatrixGrade(solution1, solution2)
self.solutionsCollection.append(bestSolution)
if mode == 2: # Cols
for x in range(8, self.columns):
for y in range(8, pivot + 8):
solution1[x][y] = self.solutionsCollection[-2][x][y]
solution2[x][y] = self.solutionsCollection[-1][x][y]
for x in range(8, self.columns):
for y in range(pivot + 8, self.rows):
# if y == pivot + 8:
# solution1[x][y] = 20
# solution1[x][y] = 20
#else:
solution1[x][y] = self.solutionsCollection[-1][x][y]
solution2[x][y] = self.solutionsCollection[-2][x][y]
bestSolution = self.MatrixGrade(solution1, solution2)
self.solutionsCollection.append(bestSolution)
if mode == 3:
solution2 = self.random2d_array(2)
self.solutionsCollection.append(solution2)
#print(len(self.solutionsCollection))
self.solutionLoader()
def solutionLoader(self):
self.grid_array = self.solutionsCollection[-1]
def comparePopulation(self, solution):
# print(self.conditionsArray)
counter = 0
distanceAtColsFromDesiredPopulation = []
distanceAtRowsFromDesiredPopulation = []
for line in self.conditionsArray:
desiredPopulation = 0
for condition in line:
desiredPopulation += condition
#print(desiredPopulation)
if counter < 25: # Working on columns
population = 0
for y in range(8, self.rows):
population += solution[8 + counter][y]
#print("Actual population of column " + str(counter) + " is " + str(population)
#+ " while the desired population is: " + str(desiredPopulation))
distanceAtColsFromDesiredPopulation.append(population - desiredPopulation)
else: # Working on rows
population = 0
for x in range(8, self.columns):
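# counter - 17 == (counter - 25) + 8: skip past the 25 column conditions,
# then add the 8-cell margin that apparently offsets the playing field.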
population += solution[x][counter - 17]
#print("Actual population of row " + str(counter - 25) + " is " + str(population)
# + " while the desired population is: " + str(desiredPopulation))
distanceAtRowsFromDesiredPopulation.append(population - desiredPopulation)
counter += 1 # Proceeding to the next column/row.
distancesToReturn = []
distancesToReturn.append(distanceAtRowsFromDesiredPopulation)
distancesToReturn.append(distanceAtColsFromDesiredPopulation)
return distancesToReturn # This array consists of the following members: [0] At Rows, [1] At Cols.
# The following func transfers the population according to the requirements.
def populationManager(self, distanceFromDesiredPopulation, mode): # Mode 0 for cols, mode 1 for rows
minimum = distanceFromDesiredPopulation[0]
minimumIndex = 0
maximum = distanceFromDesiredPopulation[0]
maximumIndex = 0
for index in range(len(distanceFromDesiredPopulation)):
if distanceFromDesiredPopulation[index] < minimum and distanceFromDesiredPopulation[index] != 0:
minimum = distanceFromDesiredPopulation[index]
minimumIndex = index
if distanceFromDesiredPopulation[index] > maximum and distanceFromDesiredPopulation[index] != 0:
maximum = distanceFromDesiredPopulation[index]
maximumIndex = index
if minimum >= 0 or maximum <= 0: # Every line either meets/exceeds or falls short of the desired population;
return # there is no surplus/deficit pair, so we cannot rebalance anything in those cases.
#print(minimumIndex)
#print(minimum)
self.populationMoverAboveBelow(minimumIndex, maximumIndex, mode)
def populationMoverAboveBelow(self, minimumIndex, maximumIndex, mode):
# print("Maximum: " + str(maximumIndex) + " Minimum: " + str(minimumIndex))
next = np.ndarray(shape=(self.size)).astype(int)
for x in range(self.rows):
for y in range(self.columns):
next[x][y] = self.grid_array[x][y]
# print(next)
if mode == 0: # Working on cols
for index in range(8, self.rows):
if self.grid_array[maximumIndex + 8][index] - self.grid_array[minimumIndex + 8][index] > 0: # The maximum cell is 1
# and the min is 0; the + 8 offset matches the writes below and the reads in mode 1.
next[maximumIndex + 8][index] = 0
next[minimumIndex + 8][index] = 1
break
if mode == 1: # Working on rows
for index in range(8, self.columns):
if self.grid_array[index][maximumIndex + 8] - self.grid_array[index][minimumIndex + 8] > 0:
next[index][maximumIndex + 8] = 0
next[index][minimumIndex + 8] = 1
break
self.grid_array = next # Here we affect the next grid
# print(".......................")
# print(counter)
def chuncksCounter(self, solution):
# Creates a list containing 2 lists, each of 25 items, all set to 0
w, h = 25, 2
chuncksArray = [[0 for x in range(w)] for y in range(h)]
i = 0
j = 0
for row in range(8, self.rows):
rows_chuncks = []
rows_counter = 0
for column in range(8, self.columns):
if (solution[column][row] == 1):
rows_counter += 1
elif (solution[column][row] == 0):
if (rows_counter > 0):
rows_chuncks.append(rows_counter)
rows_counter = 0
if (rows_counter > 0):
rows_chuncks.append(rows_counter)
# if len(rows_chuncks) > 8: todo: Do something!
while(len(rows_chuncks) < 8):
rows_chuncks.insert(0, 0) # The first 0 is the position at which we insert, i.e. the beginning of the list;
# the second 0 is the member being inserted.
chuncksArray[i][j] = rows_chuncks
j += 1
# print("Chuncks in row " + str(row-7) + " are: ")
# print(rows_chunc
i=1
j=0
for column in range(8, self.columns):
cols_chuncks = []
cols_counter = 0
for row in range(8, self.rows):
if (solution[column][row] == 1):
cols_counter += 1
elif (solution[column][row] == 0):
if (cols_counter > 0):
cols_chuncks.append(cols_counter)
cols_counter = 0
if (cols_counter > 0):
cols_chuncks.append(cols_counter)
# if len(cols_chuncks) > 8: todo: Do something!
while (len(cols_chuncks) < 8):
cols_chuncks.insert(0, 0)
chuncksArray[i][j] = cols_chuncks
j += 1
# print("Chuncks in column " + str(column-7) + " are: ")
# print(cols_chuncks)
return chuncksArray
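# Worked example (added for clarity): a line reading 1,1,0,1,0,0,1,1,1 yields the
# chunks [2, 1, 3], which is then left-padded with zeros to [0, 0, 0, 0, 0, 2, 1, 3]
# so every row/column entry has exactly 8 slots.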
def compareLists(self, listConditions, listActual):
#finalGrade=0.5
#GradeReduction = 0.0625/2
difference = 0
IsOverlyLong = False
if(len(listActual) > 8): # If there are more than 8 chunks in the solution, the IsOverlyLong flag is raised.
#GradeReduction = 0.0625
IsOverlyLong = True
for i in range(8): # Counts how many chunks differ between the requirements and the actual solution.
if(listConditions[i] != listActual[i]):
difference += 1
return (difference, IsOverlyLong)
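# Example (added): compareLists([0,0,0,0,0,0,2,3], [0,0,0,0,0,0,2,2]) returns
# (1, False) -- one of the eight chunk slots differs, and the candidate has no
# more than 8 chunks, so the flag stays down.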
def compareConditions(self, solution):
# Creates a list containing 2 lists, each of 25 items, all set to 0
w, h = 25, 2
compareConditionsArray = [[0 for x in range(w)] for y in range(h)]
i = 0
chuncksArray = self.chuncksCounter(solution)
for line in self.conditionsArray:
if i < 25: # Working on cols
result = self.compareLists(line, chuncksArray[1][i]) # Returns a tuple consisting of: [0] the number of differing chunks,
# [1] a flag indicating whether a given row/col of the solution has more than 8 chunks.
if (result[1]): # result[1] consists of the flag, (True or false). If Flag == True then...
compareConditionsArray[1][i] = 0.5 - (0.0625) * result[0] # If the flag is raised the reduction is harsher.
else:
compareConditionsArray[1][i] = 0.5 - (0.0625 / 2) * result[0]
#compareConditionsArray[0][i] = len(set(chuncksArray[0][i]) & set(line)) #checks the number of common elements in 2 lists
else: # Working on rows
result = self.compareLists(line, chuncksArray[0][i - 25])
if(result[1]):
compareConditionsArray[0][i-25] = 0.5 - (0.0625) * result[0]
else:
compareConditionsArray[0][i - 25] = 0.5 - (0.0625/2) * result[0]
#compareConditionsArray[1][i-25] = len(set(chuncksArray[0][i]) & set(line))
i += 1
return compareConditionsArray
def GradeForConditions(self, solution):
# Creates a list containing 2 lists, each of 25 items, all set to 0.
# gradesArray holds a grade for every column and row, in the order of the conditions file
# (50 entries in total).
w, h = 25, 2
gradesArray = [[0 for x in range(w)] for y in range(h)]
# The grade is built from 50% population comparison and 50% chunks comparison.
comparePopulationArray = self.comparePopulation(solution) # contains population differences
compareConditionsArray = self.compareConditions(solution) # already contains grades
for i in range(0, 2): # Both halves of the grades array: [0] rows, [1] columns.
for j in range(0, 25):
absolute_difference = abs(comparePopulationArray[i][j])
if absolute_difference == 0:
gradesArray[i][j] = 0.5 + compareConditionsArray[i][j]
else:
gradesArray[i][j] = 0.5 - 0.15*(absolute_difference/10) + compareConditionsArray[i][j]
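# Worked example (added; assumes Python 3 division): a line whose population is
# off by 3 and whose chunk grade is 0.40 scores 0.5 - 0.15*(3/10) + 0.40 = 0.855.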
# Debug printout of the per-line grades, kept as a comment:
# for i in range(0, 2):
#     for j in range(0, 25):
#         label = "row" if i == 0 else "column"
#         print("Grade for " + label + " " + str(j) + " is " + str(gradesArray[i][j]))
return gradesArray
def gradeCalculator(self, gradesArray):
grade = 0
for i in range(0, 2): # Sum all 50 grades (2 halves x 25).
for j in range(0, 25):
grade += gradesArray[i][j]
grade /= 50 # Calculating the average after going through the 2D array
return grade # A float
def MatrixGrade(self, solution1, solution2):
grade1 = self.gradeCalculator(self.GradeForConditions(solution1)) # Expecting to get a float to grade1
print("Grade1: " + str(grade1))
grade2 = self.gradeCalculator(self.GradeForConditions(solution2)) # Expecting to get a float to grade2
print("Grade2: " + str(grade2))
if(grade1 >= grade2):
bestSolution = solution1
else:
bestSolution = solution2
return bestSolution
|
[
"70212138+Kfirinb@users.noreply.github.com"
] |
70212138+Kfirinb@users.noreply.github.com
|
38d323f79569e894cdb87fba975a3678d9a6bd1a
|
8594583b6fc86584ebe8d3cb816e4e631fb3ecb5
|
/trackingEngine/__init__.py
|
21dcd60d79a9fecb5629d89a2c19758b3d0433e1
|
[] |
no_license
|
connorlane/powdertracking
|
69d1dd69ab4b88b455144e9d0b91871266d03195
|
3974e612059c0177942a5cac1545afb3a8eec33c
|
refs/heads/master
| 2020-06-13T07:01:39.716173
| 2018-06-20T02:00:37
| 2018-06-20T02:00:37
| 75,417,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
__all__ = ['track', 'util', 'analyze']
from trackingEngine import *
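# Star-importing the package from inside its own __init__ imports and binds the
# submodules listed in __all__ (track, util, analyze) as package attributes.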
|
[
"connor.coward@gmail.com"
] |
connor.coward@gmail.com
|
9359f8d038fc1e8f77a7fe4f5f9df00e0e241f83
|
4c76c88f6421abb52a9e68ae48d33f32b0fcf5af
|
/traffic/run.py
|
2c7fe99b08d3c482e924668c6ed6193073860037
|
[
"Apache-2.0"
] |
permissive
|
aperturetechnology/starthinker
|
76ba1d8883dbcf32eff4164f57f4342d0b912b70
|
fd2d70e39f05cb29afc65b8a78ea38441e1e2b9a
|
refs/heads/master
| 2020-04-09T22:19:14.752457
| 2018-11-27T13:52:49
| 2018-11-27T13:52:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,569
|
py
|
###########################################################################
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
"""Main entry point of Bulkdozer.
"""
from util.project import project
from traffic.feed import Feed
from traffic.feed import FieldMap
from traffic.ad import AdDAO
from traffic.creative_assets import CreativeAssetDAO
from traffic.video_format import VideoFormatDAO
from traffic.creative_association import CreativeAssociationDAO
from traffic.creative import CreativeDAO
from traffic.campaign import CampaignDAO
from traffic.landing_page import LandingPageDAO
from traffic.placement import PlacementDAO
from traffic.event_tag import EventTagDAO
from traffic.store import store
from traffic.config import config
from traffic.logger import logger, timer
import datetime
import json
import sys
import traceback
from util.auth import get_service
video_format_dao = None
landing_page_dao = None
campaign_dao = None
creative_association_dao = None
creative_dao = None
placement_dao = None
creative_asset_dao = None
ad_dao = None
event_tag_dao = None
spreadsheet = None
def process_feed(feed_name, dao, print_field, msg='Processing'):
"""Processes a feed that represents a specific entity in the Bulkdozer feed.
Args:
feed_name: Name of the feed to process, refer to feed.py for the supported
feed names.
dao: The data access object used to interact with the CM API and perform
updates; it must match the entity being updated in CM, in the sense that
the fields required to fetch, create, and update the entity in CM must be
included in the feed.
print_field: Field that identifies the item, used to print status messages
to the Log tab of the Bulkdozer feed.
msg: Prefix message to use when writing to the Log tab of the Bulkdozer
feed, for instance we display Processing Campaign for campaign, and
Uploading Asset for assets.
"""
feed = Feed(project.task['auth'], project.task['sheet_id'], feed_name, spreadsheet=spreadsheet)
execute_feed(feed, dao, print_field, msg)
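# For example, the campaigns() function below reduces to:
#   process_feed('campaign_feed', campaign_dao, FieldMap.CAMPAIGN_NAME,
#                'Processing campaign')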
def execute_feed(feed, dao, print_field, msg='Processing'):
"""Executes a specific feed.
Args:
feed: Feed object representing the Bulkdozer feed to process.
dao: The data access object used to interact with the CM API and perform
updates; it must match the entity being updated in CM, in the sense that
the fields required to fetch, create, and update the entity in CM must be
included in the feed.
print_field: Field that identifies the item, used to print status messages
to the Log tab of the Bulkdozer feed.
msg: Prefix message to use when writing to the Log tab of the Bulkdozer
feed, for instance we display Processing Campaign for campaign, and
Uploading Asset for assets.
"""
try:
for feed_item in feed.feed:
value = str(feed_item[print_field])
print '%s %s' % (msg, value.encode('utf-8'))
logger.log('%s %s' % (msg, value.encode('utf-8')))
dao.process(feed_item)
finally:
feed.update()
def setup():
"""Sets up Bulkdozer configuration and required object to execute the job.
"""
# Setting up required objects and parsing parameters
config.auth = project.task['auth']
config.trix_id = project.task.get('store', {}).get('sheet_id',
project.task['sheet_id'])
config.load()
logger.auth = project.task['auth']
logger.trix_id = project.task.get('logger', {}).get('sheet_id',
project.task['sheet_id'])
logger.buffered = True
def init_daos():
global video_format_dao
global landing_page_dao
global campaign_dao
global creative_association_dao
global creative_dao
global placement_dao
global creative_asset_dao
global ad_dao
global event_tag_dao
global spreadsheet
service = get_service('sheets', 'v4', project.task['auth'])
spreadsheet = service.spreadsheets().get(
spreadsheetId=project.task['sheet_id']).execute()
store.auth = project.task['auth']
store.trix_id = project.task.get('store', {}).get('sheet_id',
project.task['sheet_id'])
store.load_id_map()
video_format_dao = VideoFormatDAO(project.task['auth'],
project.task['dcm_profile_id'])
landing_page_dao = LandingPageDAO(project.task['auth'],
project.task['dcm_profile_id'])
campaign_dao = CampaignDAO(project.task['auth'],
project.task['dcm_profile_id'])
creative_association_dao = CreativeAssociationDAO(
project.task['auth'], project.task['dcm_profile_id'])
creative_dao = CreativeDAO(project.task['auth'],
project.task['dcm_profile_id'])
placement_dao = PlacementDAO(project.task['auth'],
project.task['dcm_profile_id'])
creative_asset_dao = CreativeAssetDAO(
project.task['auth'], project.task['dcm_profile_id'], project.id)
ad_dao = AdDAO(project.task['auth'], project.task['dcm_profile_id'])
event_tag_dao = EventTagDAO(project.task['auth'],
project.task['dcm_profile_id'])
def assets():
"""Processes assets.
"""
process_feed('creative_asset_feed', creative_asset_dao,
FieldMap.CREATIVE_ASSET_FILE_NAME, 'Uploading creative asset')
def landing_pages():
"""Processes landing pages.
"""
process_feed('landing_page_feed', landing_page_dao,
FieldMap.CAMPAIGN_LANDING_PAGE_NAME, 'Processing landing page')
def campaigns():
"""Processes campaigns.
"""
process_feed('campaign_feed', campaign_dao, FieldMap.CAMPAIGN_NAME,
'Processing campaign')
def event_tags():
"""Processes event tags.
"""
process_feed('event_tag_feed', event_tag_dao, FieldMap.EVENT_TAG_NAME,
'Processing event tag')
def placements():
"""Processes placements.
"""
placement_feed = Feed(project.task['auth'], project.task['sheet_id'],
'placement_feed', spreadsheet=spreadsheet)
pricing_schedule_feed = Feed(project.task['auth'], project.task['sheet_id'],
'placement_pricing_schedule_feed', spreadsheet=spreadsheet)
transcode_configs_feed = Feed(project.task['auth'], project.task['sheet_id'],
'transcode_configs_feed', spreadsheet=spreadsheet)
placement_dao.map_placement_transcode_configs(placement_feed.feed,
transcode_configs_feed.feed,
pricing_schedule_feed.feed)
execute_feed(placement_feed, placement_dao, FieldMap.PLACEMENT_NAME,
'Processing placement')
def creatives():
"""Processes creatives.
"""
creative_asset_feed = Feed(project.task['auth'], project.task['sheet_id'],
'creative_asset_feed', spreadsheet=spreadsheet)
creative_feed = Feed(project.task['auth'], project.task['sheet_id'],
'creative_feed', spreadsheet=spreadsheet)
third_party_url_feed = Feed(project.task['auth'], project.task['sheet_id'],
'third_party_url_feed', spreadsheet=spreadsheet)
creative_association_feed = Feed(project.task['auth'],
project.task['sheet_id'],
'creative_asset_association_feed', spreadsheet=spreadsheet)
creative_dao.map_creative_third_party_url_feeds(creative_feed.feed,
third_party_url_feed.feed)
creative_dao.map_creative_and_association_feeds(
creative_feed.feed, creative_association_feed.feed)
creative_dao.map_assets_feed(creative_asset_feed)
execute_feed(creative_feed, creative_dao, FieldMap.CREATIVE_NAME,
'Processing creative')
creative_association_feed.update()
third_party_url_feed.update()
process_feed('creative_campaign_association_feed', creative_association_dao,
FieldMap.CREATIVE_ID, 'Associating with campaign, creative id')
def ads():
"""Processes ads.
"""
placement_feed = Feed(project.task['auth'], project.task['sheet_id'],
'placement_feed', spreadsheet=spreadsheet)
event_tag_profile_feed = Feed(project.task['auth'], project.task['sheet_id'],
'event_tag_profile_feed', spreadsheet=spreadsheet)
ad_feed = Feed(project.task['auth'], project.task['sheet_id'], 'ad_feed', spreadsheet=spreadsheet)
ad_creative_assignment_feed = Feed(project.task['auth'],
project.task['sheet_id'],
'ad_creative_assignment_feed', spreadsheet=spreadsheet)
ad_placement_assignment_feed = Feed(project.task['auth'],
project.task['sheet_id'],
'ad_placement_assignment_feed', spreadsheet=spreadsheet)
ad_event_tag_assignment_feed = Feed(project.task['auth'],
project.task['sheet_id'],
'event_tag_ad_assignment_feed', spreadsheet=spreadsheet)
ad_dao.map_feeds(ad_feed.feed, ad_creative_assignment_feed.feed,
ad_placement_assignment_feed.feed,
ad_event_tag_assignment_feed.feed, placement_feed.feed,
event_tag_profile_feed.feed)
execute_feed(ad_feed, ad_dao, FieldMap.AD_ID, 'Processing Ad')
ad_creative_assignment_feed.update()
ad_placement_assignment_feed.update()
ad_event_tag_assignment_feed.update()
event_tag_profile_feed.update()
def traffic():
"""Main function of Bulkdozer, performs the Bulkdozer job
"""
if project.verbose:
print 'traffic'
try:
setup()
if config.mode in ['ALWAYS', 'ONCE']:
try:
logger.clear()
logger.log('Bulkdozer traffic job starting')
logger.log('Execution config is %s' % config.mode)
logger.flush()
if config.mode == 'ONCE':
config.mode = 'OFF'
config.update()
init_daos()
assets()
landing_pages()
campaigns()
event_tags()
placements()
creatives()
ads()
store.clear()
finally:
logger.log('Bulkdozer traffic job ended')
logger.flush()
store.save_id_map()
except Exception as error:
stack = traceback.format_exc()
print stack
logger.log(str(error))
logger.flush()
def test():
"""For development purposes when debugging a specific entity, this function is handy to run just that entity.
"""
setup()
init_daos()
creatives()
if __name__ == '__main__':
"""Main entry point of Bulkdozer.
"""
timer.start_timer('bulkdozer job')
project.load('traffic')
traffic()
timer.check_timer('bulkdozer job')
#test()
|
[
"kenjora@kenjora-macbookair.roam.corp.google.com"
] |
kenjora@kenjora-macbookair.roam.corp.google.com
|
cfb0c8d5e7940b94ee7ed94e61e281fd7031d601
|
048af27d13d71b2b36145dc7ae801401291b9919
|
/google-cloud-sdk/lib/googlecloudsdk/core/updater/schemas.py
|
9e766bad1cf0ff88f03183ccabb5d0fc05554d43
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
shippableImages/pi
|
e4c941e8a428be49f2958e565e063880f2eb1cea
|
b21a9231f70d394c935c0d7da1a260eb3abc88f5
|
refs/heads/master
| 2020-04-01T19:36:12.227033
| 2015-07-01T01:18:23
| 2015-07-01T01:18:23
| 36,627,764
| 0
| 2
| null | 2015-07-01T01:18:23
| 2015-05-31T23:59:39
|
Groff
|
UTF-8
|
Python
| false
| false
| 17,905
|
py
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Contains object representations of the JSON data for components."""
import datetime
from googlecloudsdk.core.util import platforms
from googlecloudsdk.core.util import console_io
class Error(Exception):
"""Base exception for the schemas module."""
pass
class ParseError(Error):
"""An error for when a component snapshot cannot be parsed."""
pass
class DictionaryParser(object):
"""A helper class to parse elements out of a JSON dictionary."""
def __init__(self, cls, dictionary):
"""Initializes the parser.
Args:
cls: class, The class that is doing the parsing (used for error messages).
dictionary: dict, The JSON dictionary to parse.
"""
self.__cls = cls
self.__dictionary = dictionary
self.__args = {}
def Args(self):
"""Gets the dictionary of all parsed arguments.
Returns:
dict, The dictionary of field name to value for all parsed arguments.
"""
return self.__args
def _Get(self, field, default, required):
if required and field not in self.__dictionary:
raise ParseError('Required field [{0}] not found while parsing [{1}]'
.format(field, self.__cls))
return self.__dictionary.get(field, default)
def Parse(self, field, required=False, default=None, func=None):
"""Parses a single element out of the dictionary.
Args:
field: str, The name of the field to parse.
required: bool, If the field must be present or not (False by default).
default: str or dict, The value to use if a non-required field is not
present.
func: An optional function to call with the value before returning (if
value is not None). It takes a single parameter and returns a single
new value to be used instead.
Raises:
ParseError: If a required field is not found or if the field parsed is a
list.
"""
value = self._Get(field, default, required)
if value is not None:
if isinstance(value, list):
raise ParseError('Did not expect a list for field [{field}] in '
'component [{component}]'.format(
field=field, component=self.__cls))
if func:
value = func(value)
self.__args[field] = value
def ParseList(self, field, required=False, default=None, func=None):
"""Parses a element out of the dictionary that is a list of items.
Args:
field: str, The name of the field to parse.
required: bool, If the field must be present or not (False by default).
default: str or dict, The value to use if a non-required field is not
present.
func: An optional function to call with each value in the parsed list
before returning (if the list is not None). It takes a single parameter
and returns a single new value to be used instead.
Raises:
ParseError: If a required field is not found or if the field parsed is
not a list.
"""
value = self._Get(field, default, required)
if value:
if not isinstance(value, list):
raise ParseError('Expected a list for field [{0}] in component [{1}]'
.format(field, self.__cls))
if func:
value = [func(v) for v in value]
self.__args[field] = value
class DictionaryWriter(object):
"""Class to help writing these objects back out to a dictionary."""
def __init__(self, obj):
self.__obj = obj
self.__dictionary = {}
@staticmethod
def AttributeGetter(attrib):
def Inner(obj):
return getattr(obj, attrib)
return Inner
def Write(self, field, func=None):
"""Writes the given field to the dictionary.
This gets the value of the attribute named field from self, and writes that
to the dictionary. The field is not written if the value is not set.
Args:
field: str, The field name.
func: An optional function to call on the value of the field before
writing it to the dictionary.
"""
value = getattr(self.__obj, field)
if value is None:
return
if func:
value = func(value)
self.__dictionary[field] = value
def WriteList(self, field, func=None):
"""Writes the given list field to the dictionary.
This gets the value of the attribute named field from self, and writes that
to the dictionary. The field is not written if the value is not set.
Args:
field: str, The field name.
func: An optional function to call on each value in the list before
writing it to the dictionary.
"""
def ListMapper(values):
return [func(v) for v in values]
list_func = ListMapper if func else None
self.Write(field, func=list_func)
def Dictionary(self):
return self.__dictionary
class ComponentDetails(object):
"""Encapsulates some general information about the component.
Attributes:
display_name: str, The user facing name of the component.
description: str, A little more detail about what the component does.
"""
@classmethod
def FromDictionary(cls, dictionary):
p = DictionaryParser(cls, dictionary)
p.Parse('display_name', required=True)
p.Parse('description', required=True)
return cls(**p.Args())
def ToDictionary(self):
w = DictionaryWriter(self)
w.Write('display_name')
w.Write('description')
return w.Dictionary()
def __init__(self, display_name, description):
self.display_name = display_name
self.description = description
class ComponentVersion(object):
"""Version information for the component.
Attributes:
build_number: int, The unique, monotonically increasing version of the
component.
version_string: str, The user facing version for the component.
"""
@classmethod
def FromDictionary(cls, dictionary):
p = DictionaryParser(cls, dictionary)
p.Parse('build_number', required=True)
p.Parse('version_string', required=True)
return cls(**p.Args())
def ToDictionary(self):
w = DictionaryWriter(self)
w.Write('build_number')
w.Write('version_string')
return w.Dictionary()
def __init__(self, build_number, version_string):
self.build_number = build_number
self.version_string = version_string
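# A minimal round trip (illustrative only):
#   cv = ComponentVersion.FromDictionary({'build_number': 7, 'version_string': '0.9.7'})
#   cv.ToDictionary()  # -> {'build_number': 7, 'version_string': '0.9.7'}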
class ComponentData(object):
"""Information on the data source for the component.
Attributes:
type: str, The type of the source of this data (i.e. tar).
source: str, The hosted location of the component.
size: int, The size of the component in bytes.
checksum: str, The hex digest of the archive file.
contents_checksum: str, The hex digest of the contents of all files in the
archive.
"""
@classmethod
def FromDictionary(cls, dictionary):
p = DictionaryParser(cls, dictionary)
p.Parse('type', required=True)
p.Parse('source', required=True)
p.Parse('size')
p.Parse('checksum')
p.Parse('contents_checksum')
return cls(**p.Args())
def ToDictionary(self):
w = DictionaryWriter(self)
w.Write('type')
w.Write('source')
w.Write('size')
w.Write('checksum')
w.Write('contents_checksum')
return w.Dictionary()
# pylint: disable=redefined-builtin, params must match JSON names
def __init__(self, type, source, size, checksum, contents_checksum):
self.type = type
self.source = source
self.size = size
self.checksum = checksum
self.contents_checksum = contents_checksum
class ComponentPlatform(object):
"""Information on the applicable platforms for the component.
Attributes:
operating_systems: [platforms.OperatingSystem], The operating systems this
component is valid on. If [] or None, it is valid on all operating
systems.
architectures: [platforms.Architecture], The architectures this component is
valid on. If [] or None, it is valid on all architectures.
"""
@classmethod
def FromDictionary(cls, dictionary):
p = DictionaryParser(cls, dictionary)
p.ParseList('operating_systems', func=platforms.OperatingSystem.FromId)
p.ParseList('architectures', func=platforms.Architecture.FromId)
return cls(**p.Args())
def ToDictionary(self):
w = DictionaryWriter(self)
w.WriteList('operating_systems',
func=DictionaryWriter.AttributeGetter('id'))
w.WriteList('architectures', func=DictionaryWriter.AttributeGetter('id'))
return w.Dictionary()
def __init__(self, operating_systems, architectures):
"""Creates a new ComponentPlatform.
Args:
operating_systems: list(platforms.OperatingSystem), The OSes this
component should be installed on. None indicates all OSes.
architectures: list(platforms.Architecture), The processor architectures
this component works on. None indicates all architectures.
"""
self.operating_systems = operating_systems
self.architectures = architectures
def Matches(self, platform):
"""Determines if the platform for this component matches the environment.
For both operating system and architecture, it is a match if the given
value is None, if none are registered, or if the given value is one of
those registered. In order to match, both operating system and
architecture must match.
Args:
platform: platform.Platform, The platform that must be matched. None will
always match.
Returns:
True if it matches or False if not.
"""
if not platform:
return True
if platform.operating_system and self.operating_systems:
if platform.operating_system not in self.operating_systems:
return False
if platform.architecture and self.architectures:
if platform.architecture not in self.architectures:
return False
return True
def IntersectsWith(self, other):
"""Determines if this platform intersects with the other platform.
Platforms intersect if they can both potentially be installed on the same
system.
Args:
other: ComponentPlatform, The other component platform to compare against.
Returns:
bool, True if there is any intersection, False otherwise.
"""
return (self.__CollectionsIntersect(self.operating_systems,
other.operating_systems) and
self.__CollectionsIntersect(self.architectures,
other.architectures))
def __CollectionsIntersect(self, collection1, collection2):
"""Determines if the two collections intersect.
The collections intersect if either or both are None or empty, or if they
contain an intersection of elements.
Args:
collection1: [] or None, The first collection.
collection2: [] or None, The second collection.
Returns:
bool, True if there is an intersection, False otherwise.
"""
# If either is None (valid for all) then they definitely intersect.
if not collection1 or not collection2:
return True
# Both specify values, return if there is at least one intersecting.
return set(collection1) & set(collection2)
class Component(object):
"""Data type for an entire component.
Attributes:
id: str, The unique id for this component.
details: ComponentDetails, More descriptions of the components.
version: ComponentVersion, Information about the version of this component.
is_hidden: bool, True if this should be hidden from the user.
is_required: bool, True if this component must always be installed.
is_configuration: bool, True if this should be displayed in the packages
section of the component manager.
data: ComponentData, Information about where to get the component from.
platform: ComponentPlatform, Information about what operating systems and
architectures the component is valid on.
dependencies: [str], The other components required by this one.
"""
@classmethod
def FromDictionary(cls, dictionary):
"""Converts a dictionary object to an instantiated Component class.
Args:
dictionary: The Dictionary to to convert from.
Returns:
A Component object initialized from the dictionary object.
"""
p = DictionaryParser(cls, dictionary)
p.Parse('id', required=True)
p.Parse('details', required=True, func=ComponentDetails.FromDictionary)
p.Parse('version', required=True, func=ComponentVersion.FromDictionary)
p.Parse('is_hidden', default=False)
p.Parse('is_required', default=False)
p.Parse('is_configuration', default=False)
p.Parse('data', func=ComponentData.FromDictionary)
p.Parse('platform', default={}, func=ComponentPlatform.FromDictionary)
p.ParseList('dependencies', default=[])
return cls(**p.Args())
def ToDictionary(self):
"""Converts a Component object to a Dictionary object.
Returns:
A Dictionary object initialized from self.
"""
w = DictionaryWriter(self)
w.Write('id')
w.Write('details', func=ComponentDetails.ToDictionary)
w.Write('version', func=ComponentVersion.ToDictionary)
w.Write('is_hidden')
w.Write('is_required')
w.Write('is_configuration')
w.Write('data', func=ComponentData.ToDictionary)
w.Write('platform', func=ComponentPlatform.ToDictionary)
w.WriteList('dependencies')
return w.Dictionary()
# pylint: disable=redefined-builtin, params must match JSON names
def __init__(self, id, details, version, dependencies, data, is_hidden,
is_required, is_configuration, platform):
self.id = id
self.details = details
self.version = version
self.is_hidden = is_hidden
self.is_required = is_required
self.is_configuration = is_configuration
self.platform = platform
self.data = data
self.dependencies = dependencies
@staticmethod
def TablePrinter():
"""Gets a console_io.TablePrinter for printing a Component."""
headers = (None, None, None)
justification = (console_io.TablePrinter.JUSTIFY_LEFT,
console_io.TablePrinter.JUSTIFY_RIGHT,
console_io.TablePrinter.JUSTIFY_RIGHT)
return console_io.TablePrinter(headers, justification=justification)
def AsTableRow(self):
"""Generates a tuple of this component's details that can be printed.
Returns:
tuple(str, str, str), The name, version , and size of this component.
"""
return (self.details.display_name,
self.version.version_string,
self.SizeString())
def SizeString(self):
"""Generates a string describing the size of the component in MB.
Returns:
str, The size string or the empty string if there is no data for this
component.
"""
if self.data and self.data.size:
size = self.data.size / 1048576.0 # 1024^2
if size < 1:
return '< 1 MB'
return '{size:0.1f} MB'.format(size=size)
return ''
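# e.g. data.size == 5242880 bytes renders as '5.0 MB'; anything under 1048576 bytes as '< 1 MB'.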
class SchemaVersion(object):
"""Information about the schema version of this snapshot file.
Attributes:
version: int, The schema version number. A different number is considered
incompatible.
no_update: bool, True if this installation should not attempted to be
updated.
message: str, A message to display to the user if they are updating to this
new schema version.
url: str, The URL to grab a fresh Cloud SDK bundle.
"""
@classmethod
def FromDictionary(cls, dictionary):
p = DictionaryParser(cls, dictionary)
p.Parse('version', required=True)
p.Parse('no_update', default=False)
p.Parse('message')
p.Parse('url', required=True)
return cls(**p.Args())
def ToDictionary(self):
w = DictionaryWriter(self)
w.Write('version')
w.Write('no_update')
w.Write('message')
w.Write('url')
return w.Dictionary()
def __init__(self, version, no_update, message, url):
self.version = version
self.no_update = no_update
self.message = message
self.url = url
class SDKDefinition(object):
"""Top level object for then entire component snapshot.
Attributes:
revision: int, The unique, monotonically increasing version of the snapshot.
components: [Component], The component definitions.
"""
REVISION_FORMAT_STRING = '%Y%m%d%H%M%S'
@classmethod
def FromDictionary(cls, dictionary):
p = cls._ParseBase(dictionary)
p.Parse('revision', required=True)
p.Parse('release_notes_url')
p.ParseList('components', required=True, func=Component.FromDictionary)
return cls(**p.Args())
@classmethod
def SchemaVersion(cls, dictionary):
return cls._ParseBase(dictionary).Args()['schema_version']
@classmethod
def _ParseBase(cls, dictionary):
p = DictionaryParser(cls, dictionary)
p.Parse('schema_version', default={'version': 1, 'url': ''},
func=SchemaVersion.FromDictionary)
return p
def ToDictionary(self):
w = DictionaryWriter(self)
w.Write('revision')
w.Write('release_notes_url')
w.Write('schema_version', func=SchemaVersion.ToDictionary)
w.WriteList('components', func=Component.ToDictionary)
return w.Dictionary()
def __init__(self, revision, schema_version, release_notes_url, components):
self.revision = revision
self.schema_version = schema_version
self.release_notes_url = release_notes_url
self.components = components
def LastUpdatedString(self):
try:
last_updated = datetime.datetime.strptime(
str(self.revision), SDKDefinition.REVISION_FORMAT_STRING)
return last_updated.strftime('%Y/%m/%d')
except ValueError:
return 'Unknown'
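# e.g. revision 20140929142453 parses as 2014-09-29 14:24:53 -> '2014/09/29';
# a revision not matching %Y%m%d%H%M%S yields 'Unknown'.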
def Merge(self, sdk_def):
current_components = dict((c.id, c) for c in self.components)
for c in sdk_def.components:
if c.id in current_components:
self.components.remove(current_components[c.id])
current_components[c.id] = c
self.components.append(c)
|
[
"devashish.86@gmail.com"
] |
devashish.86@gmail.com
|
64d4981a4eea5d22ee26cc21558b1d0fc30195a9
|
65860dcff48bd742561e3972a9dcefc64bfea5c0
|
/validate_without_simclr.py
|
31acbb59ea539871422cb7eb7e37f204a9c373f7
|
[
"Apache-2.0"
] |
permissive
|
BiQiWHU/pytorch-image-models-with-simclr
|
9f482927c8af0cc4a6d2e1cd2cf6319ca57f1f2d
|
0a3182be5ef6e008b73bb9f9ce756d26f3dab7c0
|
refs/heads/master
| 2023-08-17T10:39:14.778961
| 2021-09-13T01:28:08
| 2021-09-13T01:28:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,264
|
py
|
#!/usr/bin/env python3
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by Ross Wightman (https://github.com/rwightman)
Modified by YANG Ruixin for multi-label classification
2021/03/18
https://github.com/yang-ruixin
yang_ruixin@126.com (in China)
rxn.yang@gmail.com (out of China)
"""
# ================================
from ptflops import get_model_complexity_info
from timm.data import DatasetAttributes, DatasetML
from timm.models import MultiLabelModel
import datetime
# ================================
import argparse
import os
import csv
import glob
import time
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
from collections import OrderedDict
from contextlib import suppress
from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models
from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_legacy
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',
help='disable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--amp', action='store_true', default=False,
help='Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true',
help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance')
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
help='Valid label indices txt file for validation of partial label space')
def validate(args):
# ================================================================
attributes_path = args.data + '/all.csv'
test_path = args.data + '/val.csv' # val.csv or test.csv
# ================================================================
# might as well try to validate something
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
amp_autocast = suppress # do nothing
if args.amp:
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
else:
_logger.warning("Neither APEX or Native Torch AMP is available.")
assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
if args.native_amp:
amp_autocast = torch.cuda.amp.autocast
_logger.info('Validating in mixed precision with native PyTorch AMP.')
elif args.apex_amp:
_logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
else:
_logger.info('Validating in float32. AMP not enabled.')
if args.legacy_jit:
set_jit_legacy()
# create model
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=3,
global_pool=args.gp,
scriptable=args.torchscript)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
data_config = resolve_data_config(vars(args), model=model, use_test_size=True)
test_time_pool = False
if not args.no_test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True)
model = model.cuda()
# ================================
attributes = DatasetAttributes(attributes_path)
model = MultiLabelModel(model,
n_color_classes=attributes.num_colors,
n_gender_classes=attributes.num_genders,
n_article_classes=attributes.num_articles).cuda()
# ================================
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
param_count = sum([m.numel() for m in model.parameters()])
_logger.info('Model %s created, param count: %d' % (args.model, param_count))
# ================================
# code block used to get model FLOPs and number of parameters
# img_size = (3, 260, 260) # ================================
# flops, params = get_model_complexity_info(model, img_size, as_strings=True, print_per_layer_stat=False)
# print('FLOPs:', flops)
# print('params:', params)
# ================================
# ================================
# code block used to save model weights only, and an example to show how to load weights
torch.save(model.state_dict(), './output/weightOnly.pth')
model = create_model(
args.model,
in_chans=3,
pretrained=False,
checkpoint_path='',
scriptable=args.torchscript)
model = MultiLabelModel(model,
n_color_classes=attributes.num_colors,
n_gender_classes=attributes.num_genders,
n_article_classes=attributes.num_articles).cuda()
model.load_state_dict(torch.load('./output/weightOnly.pth'))
# ================================
if args.torchscript:
torch.jit.optimized_execution(True)
model = torch.jit.script(model)
if args.apex_amp:
model = amp.initialize(model, opt_level='O1')
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
# ================================
# criterion = nn.CrossEntropyLoss().cuda()
loss_fn = nn.CrossEntropyLoss().cuda()
# dataset = create_dataset(
# root=args.data, name=args.dataset, split=args.split,
# load_bytes=args.tf_preprocessing, class_map=args.class_map)
dataset = DatasetML(test_path, attributes)
num_of_data = len(dataset)
print('number of data:', num_of_data)
# ================================
if args.valid_labels:
with open(args.valid_labels, 'r') as f:
valid_labels = {int(line.rstrip()) for line in f}
valid_labels = [i in valid_labels for i in range(args.num_classes)]
else:
valid_labels = None
if args.real_labels:
real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
else:
real_labels = None
crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
pin_memory=args.pin_mem,
tf_preprocessing=args.tf_preprocessing)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
# warmup, reduce variability of first batch time, especially for comparing torchscript vs non
input = torch.randn((args.batch_size,) + data_config['input_size']).cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
model(input)
end = time.time()
acc1_color = acc1_gender = acc1_article = 0 # ================================
# ================================
model_inference_time = 0
num_of_batch = 0
# ================================
for batch_idx, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.cuda()
input = input.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# compute output
with amp_autocast():
# ================================
# output = model(input)
t1 = datetime.datetime.now()
output = model(input)
t2 = datetime.datetime.now()
num_of_batch += 1
if num_of_batch > 1:
model_inference_time += (t2 - t1).microseconds
# ================================
if valid_labels is not None:
output = output[:, valid_labels]
# ================================
# loss = criterion(output, target)
loss = model.get_loss(loss_fn, output, target)
# ================================
if real_labels is not None:
real_labels.add_result(output)
# measure accuracy and record loss
# ================================
# acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
acc1, acc5, acc1_for_each_label = model.get_accuracy(accuracy, output, target, topk=(1, 5)) # topk=(1, 2) ================================
percentage = len(input) / num_of_data
acc1_color += acc1_for_each_label['color'] * percentage
acc1_gender += acc1_for_each_label['gender'] * percentage
acc1_article += acc1_for_each_label['article'] * percentage
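# Weighted running average: each batch contributes in proportion to its size
# (percentage = len(input) / num_of_data), so after the loop acc1_color /
# acc1_gender / acc1_article hold dataset-wide top-1 accuracies per label.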
# ================================
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
batch_idx, len(loader), batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses, top1=top1, top5=top5))
# ================================
if num_of_batch > 1:
print('model inference time per batch in milliseconds',
round(model_inference_time / (num_of_batch - 1) / 1000, 2))
# ================================
if real_labels is not None:
# real labels mode replaces topk values at the end
top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5) # k=2 ================================
else:
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config['input_size'][-1],
crop_pct=crop_pct,
interpolation=data_config['interpolation'])
# ================================
_logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f}) acc1_color {:.3f} acc1_gender {:.3f} acc1_article {:.3f}'.format(
results['top1'], results['top1_err'], results['top5'], results['top5_err'], acc1_color, acc1_gender, acc1_article))
# ================================
return results
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
checkpoints += glob.glob(args.checkpoint + '/*.pth')
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(pretrained=True, exclude_filters=['*in21k'])
model_cfgs = [(n, '') for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model)
model_cfgs = [(n, '') for n in model_names]
if len(model_cfgs):
results_file = args.results_file or './results-all.csv'
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
start_batch_size = args.batch_size
for m, c in model_cfgs:
batch_size = start_batch_size
args.model = m
args.checkpoint = c
result = OrderedDict(model=args.model)
r = {}
while not r and batch_size >= args.num_gpu:
torch.cuda.empty_cache()
try:
args.batch_size = batch_size
print('Validating with batch size: %d' % args.batch_size)
r = validate(args)
except RuntimeError as e:
if batch_size <= args.num_gpu:
print("Validation failed with no ability to reduce batch size. Exiting.")
raise e
batch_size = max(batch_size // 2, args.num_gpu)
print("Validation failed, reducing batch size by 50%")
result.update(r)
if args.checkpoint:
result['checkpoint'] = args.checkpoint
results.append(result)
except KeyboardInterrupt as e:
pass
results = sorted(results, key=lambda x: x['top1'], reverse=True)
if len(results):
write_results(results_file, results)
else:
validate(args)
def write_results(results_file, results):
with open(results_file, mode='w') as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
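# Example invocation (a sketch; the dataset path and checkpoint name are hypothetical):
#   python validate_without_simclr.py ./fashion-data --model tf_efficientnet_b2 \
#       --checkpoint ./output/model_best.pth.tar -b 64
# The data directory is expected to contain all.csv and val.csv (see validate()).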
|
[
"foreverv32@msn.com"
] |
foreverv32@msn.com
|
7c13f8b32a85e3d13a088b4efe58dc8b03f7eaab
|
d29991b90b528baa04cb78b312abef799cf98bab
|
/src/canto/tests/tests.py
|
f5886e772f283a56ef0da571f662373f4431364e
|
[
"MIT"
] |
permissive
|
RaphaelKimmig/django-canto
|
9ff283eaf9a2bc30a4b2a894c73c20278fff9b62
|
033dd6fcaf96fed021a726b5b042632fd1504b74
|
refs/heads/master
| 2023-01-12T01:06:05.693214
| 2023-01-10T10:22:14
| 2023-01-10T10:22:14
| 154,282,140
| 0
| 1
|
MIT
| 2023-01-10T10:22:15
| 2018-10-23T07:27:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,410
|
py
|
from datetime import timedelta
import mock
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.test import override_settings
from django.urls import reverse
from django.utils import timezone
from django.utils.html import escape
from django_dynamic_fixture import G
from django_webtest import WebTest
from canto.services import _get_oauth_state, _get_canto_settings, get_canto_client
class FakeResponse:
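# Minimal stand-in for a requests.Response: just the .ok flag and .json()
# method that the canto client code under test needs.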
def __init__(self, data, ok=True) -> None:
super().__init__()
self.data = data
self.ok = ok
def json(self):
return self.data
@override_settings(
CANTO_API_URL="https://example.canto.com",
CANTO_APP_ID="XXX",
CANTO_APP_SECRET="ZZZ",
)
class CantoViewTest(WebTest):
def setUp(self):
self.user = G(get_user_model())
self.user.user_permissions.add(
Permission.objects.get(codename="browse_library")
)
self.user.user_permissions.add(
Permission.objects.get(codename="change_cantosettings")
)
def test_settings_default_to_not_connected(self):
response = self.app.get(reverse("canto:settings"), user=self.user)
self.assertContains(response, "You are not connected")
self.assertContains(
response,
escape(
get_canto_client().get_oauth_url(
_get_oauth_state(self.user), response.request.url
)
),
)
def test_settings_permissions(self):
unauthorized_user = G(get_user_model())
self.app.get(reverse("canto:settings"), user=unauthorized_user, status=403)
self.app.post(reverse("canto:disconnect"), user=unauthorized_user, status=403)
self.app.post(
reverse("canto:refresh-token"), user=unauthorized_user, status=403
)
def test_browse_permissions(self):
unauthorized_user = G(get_user_model())
self.app.get(reverse("canto:library"), user=unauthorized_user, status=403)
self.app.get(reverse("canto:tree-json"), user=unauthorized_user, status=403)
self.app.get(
reverse("canto:search-json", kwargs={"query": "something"}),
user=unauthorized_user,
status=403,
)
self.app.get(
reverse("canto:album-json", kwargs={"album_id": "123"}),
user=unauthorized_user,
status=403,
)
self.app.get(
reverse("canto:binary", kwargs={"url": "foo"}),
user=unauthorized_user,
status=403,
)
def test_oauth_confirmation_view(self):
oauth_state = _get_oauth_state(self.user)
settings_page = self.app.get(
reverse("canto:settings") + "?code=CANTO_CODE&state=" + oauth_state,
user=self.user,
)
self.assertContains(settings_page, "Please confirm the connection to canto")
canto_response = FakeResponse(
{
"accessToken": "i-grant-access",
"state": oauth_state,
"expiresIn": 3600,
"refreshToken": "feel-refreshed",
}
)
now = timezone.now()
with mock.patch("canto.client.requests.post", return_value=canto_response):
with mock.patch("canto.client.now", return_value=now):
confirm_response = settings_page.form.submit()
self.assertContains(confirm_response.follow(), "You are connected to canto")
canto_settings = _get_canto_settings()
self.assertEqual(canto_settings.access_token, "i-grant-access")
self.assertEqual(canto_settings.refresh_token, "feel-refreshed")
self.assertEqual(
canto_settings.token_valid_until, now + timedelta(seconds=3600)
)
def test_oauth_disconnect_view(self):
canto_settings = _get_canto_settings()
canto_settings.refresh_token = "refresh"
canto_settings.access_token = "access"
canto_settings.token_valid_until = timezone.now() + timedelta(seconds=3600)
canto_settings.save()
response = self.app.get(reverse("canto:settings"), user=self.user)
self.assertContains(response, "You are connected to canto")
response = response.forms["canto-disconnect"].submit()
self.assertContains(response.follow(), "You are not connected")
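# A typical invocation (an assumption; depends on the project's test setup):
#   python manage.py test canto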
|
[
"raphael.kimmig@ampad.de"
] |
raphael.kimmig@ampad.de
|
67af1a0893165e7b7b57f691c96457c44c67e6a6
|
e204ab353fefee5ad31fa34d619856bb933f08f9
|
/crackit/crack/migrations/0001_initial.py
|
91a3aab71215f6a123a41dc077c7cfab41dc3d11
|
[] |
no_license
|
Sanketdave12/Crackit
|
e49aec48f83fb1543a529839797ad2681c4fac14
|
e76b2136b2f69965cb70bda0ff4a13d1c2b346aa
|
refs/heads/master
| 2023-03-01T06:19:27.786741
| 2021-02-15T18:25:42
| 2021-02-15T18:25:42
| 339,169,372
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
# Generated by Django 3.1.6 on 2021-02-10 12:40
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=55)),
('email', models.EmailField(max_length=255)),
('phone_no', models.IntegerField(blank=True, null=True, validators=[django.core.validators.RegexValidator('^0?[5-9]{1}\\d{9}$')])),
('website', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('message', models.TextField()),
],
options={
'ordering': ['-pk'],
},
),
]
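# A quick sanity check of the phone-number pattern above; the sample numbers
# are hypothetical:
#
#   import re
#   pattern = re.compile(r'^0?[5-9]{1}\d{9}$')
#   bool(pattern.match('9876543210'))   # True: ten digits starting with 5-9
#   bool(pattern.match('09876543210'))  # True: optional leading zero allowed
#   bool(pattern.match('1234567890'))   # False: first digit must be 5-9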
[authors: sddave1998@gmail.com]

=== /Semana 4/metod.py | repo: Mau5trakt/PCC | license: no_license | Python, UTF-8, 569 bytes | committed 2021-06-01 ===
"""
use of the diferent string methods on
pyhton
"""
#Count
var1 = "Quisiera Saber Cuantas Veces Se Repite La Letra E En Esta Cadena De Caracteres"
var1 = var1.lower()
print(var1.count("e"))
#lower and upper
print(var1.upper())
print(var1.lower())
#endswith saber si una cadena termina con el valor que le insertamos
print(var1.endswith("res"))
print(var1.endswith("restop"))
#isnumeric -> Saber si una variable es meramente numérica o no
print(var1.isnumeric())
var2= "12345"
print(var2.isnumeric())
#join
a = "este es otro ejemplo"
print(a.split()), print(type(a))
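# join -> the inverse of split: glue a list of strings back into one string
# (a small follow-up to the split example above)
words = a.split()
print(" ".join(words))  # "this is another example"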
[authors: 80871251+Mau5trakt@users.noreply.github.com]

=== /python/lib/direct/extensions_native/Mat3.py | repo: PlumpMath/panda3d-3 | license: BSD-3-Clause/BSD-2-Clause (permissive) | Python, UTF-8, 734 bytes | committed 2014-09-29 ===
from panda3d.direct.extensions_native.Helpers import *
Dtool_PreloadDLL("panda")
from panda import *
####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################
"""
Mat3-extensions module: contains methods to extend functionality
of the LMatrix3f class.
"""
def pPrintValues(self):
"""
Pretty print
"""
return "\n%s\n%s\n%s" % (
self.getRow(0).pPrintValues(), self.getRow(1).pPrintValues(), self.getRow(2).pPrintValues())
Dtool_funcToMethod(pPrintValues, Mat3)
del pPrintValues
#####################################################################
[authors: ralf.kaestner@gmail.com]

=== /crawler/url_encode.py | repo: laisj/Toolkit | license: no_license | Python, UTF-8, 198 bytes | committed 2021-02-19 ===
# -*- coding: utf-8 -*-
import urllib.parse

if __name__ == '__main__':
    text = "??"
    print(text)
    params = {'name': text.encode("UTF-8")}
    print(urllib.parse.urlencode(params))
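    # urllib.parse.quote() percent-encodes a bare string rather than
    # key/value pairs; shown here as an extra stdlib illustration:
    print(urllib.parse.quote("a b/c"))  # a%20b/c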
[authors: lai.sijia@immomo.com]

=== /transfer_rl/student_learner.py | repo: henghuiz-zz/transfer_reinforcement_learning | license: no_license | Python, UTF-8, 503 bytes | committed 2017-05-02 ===
import tensorflow as tf
class StudentLearner(object):
def __init__(self, network, optimizer):
# Placeholders
self.input_x = network.x
with tf.variable_scope(network.scope):
self.input_y = tf.placeholder(
'float', [None, network.number_action], name='Input_Action')
self.loss = tf.losses.softmax_cross_entropy(
onehot_labels=self.input_y, logits=network.logits
)
self.train_step = optimizer.minimize(self.loss)
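# A minimal sketch of wiring up the learner above. SimpleNet is a
# hypothetical stand-in for the project's network objects; it only needs
# .x, .scope, .number_action and .logits (TF1 graph mode assumed).
class SimpleNet(object):
    def __init__(self, n_inputs=4, n_actions=2, scope='student'):
        self.scope = scope
        self.number_action = n_actions
        with tf.variable_scope(scope):
            self.x = tf.placeholder('float', [None, n_inputs], name='Input_State')
            self.logits = tf.layers.dense(self.x, n_actions)

if __name__ == '__main__':
    learner = StudentLearner(SimpleNet(), tf.train.AdamOptimizer(1e-3))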
[authors: zhhtc200@gmail.com]

=== /test.py | repo: olivetree123/XDB | license: no_license | Python, UTF-8, 2,809 bytes | committed 2018-12-24 ===
#coding:utf-8
import ctypes
from index import Index
from meta import DBMeta
from utils.functions import to_bytes
class BucketStruct(ctypes.Structure):
_fields_ = [
("id", ctypes.c_int),
("name", ctypes.c_char * 50),
("public", ctypes.c_bool)
]
class Bucket(object):
def __init__(self):
self.database = "hista"
self.table = "bucket"
self.primary_key = "id"
self.meta = DBMeta()
self.index = Index(self.database, self.table, self.primary_key)
self.db_file = "hista.db"
r = self._is_table_exists()
if not r:
print("table does not exist, create it now.")
self.create_table()
def _is_table_exists(self):
meta = self.meta.get(database=self.database, table=self.table)
r = True if meta else False
return r
def add(self, name, public):
pk_value = self.meta.get_next_pk(database=self.database, table=self.table)
bucket = BucketStruct()
bucket.id = pk_value
bucket.name = to_bytes(name)
bucket.public = public
with open(self.db_file, "ab") as f:
data_offset = f.tell()
f.write(bytes(bucket))
self.index.create(value=pk_value, data_offset=data_offset)
return bucket
    def find(self, name):
        name = to_bytes(name)
        with open(self.db_file, "rb") as f:
            content = f.read(ctypes.sizeof(BucketStruct))
            while content:
                bucket = BucketStruct.from_buffer_copy(content)
                if bucket.name == name:
                    return bucket
                content = f.read(ctypes.sizeof(BucketStruct))
        return None
def find_by_index(self, value):
value = to_bytes(value)
index = self.index.get(value=value)
if not index:
print("index not found.")
return
with open(self.db_file, "rb") as f:
f.seek(index.data_offset)
content = f.read(ctypes.sizeof(BucketStruct))
bucket = BucketStruct.from_buffer_copy(content)
return bucket
def create_table(self):
self.meta.create(database=self.database, table=self.table, primary_key=self.primary_key, columns="columns")
if __name__ == "__main__":
bucket_name = "gaojian3"
bucket = Bucket()
# bb = bucket.add(name=bucket_name, public=1)
# print("new bucket id = ", bb.id)
# b = bucket.find(bucket_name)
b = bucket.find_by_index(3)
if b:
print("bucket id = ", b.id)
print("bucket name = ", b.name)
print("bucket public = ", b.public)
else:
print("bucket {} not found".format(bucket_name))
# bucket.index.btree.printTree()
[authors: olivetree123@163.com]

=== /bw2regional/hashing.py | repo: brightway-lca/brightway2-regional | license: permissive (BSD-3-Clause) | Python, UTF-8, 314 bytes | committed 2023-04-28 ===
import hashlib
def sha256(filepath, blocksize=65536):
    """Generate SHA 256 hash for file at `filepath`"""
    hasher = hashlib.sha256()
    with open(filepath, "rb") as fo:  # close the handle when done
        buf = fo.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = fo.read(blocksize)
    return hasher.hexdigest()
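# Example usage: hash this source file itself.
if __name__ == "__main__":
    print(sha256(__file__))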
[authors: cmutel@gmail.com]

=== /website-20161201T160425Z/website/music/models.py | repo: hurley1221/340 | license: no_license | Python, UTF-8, 1,484 bytes | committed 2016-12-15 ===
from django.db import models
class Modules(models.Model):
ModuleCode = models.CharField(max_length=10)
ModuleTitle = models.CharField(max_length=40)
ModuleTutor = models.CharField(max_length=40)
def __str__(self):
return self.ModuleCode + ' ' + self.ModuleTitle + ' ' + self.ModuleTutor
class Student(models.Model):
StudentID = models.IntegerField()
StudentFirstName = models.CharField(max_length=20)
StudentLastName = models.CharField(max_length=20)
Address = models.CharField(max_length=100)
Postcode = models.CharField(max_length=7)
class ModuleMarks(models.Model):
StudentID = models.ForeignKey('Student', on_delete=models.CASCADE)
ModuleCode = models.ForeignKey('Modules', on_delete=models.CASCADE)
ModuleMark = models.IntegerField()
SubmittedDate = models.DateTimeField()
def __str__(self):
return str(self.StudentID) + ' ' + str(self.ModuleCode) + ' ' + str(self.ModuleMark) + ' ' + str(self.SubmittedDate)
class Coursework(models.Model):
ModuleCode = models.ForeignKey('Modules', on_delete=models.CASCADE)
CourseworkNo = models.CharField(max_length=10)
CourseworkTitle = models.CharField(max_length=50)
AssessmentType = models.CharField(max_length=50)
IssueDate = models.DateField()
DueDateTime = models.DateTimeField()
class Login(models.Model):
UserName = models.CharField(max_length=50)
class Password(models.Model):
Password = models.CharField(max_length=50)
[authors: hurley1221.noreply@github.com]

=== /nni/algorithms/compression/v2/pytorch/pruning/tools/__init__.py | repo: squirrelsc/nni | license: MIT (permissive) | Python, UTF-8, 184 bytes | committed 2023-04-03 ===
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# pylint: disable=wildcard-import,unused-wildcard-import
from nni.compression.pytorch.pruning.tools import *
[authors: squirrelsc.noreply@github.com]

=== /app_with_ui/urls.py | repo: AndriiNartov/image_project | license: no_license | Python, UTF-8, 939 bytes | committed 2021-06-10 ===
from django.urls import path
from app_with_ui.views import IndexView, UploadImageView, ImageListView, CreateExpiryLinkView, \
ExpiryLinksList, ShowImageByExpiryLink, LoginUser, RegisterUser, ProfileView, logout_user
urlpatterns = [
path('', IndexView.as_view(), name='index'),
path('profile/', ProfileView.as_view(), name='profile'),
path('upload-image', UploadImageView.as_view(), name='upload_image'),
path('all-images', ImageListView.as_view(), name='all_images'),
path('create-expiry-link/<int:pk>/', CreateExpiryLinkView.as_view(), name='create_expiry_link'),
path('all-expired-links/', ExpiryLinksList.as_view(), name='all_expired_links'),
path('temp/<str:link>/', ShowImageByExpiryLink.as_view(), name='show_image_by_exp_link'),
path('login/', LoginUser.as_view(), name='login'),
path('register/', RegisterUser.as_view(), name='register'),
path('logout/', logout_user, name='logout'),
]
[authors: nartov1912@gmail.com]

=== /scrape_mars.py | repo: MarSchien/Mars_web_scrape | license: no_license | Python, UTF-8, 2,176 bytes | committed 2019-03-07 ===
# Dependencies
from bs4 import BeautifulSoup
from splinter import Browser
import requests
import pymongo
import pandas as pd
import time
# # @NOTE: Replace the path with your actual path to the chromedriver
# executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
# return Browser("chrome", **executable_path, headless=False)
def scrape():
url_1 = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
response = requests.get(url_1)
soup = BeautifulSoup(response.text, 'html.parser')
latest_news_title = soup.find('div', class_="content_title").a.text.strip()
latest_news_paragraph = soup.find('div', class_="rollover_description_inner").text.strip()
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
url_2 = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url_2)
browser.click_link_by_partial_text('FULL IMAGE')
time.sleep(2)
browser.click_link_by_partial_text('more info')
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
first_image = soup.find('img', class_="main_image")['src']
root_url = 'https://www.jpl.nasa.gov'
featured_image_url = root_url + str(first_image)
url_3 = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url_3)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
mars_weather = soup.find('p', class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text').text
url_4 = 'https://space-facts.com/mars/'
tables = pd.read_html(url_4)
df = tables[0]
df.columns=['Measurement', 'Value']
df.set_index('Measurement', inplace=True)
table_html = df.to_html().replace('\n','')
data = {}
data['latest_news_title'] = latest_news_title
data['latest_news_paragraph'] = latest_news_paragraph
data['latest_image'] = featured_image_url
data['mars_weather'] = mars_weather
data['table'] = table_html
return data
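# A minimal sketch of calling the scraper directly; persisting the result
# (e.g. via the pymongo import above) is left to the caller:
if __name__ == '__main__':
    results = scrape()
    print(results['latest_news_title'])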
[authors: MarSchien.noreply@github.com]

=== /Matrix_Week1/submit_inverse_index_lab.py | repo: sharov-am/Coursera_Matrix | license: no_license | Python, UTF-8, 9,416 bytes | committed 2013-07-23 ===
######## ########
# Hi there, curious student. #
# #
# This submission script runs some tests on your #
# code and then uploads it to Coursera for grading. #
# #
# Changing anything in this script might cause your #
# submissions to fail. #
######## ########
import os
import sys
import doctest
import traceback
import urllib.request
import urllib.parse
import urllib.error
import base64
import hashlib
import ast
URL = 'matrix-001'
partFriendlyNames = ['Movie Review', 'Dictionary Utilities', 'List Range to Dictionary', 'Make Inverse Index', 'Or Search', 'And Search']
groups = [[('jW1JmPJ7MxozuKUU', 'Movie Review', '>>> print(test_format(len({movie_review(str(randint(1,300))) for _ in range(300)})))\n')], [('u6z9uemgY2XO5i8b', 'Dictionary Utilities', '>>> import dictutil\n>>> print(test_format(dictutil.dict2list({1:2},[1])))\n>>> print(test_format(dictutil.list2dict([2],[1])))\n')], [('1W5vN6D5KTioevrb', 'List Range to Dictionary', ">>> print(test_format([listrange2dict(l) for l in [[0,1,2],['a','b'], range(100)]]))\n")], [('i06zNsrbnOmf4ASS', 'Make Inverse Index', '>>> stories = list(open("stories_small.txt"))\n>>> idx = makeInverseIndex(stories)\n>>> print(test_format([idx[w] for w in [\'leaving\', \'Florida\', \'After\', \'debate\', \'workers\', \'For\', \'use\']]))\n')], [('LEmkQURt338uFT3y', 'Or Search', '>>> stories = list(open("stories_small.txt"))\n>>> idx = makeInverseIndex(stories)\n>>> print(test_format(orSearch(idx, [\'travelers\', \'use\', \'Baltimore\', \'major-league\', \'whether\'])))\n')], [('7D2cgEQeGuInopfV', 'And Search', '>>> stories = list(open("stories_small.txt"))\n>>> idx = makeInverseIndex(stories)\n>>> print(test_format(andSearch(idx, [\'made\', \'are\'])))\n>>> print(test_format(andSearch(idx, [\'the\', \'in\', \'use\', \'times\'])))\n')]]
sourceFiles = ['inverse_index_lab.py'] * len(sum(groups,[]))
try:
import inverse_index_lab as solution
test_vars = vars(solution).copy()
except Exception as exc:
print(exc)
print("!! It seems like you have an error in your stencil file. Please fix before submitting.")
sys.exit(1)
def find_lines(varname):
return list(filter(lambda l: varname in l, list(open("python_lab.py"))))
def find_line(varname):
ls = find_lines(varname)
return ls[0] if len(ls) else None
def use_comprehension(varname):
lines = find_lines(varname)
for line in lines:
try:
if "comprehension" in ast.dump(ast.parse(line)):
return True
except: pass
return False
def double_comprehension(varname):
line = find_line(varname)
return ast.dump(ast.parse(line)).count("comprehension") == 2
def line_contains_substr(varname, word):
    lines = find_lines(varname)  # find_line returns a single string; we need the list
    for line in lines:
        if word in line:
            return True
    return False
def test_format(obj, precision=6):
tf = lambda o: test_format(o, precision)
delimit = lambda o: ', '.join(o)
otype = type(obj)
if otype is str:
return "'%s'" % obj
elif otype is float:
fstr = '%%.%dg' % precision
return fstr % obj
elif otype is set:
return '{%s}' % delimit(sorted(map(tf, obj)))
elif otype is dict:
return '{%s}' % delimit(sorted(tf(k)+': '+tf(v) for k,v in obj.items()))
elif otype is list:
return '[%s]' % delimit(map(tf, obj))
elif otype is tuple:
        return '(%s%s)' % (delimit(map(tf, obj)), ',' if len(obj) == 1 else '')
elif otype.__name__ in ['Vec','Mat']:
entries = delimit(map(tf, sorted(filter(lambda o: o[1] != 0, obj.f.items()))))
return '<%s %s {%s}>' % (otype.__name__, test_format(obj.D), entries)
else:
return str(obj)
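# For example, test_format sorts container entries and trims float noise, so
# test_format({2: [1.0], 1: 'a'}) always yields "{1: 'a', 2: [1]}" regardless
# of insertion order -- this keeps the graded output deterministic.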
def output(tests):
dtst = doctest.DocTestParser().get_doctest(tests, test_vars, 0, '<string>', 0)
runner = ModifiedDocTestRunner()
runner.run(dtst)
return ''.join(map(str.rstrip, runner.results))
test_vars['test_format'] = test_vars['tf'] = test_format
test_vars['find_lines'] = find_lines
test_vars['find_line'] = find_line
test_vars['use_comprehension'] = use_comprehension
test_vars['double_comprehension'] = double_comprehension
test_vars['line_contains_substr'] = line_contains_substr
class ModifiedDocTestRunner(doctest.DocTestRunner):
def __init__(self, *args, **kwargs):
self.results = []
return super(ModifiedDocTestRunner, self).__init__(*args, checker=OutputAccepter(), **kwargs)
def report_success(self, out, test, example, got):
self.results.append(got)
def report_unexpected_exception(self, out, test, example, exc_info):
exf = traceback.format_exception_only(exc_info[0], exc_info[1])[-1]
self.results.append(exf)
class OutputAccepter(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
return True
challenge_url = 'https://class.coursera.org/%s/assignment/challenge' % URL
submit_url = 'https://class.coursera.org/%s/assignment/submit' % URL
def submit():
print('==\n== Submitting Solutions \n==\n')
(login, password) = loginPrompt()
if not login:
print('!! Submission Cancelled')
return
print('\n== Connecting to Coursera ... \n')
parts = partPrompt()
if parts is None: return
while len(parts) == 0:
print('\n!! Cannot submit ungraded parts')
parts = partPrompt()
for (sid, name, part_tests) in parts:
print('\n== Submitting "%s" \n' % name)
if 'DEV' in os.environ: sid += '-dev'
(login, ch, state, ch_aux) = getChallenge(login, sid)
if (not login) or (not ch) or (not state):
print('\n!! Error: %s\n' % login)
return
# to stop Coursera's strip() from doing anything, we surround in parens
prog_out = '(%s)' % output(part_tests)
token = challengeResponse(login, password, ch)
src = source(sid)
if 'DEBUG' in os.environ: print('==== Output: %s' % prog_out.replace('\n','\\n'))
feedback = submitSolution(login, token, sid, prog_out, src, state, ch_aux)
if len(feedback.strip()) > 0:
print('==== Feedback: %s\n' % feedback.strip())
def loginPrompt():
"""Prompt the user for login credentials. Returns a tuple (login, password)."""
if 'COURSERA_EMAIL' in os.environ:
login = os.environ['COURSERA_EMAIL']
else:
login = input('Login email address: ')
if 'COURSERA_PASS' in os.environ:
password = os.environ['COURSERA_PASS']
else:
password = input("One-time password from the assignment page (NOT your own account\'s password): ")
return login, password
def partPrompt():
print('These are the assignment parts that you can submit:\n')
for i, name in enumerate(partFriendlyNames):
print(' %d) %s' % (i+1, name))
def extract_range(s):
s = s.split('-')
if len(s) == 1: return [int(s[0])]
else: return list(range(int(s[0]), 1+int(s[1])))
their_input = input('\nWhich parts do you want to submit? (Ex: 1, 4-7): ')
parts = map(extract_range, their_input.split(','))
flat_parts = sum(parts, [])
return sum(list(map(lambda p: groups[p-1], flat_parts)),[])
def getChallenge(email, sid):
"""Gets the challenge salt from the server. Returns (email,ch,state,ch_aux)."""
values = {'email_address' : email,
'assignment_part_sid' : sid,
'response_encoding' : 'delim'
}
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(challenge_url, data)
response = urllib.request.urlopen(req)
    text = response.read().decode('utf-8').strip()
# text is of the form email|ch|signature
splits = text.split('|')
if len(splits) != 9:
print('Badly formatted challenge response: %s' % text)
sys.exit(1)
return (splits[2], splits[4], splits[6], splits[8])
def challengeResponse(email, passwd, challenge):
sha1 = hashlib.sha1()
sha1.update(('%s%s' % (challenge, passwd)).encode('utf-8'))
return ''.join(sha1.hexdigest())
def submitSolution(email_address, ch_resp, sid, output, source, state, ch_aux):
"""Submits a solution to the server. Returns (result, string)."""
source_64 = str(base64.encodebytes(source.encode('utf-8')), 'ascii')
output_64 = str(base64.encodebytes(output.encode('utf-8')), 'ascii')
values = { 'assignment_part_sid' : sid,
'email_address' : email_address,
'submission' : output_64,
'submission_aux' : source_64,
'challenge_response' : ch_resp,
'state' : state
}
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(submit_url, data)
response = urllib.request.urlopen(req)
    string = response.read().decode('utf-8').strip()
return string
def source(sid):
""" This collects the source code, for logging purposes. """
f = open(sourceFiles[0])
src = f.read()
f.close()
return src
if __name__ == '__main__':
submit()
[authors: 18cc@mail.ru]

=== /execute_task/execute_task/urls.py | repo: s57445560/user_command_web | license: no_license | Python, UTF-8, 868 bytes | committed 2018-07-27 ===
"""execute_task URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from task import urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^monitor/', include('task.urls')),
]
[authors: sy.manage@hotmail.com]

=== /sharpspark/settings.py | repo: grlownzl/sharpspark | license: no_license | Python, UTF-8, 5,748 bytes | committed 2013-03-31 ===
# Django settings for sharpspark project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
PROJECT_DIR = os.path.dirname(__file__)
PUBLIC_DIR = os.path.join(PROJECT_DIR, 'public')
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, '..', 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ap8el))q4_y*s+_xeu4cnvxmu(vg3f6-bqom4wnss2-f)yt8xs'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sharpspark.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sharpspark.wsgi.application'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '..', 'templates').replace('\\','/'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'bootstrap_toolkit',
'tagging',
'mptt',
'zinnia',
'django_markdown',
'sharpspark_ui',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
BOOTSTRAP_BASE_URL = 'http://twitter.github.com/bootstrap/assets/'
BOOTSTRAP_CSS_BASE_URL = BOOTSTRAP_BASE_URL + 'css/'
BOOTSTRAP_CSS_URL = BOOTSTRAP_CSS_BASE_URL + 'bootstrap.css'
BOOTSTRAP_JS_BASE_URL = BOOTSTRAP_BASE_URL + 'js/'
[authors: grlownzl@gmail.com]

=== /planners/nn_waypoint_planner.py | repo: SFU-MARS/WayPtNav-reachability | license: no_license | Python, UTF-8, 2,709 bytes | committed 2020-02-14 ===
import tensorflow as tf
from planners.nn_planner import NNPlanner
from trajectory.trajectory import Trajectory, SystemConfig
class NNWaypointPlanner(NNPlanner):
""" A planner which selects an optimal waypoint using
a trained neural network. """
def __init__(self, simulator, params):
super(NNWaypointPlanner, self).__init__(simulator, params)
self.waypoint_world_config = SystemConfig(dt=self.params.dt, n=1, k=1)
def optimize(self, start_config):
""" Optimize the objective over a trajectory
starting from start_config.
"""
p = self.params
model = p.model
raw_data = self._raw_data(start_config)
processed_data = model.create_nn_inputs_and_outputs(raw_data)
# Predict the NN output
nn_output_113 = model.predict_nn_output_with_postprocessing(processed_data['inputs'],
is_training=False)[:, None]
# Transform to World Coordinates
waypoint_ego_config = SystemConfig(dt=self.params.dt, n=1, k=1,
position_nk2=nn_output_113[:, :, :2],
heading_nk1=nn_output_113[:, :, 2:3])
self.params.system_dynamics.to_world_coordinates(start_config,
waypoint_ego_config,
self.waypoint_world_config)
# Evaluate the objective and retrieve Control Pipeline data
obj_vals, data = self.eval_objective(start_config, self.waypoint_world_config)
# The batch dimension is length 1 since there is only one waypoint
min_idx = 0
min_cost = obj_vals[min_idx]
waypts, horizons_s, trajectories_lqr, trajectories_spline, controllers = data
self.opt_waypt.assign_from_config_batch_idx(waypts, min_idx)
self.opt_traj.assign_from_trajectory_batch_idx(trajectories_lqr, min_idx)
# Convert horizon in seconds to horizon in # of steps
min_horizon = int(tf.ceil(horizons_s[min_idx, 0]/self.params.dt).numpy())
data = {'system_config': SystemConfig.copy(start_config),
'waypoint_config': SystemConfig.copy(self.opt_waypt),
'trajectory': Trajectory.copy(self.opt_traj),
'spline_trajectory': Trajectory.copy(trajectories_spline),
'planning_horizon': min_horizon,
'K_nkfd': controllers['K_nkfd'][min_idx:min_idx + 1],
'k_nkf1': controllers['k_nkf1'][min_idx:min_idx + 1],
'img_nmkd': raw_data['img_nmkd']}
return data
[authors: anjianl@cs-mars-04.cmpt.sfu.ca]

=== /cap4_ex53.py | repo: ChikusMOC/Exercicios-Capitulo-4 | license: no_license | Python, UTF-8, 267 bytes | committed 2019-11-25 ===
"""
Exercicio 53
"""
comprimento = float(input("Comprimento: "))
largura = float(input("Largura: "))
preco_tela = float(input("Preço do metro da tela: "))
valor = 2*(comprimento+largura) * preco_tela
print(f"Você irá gastar R${valor} para cercar seu terreno")
[authors: ChikusMOC.noreply@github.com]

=== /official/vision/serving/export_base.py | repo: npfp/models | license: Apache-2.0 (permissive) | Python, UTF-8, 7,180 bytes | committed 2022-04-08 ===
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for model export."""
import abc
from typing import Dict, List, Mapping, Optional, Text
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
"""Base Export Module."""
def __init__(self,
params: cfg.ExperimentConfig,
*,
batch_size: int,
input_image_size: List[int],
input_type: str = 'image_tensor',
num_channels: int = 3,
model: Optional[tf.keras.Model] = None):
"""Initializes a module for export.
Args:
params: Experiment params.
batch_size: The batch size of the model input. Can be `int` or None.
input_image_size: List or Tuple of size of the input image. For 2D image,
it is [height, width].
input_type: The input signature type.
num_channels: The number of the image channels.
model: A tf.keras.Model instance to be exported.
"""
self.params = params
self._batch_size = batch_size
self._input_image_size = input_image_size
self._num_channels = num_channels
self._input_type = input_type
if model is None:
model = self._build_model() # pylint: disable=assignment-from-none
super().__init__(params=params, model=model)
def _decode_image(self, encoded_image_bytes: str) -> tf.Tensor:
"""Decodes an image bytes to an image tensor.
Use `tf.image.decode_image` to decode an image if input is expected to be 2D
image; otherwise use `tf.io.decode_raw` to convert the raw bytes to tensor
and reshape it to desire shape.
Args:
encoded_image_bytes: An encoded image string to be decoded.
Returns:
A decoded image tensor.
"""
if len(self._input_image_size) == 2:
# Decode an image if 2D input is expected.
image_tensor = tf.image.decode_image(
encoded_image_bytes, channels=self._num_channels)
image_tensor.set_shape((None, None, self._num_channels))
else:
# Convert raw bytes into a tensor and reshape it, if not 2D input.
image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)
image_tensor = tf.reshape(image_tensor,
self._input_image_size + [self._num_channels])
return image_tensor
def _decode_tf_example(
self, tf_example_string_tensor: tf.train.Example) -> tf.Tensor:
"""Decodes a TF Example to an image tensor.
Args:
tf_example_string_tensor: A tf.train.Example of encoded image and other
information.
Returns:
A decoded image tensor.
"""
keys_to_features = {'image/encoded': tf.io.FixedLenFeature((), tf.string)}
parsed_tensors = tf.io.parse_single_example(
serialized=tf_example_string_tensor, features=keys_to_features)
image_tensor = self._decode_image(parsed_tensors['image/encoded'])
return image_tensor
def _build_model(self, **kwargs):
"""Returns a model built from the params."""
return None
@tf.function
def inference_from_image_tensors(
self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
return self.serve(inputs)
@tf.function
def inference_for_tflite(self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
return self.serve(inputs)
@tf.function
def inference_from_image_bytes(self, inputs: tf.Tensor):
with tf.device('cpu:0'):
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._decode_image,
elems=inputs,
fn_output_signature=tf.TensorSpec(
shape=[None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8),
parallel_iterations=32))
images = tf.stack(images)
return self.serve(images)
@tf.function
def inference_from_tf_example(self,
inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
with tf.device('cpu:0'):
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._decode_tf_example,
elems=inputs,
# Height/width of the shape of input images is unspecified (None)
# at the time of decoding the example, but the shape will
# be adjusted to conform to the input layer of the model,
# by _run_inference_on_image_tensors() below.
fn_output_signature=tf.TensorSpec(
shape=[None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8),
parallel_iterations=32))
images = tf.stack(images)
return self.serve(images)
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
function_keys: A dictionary with keys as the function to create signature
for and values as the signature keys when returns.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for key, def_name in function_keys.items():
if key == 'image_tensor':
input_signature = tf.TensorSpec(
shape=[self._batch_size] + [None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8)
signatures[
def_name] = self.inference_from_image_tensors.get_concrete_function(
input_signature)
elif key == 'image_bytes':
input_signature = tf.TensorSpec(
shape=[self._batch_size], dtype=tf.string)
signatures[
def_name] = self.inference_from_image_bytes.get_concrete_function(
input_signature)
elif key == 'serve_examples' or key == 'tf_example':
input_signature = tf.TensorSpec(
shape=[self._batch_size], dtype=tf.string)
signatures[
def_name] = self.inference_from_tf_example.get_concrete_function(
input_signature)
elif key == 'tflite':
input_signature = tf.TensorSpec(
shape=[self._batch_size] + self._input_image_size +
[self._num_channels],
dtype=tf.float32)
signatures[def_name] = self.inference_for_tflite.get_concrete_function(
input_signature)
else:
raise ValueError('Unrecognized `input_type`')
return signatures
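# Illustrative usage for a concrete subclass that implements _build_model();
# the names below are hypothetical:
#
#   module = MyExportModule(params, batch_size=1, input_image_size=[224, 224])
#   signatures = module.get_inference_signatures(
#       {'image_tensor': 'serving_default'})
#   tf.saved_model.save(module, '/tmp/export', signatures=signatures)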
[authors: gardener@tensorflow.org]

=== /tests/test_visitors/test_ast/test_statements/test_parameters_indentation/test_collection_indentation.py | repo: makarchuk/wemake-python-styleguide | license: MIT (permissive) | Python, UTF-8, 3,902 bytes | committed 2018-12-24 ===
# -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.visitors.ast.statements import (
ParametersIndentationViolation,
WrongParametersIndentationVisitor,
)
# Correct:
correct_single_line_tuple = 'xy = (1, 2, 3)'
correct_single_line_list = 'xy = [1, 2, 3]'
correct_single_line_set = 'xy = {1, 2, 3}'
correct_single_line_dict = 'xy = {"key": [1, 2], "other": {1, 2}, "w": (1, 2)}'
correct_multiline_string = """
xy = (
'first'
'second'
'last'
)
"""
correct_multi_line_tuple = """
xy = (
1,
2,
3,
)
"""
correct_multi_line_list = """
xy = [
1,
2,
3,
]
"""
correct_multi_line_set = """
xy = {
1,
2,
3,
}
"""
correct_multi_line_dict = """
xy = {
1: 1,
2: 2,
3: 3,
}
"""
correct_next_line_tuple = """
xy = (
1, 2, 3,
)
"""
correct_next_line_list = """
xy = [
1, 2, 3,
]
"""
correct_next_line_set = """
xy = {
1, 2, 3,
}
"""
correct_next_line_dict = """
xy = {
1: 1, 2: 2, 3: 3,
}
"""
correct_nested_collections = """
xy = {
'key': [
1, 2, 3,
],
'other': (
'first',
'second',
),
'single': {1, 2, 3},
'multiple': {
1: [
1,
1,
1,
],
},
'ending': 5,
}
"""
# Wrong:
wrong_tuple_indentation1 = """
xy = (1,
2, 3)
"""
wrong_tuple_indentation2 = """
xy = (1, 2,
3)
"""
wrong_tuple_indentation3 = """
xy = (
1, 2,
3,
)
"""
wrong_tuple_indentation4 = """
xy = (
1,
2, 3,
)
"""
wrong_list_indentation1 = """
xy = [1,
2, 3]
"""
wrong_list_indentation2 = """
xy = [1, 2,
3]
"""
wrong_list_indentation3 = """
xy = [
1, 2,
3,
]
"""
wrong_list_indentation4 = """
xy = [
1,
2, 3,
]
"""
wrong_set_indentation1 = """
xy = {1,
2, 3}
"""
wrong_set_indentation2 = """
xy = {1, 2,
3}
"""
wrong_set_indentation3 = """
xy = {
1, 2,
3,
}
"""
wrong_set_indentation4 = """
xy = {
1,
2, 3,
}
"""
wrong_dict_indentation1 = """
xy = {1: 1,
2: 2, 3: 3}
"""
wrong_dict_indentation2 = """
xy = {1: 1, 2: 2,
3: 3}
"""
wrong_dict_indentation3 = """
xy = {
1: 1, 2: 2,
3: 3,
}
"""
wrong_dict_indentation4 = """
xy = {
1: 1,
2: 2, 3: 3,
}
"""
@pytest.mark.parametrize('code', [
correct_multiline_string,
correct_single_line_tuple,
correct_single_line_list,
correct_single_line_set,
correct_single_line_dict,
correct_multi_line_tuple,
correct_multi_line_list,
correct_multi_line_set,
correct_multi_line_dict,
correct_next_line_tuple,
correct_next_line_list,
correct_next_line_set,
    correct_next_line_dict,
correct_nested_collections,
])
def test_correct_collection_indentation(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that correctly indented collections work."""
tree = parse_ast_tree(code)
visitor = WrongParametersIndentationVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
wrong_tuple_indentation1,
wrong_tuple_indentation2,
wrong_tuple_indentation3,
wrong_tuple_indentation4,
wrong_list_indentation1,
wrong_list_indentation2,
wrong_list_indentation3,
wrong_list_indentation4,
wrong_set_indentation1,
wrong_set_indentation2,
wrong_set_indentation3,
wrong_set_indentation4,
wrong_dict_indentation1,
wrong_dict_indentation2,
wrong_dict_indentation3,
wrong_dict_indentation4,
])
def test_wrong_collection_indentation(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that poorly indented collections do not work."""
tree = parse_ast_tree(code)
visitor = WrongParametersIndentationVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ParametersIndentationViolation])
[authors: mail@sobolevn.me]

=== /salesanalyzer/settings.py | repo: tejaser/salesAnalyzer | license: no_license | Python, UTF-8, 3,313 bytes | committed 2018-08-08 ===
"""
Django settings for salesanalyzer project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8m98=48z4q*=fz%t5kq%#f)(5&*)^5^_-sc8zd9we@!h86^n#p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dataSink'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'salesanalyzer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'salesanalyzer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
[authors: tejas.er@outlook.com]

=== /tests/test_ensemble_evaluator.py | repo: Bala93/MONAI | license: Apache-2.0 (permissive) | Python, UTF-8, 3,080 bytes | committed 2022-08-12 ===
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from ignite.engine import EventEnum, Events
from parameterized import parameterized
from monai.engines import EnsembleEvaluator
TEST_CASE_1 = [["pred_0", "pred_1", "pred_2", "pred_3", "pred_4"]]
TEST_CASE_2 = [None]
class TestEnsembleEvaluator(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_content(self, pred_keys):
device = torch.device("cpu:0")
class TestDataset(torch.utils.data.Dataset):
def __len__(self):
return 8
def __getitem__(self, index):
return {"image": torch.tensor([index]), "label": torch.zeros(1)}
val_loader = torch.utils.data.DataLoader(TestDataset())
class TestNet(torch.nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
net0 = TestNet(lambda x: x + 1)
net1 = TestNet(lambda x: x + 2)
net2 = TestNet(lambda x: x + 3)
net3 = TestNet(lambda x: x + 4)
net4 = TestNet(lambda x: x + 5)
class CustomEvents(EventEnum):
FOO_EVENT = "foo_event"
BAR_EVENT = "bar_event"
val_engine = EnsembleEvaluator(
device=device,
val_data_loader=val_loader,
networks=[net0, net1, net2, net3, net4],
pred_keys=pred_keys,
event_names=["bwd_event", "opt_event", CustomEvents],
event_to_attr={CustomEvents.FOO_EVENT: "foo", "opt_event": "opt"},
)
@val_engine.on(Events.ITERATION_COMPLETED)
def run_transform(engine):
for i in range(5):
expected_value = engine.state.iteration + i
torch.testing.assert_allclose(engine.state.output[0][f"pred_{i}"].item(), expected_value)
@val_engine.on(Events.EPOCH_COMPLETED)
def trigger_custom_event():
val_engine.fire_event(CustomEvents.FOO_EVENT)
val_engine.fire_event(CustomEvents.BAR_EVENT)
val_engine.fire_event("bwd_event")
val_engine.fire_event("opt_event")
@val_engine.on(CustomEvents.FOO_EVENT)
def do_foo_op():
self.assertEqual(val_engine.state.foo, 0)
@val_engine.on("opt_event")
def do_bar_op():
self.assertEqual(val_engine.state.opt, 0)
val_engine.run()
if __name__ == "__main__":
unittest.main()
[authors: Bala93.noreply@github.com]

=== /Inventário.py | repo: juliaribeiromartins/EP1 | license: no_license | Python, UTF-8, 354 bytes | committed 2019-04-22 ===
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 18:26:13 2019
@author: julia
"""
inventario = []
# nome_cenario_atual (the current scenario name) is assumed to be set by the
# surrounding game logic before this fragment runs.
if nome_cenario_atual == "elevador":
    inventario.append("doce")
elif nome_cenario_atual in ("help desk", "sala do wii", "quadra"):
    inventario.append("pen drive")
[authors: julia.r.martins@hotmail.com]

=== /MyBook/Book/admin.py | repo: SHAOOR786/MyBook | license: no_license | Python, UTF-8, 166 bytes | committed 2021-06-23 ===
from django.contrib import admin
from .models import Profile,Skill,Document
admin.site.register(Profile)
admin.site.register(Skill)
admin.site.register(Document)
[authors: aquariusca01@gmail.com]

=== /devel/lib/python2.7/dist-packages/franka_gripper/msg/_GraspEpsilon.py | repo: christinaionescu/panda_testing | license: no_license | Python, UTF-8, 120 bytes | committed 2021-03-01 ===
/home/pandanuc3/catkin_ws4/devel/.private/franka_gripper/lib/python2.7/dist-packages/franka_gripper/msg/_GraspEpsilon.py
[authors: christina.ionescu@ifu.rwth-aachen.de]

=== /python/활용자료/예제/08/ex8-6.py | repo: leemyoungwoo/pybasic | license: no_license | Python, UTF-8, 62 bytes | committed 2020-06-15 ===
import random
for _ in range(3):
    print(random.random())
[authors: mwlee2587@gmail.com]

=== /exercise_2/utils/decorators.py | repo: igortsallagov/py-adpy-04 | license: no_license | Python, UTF-8, 350 bytes | committed 2019-02-08 ===
import datetime
def logging(func):
def write_logs(phone_book_instance):
date = datetime.datetime.today()
log_string = f'{date} // {phone_book_instance.name} // {func.__name__}\n'
with open('log.txt', 'a') as log_file:
log_file.write(log_string)
return func(phone_book_instance)
return write_logs
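# A minimal sketch of applying the decorator above; PhoneBook is a
# hypothetical stand-in (it only needs a .name attribute):
class PhoneBook:
    def __init__(self, name):
        self.name = name

    @logging
    def show(self):
        return f'phone book: {self.name}'

if __name__ == '__main__':
    print(PhoneBook('friends').show())  # also appends an entry to log.txt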
[authors: igor.tsallagov@gmail.com]

=== /nani/descriptors.py | repo: stefanfoulis/project-nani | license: BSD-3-Clause (permissive) | Python, UTF-8, 3,622 bytes | committed 2011-03-04 ===
from nani.utils import get_translation
class NULL:pass
class BaseDescriptor(object):
"""
Base descriptor class with a helper to get the translations instance.
"""
def __init__(self, opts):
self.opts = opts
def translation(self, instance):
cached = getattr(instance, self.opts.translations_cache, None)
if not cached:
cached = get_translation(instance)
setattr(instance, self.opts.translations_cache, cached)
return cached
class TranslatedAttribute(BaseDescriptor):
"""
Basic translated attribute descriptor.
Proxies attributes from the shared instance to the translated instance.
"""
def __init__(self, opts, name):
self.name = name
super(TranslatedAttribute, self).__init__(opts)
def __get__(self, instance, instance_type=None):
if not instance:
# Don't raise an attribute error so we can use it in admin.
return self.opts.translations_model._meta.get_field_by_name(self.name)[0].default
return getattr(self.translation(instance), self.name)
def __set__(self, instance, value):
if not instance:
raise AttributeError()
setattr(self.translation(instance), self.name, value)
def __delete__(self, instance):
if not instance:
raise AttributeError()
delattr(self.translation(instance), self.name)
class LanguageCodeAttribute(TranslatedAttribute):
"""
The language_code attribute is different from other attribtues as it cannot
be deleted. Trying to do so will always cause an attribute error.
"""
def __init__(self, opts):
super(LanguageCodeAttribute, self).__init__(opts, 'language_code')
def __set__(self, instance, value):
"""
Setting the language_code attribute is a bit odd.
When changing the language_code on an instance, we try to grab the
existing translation and copy over the unfilled fields from that
translation onto the instance. If no such translation exist, create one
and copy over the fields from the instance.
This is used to translate instances.
This will also refresh the translations cache attribute on the instance.
EG:
english = MyModel.objects.get(pk=1, language_code='en')
english.language_code = 'ja'
english.save()
japanese = MyModel.objects.get(pk=1, language_code='ja')
"""
if not instance:
raise AttributeError()
tmodel = instance._meta.translations_model
try:
other_lang = get_translation(instance, value)
except tmodel.DoesNotExist:
other_lang = tmodel()
for field in other_lang._meta.get_all_field_names():
val = getattr(instance, field, NULL)
if val is NULL:
continue
if field == 'pk':
continue
if field == tmodel._meta.pk.name:
continue
if callable(getattr(val, 'all', None)):
val = val.all()
setattr(other_lang, field, val)
other_lang.language_code = value
other_lang.master = instance
setattr(instance, instance._meta.translations_cache, other_lang)
def __delete__(self, instance):
if not instance:
raise AttributeError()
raise AttributeError("The 'language_code' attribute cannot be deleted!")
|
[
"jonas.obrist@divio.ch"
] |
jonas.obrist@divio.ch
|
49fd8a9fff68e64f5570f9b1bf88715332d7a98f
|
8761930dec0c7469837a2e6d2722e477b8a8d871
|
/pyguide/modules.py
|
7352e96bd668a03398567e9ffe578d45d73cd07c
|
[] |
no_license
|
chirs/compsci
|
3a43848af7ef3f0f24944ea8a6f8ccb439cbf1aa
|
938e7a80d297900d9430c9cc79ebf03633ecff12
|
refs/heads/master
| 2021-01-13T01:37:19.479577
| 2014-11-21T03:31:52
| 2014-11-21T03:31:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
import datetime
#Modules
#-------
# Explore import here also.
# modules are singleton objects - they get loaded into sys.modules once
datetime.a = 4
# What's going on here?
from datetime import a
print(a)  # 4
#packages
#TOM: ?
#Or of course if you do the import yourself with __import__ or imp.load_module or something else under the hood
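# A quick check of the singleton behaviour described above:
import sys
assert sys.modules['datetime'] is datetime  # one module object, cached in sys.modules
import datetime as dt2
assert dt2 is datetime  # a second import returns the same cached object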
|
[
"chrisedgemon@gmail.com"
] |
chrisedgemon@gmail.com
|
48af6b4bd9215f66f7f5723d3b95fb70b1a04d53
|
2c5f6829888bf75122f67931ce8d324535398594
|
/python-solutions/string/345_reverse_vowels_string.py
|
d996c39071db15da79f5e7a038822ac3dcd41121
|
[] |
no_license
|
jvanvari/LeetCode
|
6880b95086bf0ea4f35b4be346a017674475d80b
|
1b6d9145e6dacb9e940a4dee872d7935b0518b4f
|
refs/heads/master
| 2022-11-24T18:47:42.729896
| 2020-07-31T17:02:47
| 2020-07-31T17:02:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
class Solution:
    def reverseVowels(self, s: str) -> str:  # camelCase name is required by LeetCode; PEP 8 would prefer snake_case
c = list(s) # string to list
j = len(s) - 1
i = 0
        vowel = ["a", "e", "i", "o", "u", "A", "E", "I", "O", "U"]
while (i < j):
if c[i] in vowel and c[j] in vowel:
c[i], c[j] = c[j], c[i] # tuple swap
j = j - 1
i = i + 1
elif c[i] in vowel:
j = j - 1
elif c[j] in vowel:
i = i + 1
else:
j = j - 1
i = i + 1
return ''.join(c)
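# Example: vowels are swapped pairwise from the outside in.
if __name__ == '__main__':
    print(Solution().reverseVowels("hello"))     # -> "holle"
    print(Solution().reverseVowels("leetcode"))  # -> "leotcede"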
|
[
"gvanvari@gmail.com"
] |
gvanvari@gmail.com
|
450cd3056ff7d6cbe3eb2a316b2bbc7c16e92239
|
4bed188b7c145c0863d1e6dfeb4eb27ef849ce2f
|
/486_ws/build/astra_camera/catkin_generated/pkg.develspace.context.pc.py
|
48da2c7b35c61cab6fefcfab9d430b6449f0f0b9
|
[] |
no_license
|
ihelal/MSE486
|
50e6f223f601153fe42140a0111bfdac5e20dd92
|
0a22f49cfb75279ce70f81e73f978a3efd157029
|
refs/heads/master
| 2022-12-08T21:46:42.080565
| 2020-08-18T01:50:12
| 2020-08-18T01:50:12
| 288,327,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/hima/486_ws/devel/include;/home/hima/486_ws/src/astra_camera/include".split(';') if "/home/hima/486_ws/devel/include;/home/hima/486_ws/src/astra_camera/include" != "" else []
PROJECT_CATKIN_DEPENDS = "camera_info_manager;dynamic_reconfigure;image_transport;nodelet;sensor_msgs;roscpp;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lastra_wrapper".split(';') if "-lastra_wrapper" != "" else []
PROJECT_NAME = "astra_camera"
PROJECT_SPACE_DIR = "/home/hima/486_ws/devel"
PROJECT_VERSION = "0.3.0"
|
[
"ihelal@sfu.ca"
] |
ihelal@sfu.ca
|
b39dc37ecd16c26aa179b0a503d9568e9c4d3ed5
|
003dd45d19b5a6fd4a04deeefa63756462dbe09d
|
/pymoo/visualization/scatter.py
|
cbd1fb1335ff582b2b3faf26749989e19329547c
|
[
"Apache-2.0"
] |
permissive
|
Flytortoise/pymoo
|
51d32793e843977bd8fda0226bb6add1c356e21d
|
c6426a721d95c932ae6dbb610e09b6c1b0e13594
|
refs/heads/master
| 2023-09-03T20:54:13.284192
| 2021-11-09T13:23:15
| 2021-11-09T13:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,926
|
py
|
import numpy as np
from pymoo.docs import parse_doc_string
from pymoo.core.plot import Plot
from pymoo.util.misc import set_if_none
class Scatter(Plot):
def __init__(self,
angle=(45, 45),
**kwargs):
"""
Scatter Plot
Parameters
----------------
axis_style : {axis_style}
endpoint_style : dict
Endpoints are drawn at each extreme point of an objective. This style can be modified.
labels : {labels}
Other Parameters
----------------
figsize : {figsize}
title : {title}
legend : {legend}
tight_layout : {tight_layout}
cmap : {cmap}
"""
super().__init__(**kwargs)
self.angle = angle
def _do(self):
is_1d = (self.n_dim == 1)
is_2d = (self.n_dim == 2)
is_3d = (self.n_dim == 3)
more_than_3d = (self.n_dim > 3)
# create the figure and axis objects
if is_1d or is_2d:
self.init_figure()
elif is_3d:
self.init_figure(plot_3D=True)
elif more_than_3d:
self.init_figure(n_rows=self.n_dim, n_cols=self.n_dim)
# now plot data points for each entry
for k, (F, kwargs) in enumerate(self.to_plot):
# copy the arguments and set the default color
_kwargs = kwargs.copy()
set_if_none(_kwargs, "color", self.colors[k % len(self.colors)])
# determine the plotting type - scatter or line
_type = _kwargs.get("plot_type")
if "plot_type" in _kwargs:
del _kwargs["plot_type"]
if is_1d:
F = np.column_stack([F, np.zeros(len(F))])
labels = self.get_labels() + [""]
self.plot(self.ax, _type, F, **_kwargs)
self.set_labels(self.ax, labels, False)
elif is_2d:
self.plot(self.ax, _type, F, **_kwargs)
self.set_labels(self.ax, self.get_labels(), False)
elif is_3d:
set_if_none(_kwargs, "alpha", 1.0)
self.plot(self.ax, _type, F, **_kwargs)
self.ax.xaxis.pane.fill = False
self.ax.yaxis.pane.fill = False
self.ax.zaxis.pane.fill = False
self.set_labels(self.ax, self.get_labels(), True)
if self.angle is not None:
self.ax.view_init(*self.angle)
else:
labels = self.get_labels()
for i in range(self.n_dim):
for j in range(self.n_dim):
ax = self.ax[i, j]
if i != j:
self.plot(ax, _type, F[:, [i, j]], **_kwargs)
self.set_labels(ax, [labels[i], labels[j]], is_3d)
else:
ax.set_xticks([])
ax.set_yticks([])
ax.scatter(0, 0, s=1, color="white")
ax.text(0, 0, labels[i], ha='center', va='center', fontsize=20)
return self
def plot(self, ax, _type, F, **kwargs):
is_3d = F.shape[1] == 3
if _type is None:
_type = "scatter"
if _type == "scatter":
if is_3d:
ax.scatter(F[:, 0], F[:, 1], F[:, 2], **kwargs)
else:
ax.scatter(F[:, 0], F[:, 1], **kwargs)
else:
if is_3d:
ax.plot_trisurf(F[:, 0], F[:, 1], F[:, 2], **kwargs)
else:
ax.plot(F[:, 0], F[:, 1], **kwargs)
def set_labels(self, ax, labels, is_3d):
# set the labels for each axis
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
if is_3d:
ax.set_zlabel(labels[2])
parse_doc_string(Scatter.__init__)
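# A minimal usage sketch (assumes the add()/show() chain provided by pymoo's Plot
# base class; random points stand in for a real objective matrix F):
# plot = Scatter(title="50 random points in 2 objectives")
# plot.add(np.random.random((50, 2)), color="blue")
# plot.show()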
|
[
"blankjul@egr.msu.edu"
] |
blankjul@egr.msu.edu
|
74f8fbad03d961ee5324519e9dca1706e4a4260b
|
fd87ad189eda8f862b01f69b3c159b07cefd9ffe
|
/conftest.py
|
dc101ae0102cd7b70386397204352d556f94952d
|
[] |
no_license
|
nsteinme/phy
|
d764c6c54ff86a80b53053a0a104bfe0720b9214
|
47694b2e5f0c53056cb2f6ae16145d0553d7bc3b
|
refs/heads/master
| 2021-01-18T15:26:43.954619
| 2015-07-27T21:29:47
| 2015-07-27T21:29:47
| 38,246,841
| 2
| 0
| null | 2015-06-29T12:36:51
| 2015-06-29T12:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,613
|
py
|
# -*- coding: utf-8 -*-
"""py.test utilities."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os
import numpy as np
from pytest import yield_fixture
from phy.electrode.mea import load_probe
from phy.io.mock import artificial_traces
from phy.utils._types import Bunch
from phy.utils.tempdir import TemporaryDirectory
from phy.utils.settings import _load_default_settings
from phy.utils.datasets import download_test_data
#------------------------------------------------------------------------------
# Common fixtures
#------------------------------------------------------------------------------
@yield_fixture
def tempdir():
with TemporaryDirectory() as tempdir:
yield tempdir
@yield_fixture
def chdir_tempdir():
curdir = os.getcwd()
with TemporaryDirectory() as tempdir:
os.chdir(tempdir)
yield tempdir
os.chdir(curdir)
@yield_fixture
def tempdir_bis():
with TemporaryDirectory() as tempdir:
yield tempdir
@yield_fixture(params=['null', 'artificial', 'real'])
def raw_dataset(request):
sample_rate = 20000
params = _load_default_settings()['spikedetekt']
data_type = request.param
if data_type == 'real':
path = download_test_data('test-32ch-10s.dat')
traces = np.fromfile(path, dtype=np.int16).reshape((200000, 32))
traces = traces[:45000]
n_samples, n_channels = traces.shape
params['use_single_threshold'] = False
probe = load_probe('1x32_buzsaki')
else:
probe = {'channel_groups': {
0: {'channels': [0, 1, 2],
'graph': [[0, 1], [0, 2], [1, 2]],
},
1: {'channels': [3],
'graph': [],
'geometry': {3: [0., 0.]},
}
}}
if data_type == 'null':
n_samples, n_channels = 25000, 4
traces = np.zeros((n_samples, n_channels))
elif data_type == 'artificial':
n_samples, n_channels = 25000, 4
traces = artificial_traces(n_samples, n_channels)
traces[5000:5010, 1] *= 5
traces[15000:15010, 3] *= 5
n_samples_w = params['extract_s_before'] + params['extract_s_after']
yield Bunch(n_channels=n_channels,
n_samples=n_samples,
sample_rate=sample_rate,
n_samples_waveforms=n_samples_w,
traces=traces,
params=params,
probe=probe,
)
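# A minimal example test built on these fixtures (hypothetical test module; the
# parametrized raw_dataset fixture runs it once per 'null'/'artificial'/'real' case):
# def test_raw_dataset_shape(raw_dataset):
#     assert raw_dataset.traces.shape == (raw_dataset.n_samples, raw_dataset.n_channels)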
|
[
"cyrille.rossant@gmail.com"
] |
cyrille.rossant@gmail.com
|
39bdcba066a9a4034929eb6e61cc0bb409e80dfd
|
7e9a2aabeab0309080d5ff930dcb985140f7e99f
|
/blog/migrations/0003_comment.py
|
157b3752e2e754fd2a1a2fa6ec5357fbee4d4d0f
|
[] |
no_license
|
bengbenx/django_python
|
db994bf1c1035ed02514d60ebf404076e02a5233
|
6d33449c160f72c7de4cfe1aff56f9c719273666
|
refs/heads/master
| 2022-10-20T17:03:30.229271
| 2020-06-13T07:58:46
| 2020-06-13T07:58:46
| 271,693,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
# Generated by Django 3.0.6 on 2020-06-09 02:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20200608_1205'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('email', models.EmailField(max_length=254)),
('body', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
options={
'ordering': ('created',),
},
),
]
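# A sketch of the blog/models.py Comment model this migration implies (reconstructed
# from the operations above; the actual source may differ):
# class Comment(models.Model):
#     post = models.ForeignKey('blog.Post', on_delete=models.CASCADE, related_name='comments')
#     name = models.CharField(max_length=80)
#     email = models.EmailField()
#     body = models.TextField()
#     created = models.DateTimeField(auto_now_add=True)
#     updated = models.DateTimeField(auto_now=True)
#     active = models.BooleanField(default=True)
#     class Meta:
#         ordering = ('created',)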
|
[
"bengbenx@gmail.com"
] |
bengbenx@gmail.com
|
62e66734d55829920dfe36a63b969fa1804bec35
|
c44814ad5c2762bf718012a0d2330fa2130e0fc3
|
/datadog_checks_dev/datadog_checks/dev/tooling/configuration/template.py
|
1119022bb15d3c87f96d36d243521dc1584a70e7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lucasviecelli/integrations-core
|
450f551ddd89f95c429c16a8c4aefc5534b0638c
|
1a0c635c7fbd921c2672762efdc709a3ab6b647d
|
refs/heads/master
| 2022-12-13T11:49:28.449631
| 2020-09-09T13:20:03
| 2020-09-09T13:20:03
| 293,894,322
| 0
| 0
|
BSD-3-Clause
| 2020-09-08T18:27:17
| 2020-09-08T18:27:16
| null |
UTF-8
|
Python
| false
| false
| 5,188
|
py
|
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from copy import deepcopy
import yaml
from ...utils import file_exists, get_parent_dir, path_join, read_file
TEMPLATES_DIR = path_join(get_parent_dir(get_parent_dir(__file__)), 'templates', 'configuration')
VALID_EXTENSIONS = ('yaml', 'yml')
class ConfigTemplates(object):
def __init__(self, paths=None):
self.templates = {}
self.paths = []
if paths:
self.paths.extend(paths)
self.paths.append(TEMPLATES_DIR)
def load(self, template):
path_parts = template.split('/')
branches = path_parts.pop().split('.')
path_parts.append(branches.pop(0))
possible_template_paths = (
f'{path_join(path, *path_parts)}.{extension}' for path in self.paths for extension in VALID_EXTENSIONS
)
for template_path in possible_template_paths:
if file_exists(template_path):
break
else:
raise ValueError(f"Template `{'/'.join(path_parts)}` does not exist")
if template_path in self.templates:
data = self.templates[template_path]
else:
try:
data = yaml.safe_load(read_file(template_path))
except Exception as e:
raise ValueError(f'Unable to parse template `{template_path}`: {e}')
self.templates[template_path] = data
data = deepcopy(data)
for i, branch in enumerate(branches):
if isinstance(data, dict):
if branch in data:
data = data[branch]
else:
raise ValueError(f"Template `{'/'.join(path_parts)}` has no element `{'.'.join(branches[:i + 1])}`")
elif isinstance(data, list):
for item in data:
if isinstance(item, dict) and item.get('name') == branch:
data = item
break
else:
raise ValueError(
'Template `{}` has no named element `{}`'.format(
'/'.join(path_parts), '.'.join(branches[: i + 1])
)
)
else:
raise ValueError(
'Template `{}.{}` does not refer to a mapping, rather it is type `{}`'.format(
'/'.join(path_parts), '.'.join(branches[:i]), type(data).__name__
)
)
return data
@staticmethod
def apply_overrides(template, overrides):
errors = []
for override, value in sorted(overrides.items()):
root = template
override_keys = override.split('.')
final_key = override_keys.pop()
intermediate_error = ''
# Iterate through all but the last key, attempting to find a dictionary at every step
for i, key in enumerate(override_keys):
if isinstance(root, dict):
if key in root:
root = root[key]
else:
intermediate_error = (
f"Template override `{'.'.join(override_keys[:i])}` has no named mapping `{key}`"
)
break
elif isinstance(root, list):
for item in root:
if isinstance(item, dict) and item.get('name') == key:
root = item
break
else:
intermediate_error = (
f"Template override `{'.'.join(override_keys[:i])}` has no named mapping `{key}`"
)
break
else:
intermediate_error = (
f"Template override `{'.'.join(override_keys[:i])}` does not refer to a mapping"
)
break
if intermediate_error:
errors.append(intermediate_error)
continue
# Force assign the desired value to the final key
if isinstance(root, dict):
root[final_key] = value
elif isinstance(root, list):
for i, item in enumerate(root):
if isinstance(item, dict) and item.get('name') == final_key:
root[i] = value
break
else:
intermediate_error = 'Template override has no named mapping `{}`'.format(
'.'.join(override_keys) if override_keys else override
)
else:
intermediate_error = 'Template override `{}` does not refer to a mapping'.format(
'.'.join(override_keys) if override_keys else override
)
if intermediate_error:
errors.append(intermediate_error)
continue
overrides.pop(override)
return errors
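# A minimal usage sketch (hypothetical template name and override key; load()
# resolves the name against the built-in templates directory and apply_overrides()
# mutates the loaded data in place):
# templates = ConfigTemplates()
# section = templates.load('instances/http')
# errors = ConfigTemplates.apply_overrides(section, {'timeout.value': 10})
# assert not errors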
|
[
"noreply@github.com"
] |
lucasviecelli.noreply@github.com
|
c870878836b7d664a633977aabdb7e7089083792
|
8f9ae6aa205f7b5b29dcdd4920ccdab836de417b
|
/travelapp/migrations/0001_initial.py
|
8e50df67e14afa256b14ba0f58815c36e7244c84
|
[] |
no_license
|
hiranpradeep/travelproject
|
492cba273aa6f86727235ee7a366908d1542731d
|
91790672869ae9cc4bfe935abbdcab86eb6f7348
|
refs/heads/master
| 2023-08-20T23:21:52.339314
| 2021-09-09T14:23:29
| 2021-09-09T14:23:29
| 404,752,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
# Generated by Django 3.2.4 on 2021-06-12 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Place',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('image', models.ImageField(upload_to='pics')),
('desc', models.TextField()),
],
),
]
|
[
"55525087+hiranpradeep@users.noreply.github.com"
] |
55525087+hiranpradeep@users.noreply.github.com
|
880299dd33d067783ba797ec71a1c73b24352600
|
fb67f2bdaae627cfecca0802664239b285c4a199
|
/api/v1/views/cities.py
|
2845a560a00af28b18b659e72170800c7ddff151
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
ManuBedoya/AirBnB_clone_v3
|
541bbd36584ed8b03c2c679ce4771a18d6dc2965
|
79a770b059b9a66e95e36b7998b31004e2429f13
|
refs/heads/master
| 2023-08-24T23:50:07.920974
| 2021-09-20T17:47:10
| 2021-09-20T17:47:10
| 407,667,339
| 0
| 0
| null | 2021-09-20T23:18:07
| 2021-09-17T20:09:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,650
|
py
|
#!/usr/bin/python3
""" This module creates view for City objects """
from api.v1.views import app_views
from models import storage
from flask import jsonify, abort, request
from models.state import State
from models.city import City
@app_views.route('/states/<state_id>/cities', strict_slashes=False)
def display_cities(state_id):
"""display the cities of a state"""
try:
states = storage.get(State, state_id)
if states is None:
abort(404)
city_list = []
all_cities = storage.all(City)
for city in all_cities.values():
if city.state_id == state_id:
city_list.append(city.to_dict())
return jsonify(city_list)
except:
abort(404)
@app_views.route('/cities/<city_id>', strict_slashes=False)
def display_city(city_id):
"""display a city"""
try:
city = storage.get(City, city_id)
if city is None:
abort(404)
return jsonify(city.to_dict())
except:
abort(404)
@app_views.route(
'/cities/<city_id>',
methods=['DELETE'],
strict_slashes=False)
def delete_city(city_id):
""" Deletes a city """
try:
city = storage.get(City, city_id)
if city is None:
abort(404)
storage.delete(city)
storage.save()
return jsonify({}), 200
except:
abort(404)
@app_views.route(
'/states/<state_id>/cities',
methods=['POST'],
strict_slashes=False)
def post_city(state_id):
""" Create new city """
try:
if storage.get(State, state_id) is None:
abort(404)
if not request.get_json():
return jsonify({'error': 'Not a JSON'}), 400
if 'name' not in list(request.get_json().keys()):
return jsonify({'error': 'Missing name'}), 400
request_city = request.get_json().copy()
request_city['state_id'] = state_id
city = City(**request_city)
city.save()
return jsonify(city.to_dict()), 201
except:
abort(404)
@app_views.route('/cities/<city_id>', methods=['PUT'], strict_slashes=False)
def put_city(city_id):
""" update city with specified id """
try:
city = storage.get(City, city_id)
if city is None:
abort(404)
if not request.get_json():
return jsonify({'error': 'Not a JSON'}), 400
for key, value in request.get_json().items():
if key not in ['id', 'state_id', 'created_at', 'updated_at']:
setattr(city, key, value)
city.save()
return jsonify(city.to_dict()), 200
except:
abort(404)
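# A quick client-side sketch of the endpoints above (assumes a locally served API;
# the URL, port and state id are placeholders):
# import requests
# resp = requests.post('http://0.0.0.0:5000/api/v1/states/<state_id>/cities',
#                      json={'name': 'San Francisco'})
# resp.status_code  # 201 with the new city's JSON on success, 400/404 otherwise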
|
[
"adriana.er98@hotmail.com"
] |
adriana.er98@hotmail.com
|
2bd27a28d246409a5b24c67b8e913af0c05c7fb8
|
a67a987ed078da0a1de2908c8c0e08070dee65b1
|
/genice/lattices/Struct55.py
|
ec04992940ba97dbf33d7c7201fa089e0663363f
|
[] |
no_license
|
Python3pkg/GenIce
|
ef1ce7ee2997c10e08dde75ac36050a653cd4fc5
|
1e9458b7bf8e0fd2ad5d0c4f8987cea0ae7ca0b0
|
refs/heads/master
| 2021-01-21T17:31:51.595858
| 2017-05-21T14:09:32
| 2017-05-21T14:09:32
| 91,962,047
| 0
| 0
| null | 2017-05-21T14:09:28
| 2017-05-21T14:09:28
| null |
UTF-8
|
Python
| false
| false
| 6,336
|
py
|
"""
Data source: Dutour Sikirić, Mathieu, Olaf Delgado-Friedrichs, and Michel Deza. “Space Fullerenes: a Computer Search for New Frank-Kasper Structures” Acta Crystallographica Section A Foundations of Crystallography 66.Pt 5 (2010): 602–615.
Cage composition:
(12,14,15,16) = (14,4,4,4,)
"""
pairs="""
5 77
135 53
21 92
43 4
136 98
28 82
28 59
89 58
131 142
67 1
59 37
34 139
18 115
14 113
66 50
143 104
35 27
115 140
85 49
5 7
18 4
89 82
136 88
36 29
145 41
105 0
140 93
101 13
99 75
109 76
5 97
11 38
131 26
12 39
0 55
38 129
134 146
137 118
62 72
81 9
102 117
126 123
88 14
25 22
105 117
44 112
29 144
106 69
88 51
80 122
139 36
126 109
30 86
56 14
23 131
77 11
102 48
32 7
90 8
67 79
27 130
125 147
40 14
33 90
59 79
143 56
61 80
128 7
106 95
94 107
137 17
15 43
45 87
129 9
68 147
50 88
39 99
128 90
130 91
2 17
105 123
19 40
20 40
83 106
3 138
64 119
3 137
2 138
115 24
70 129
69 130
134 112
15 101
89 120
145 107
30 100
100 98
37 95
13 50
117 109
10 146
96 55
56 98
118 76
54 56
121 96
26 47
36 41
139 67
48 58
83 92
33 60
12 147
44 124
105 57
140 43
121 6
6 120
74 128
126 46
62 24
30 101
103 143
114 27
52 96
102 130
32 33
136 101
46 53
140 72
83 91
21 17
92 81
19 71
73 45
77 121
132 29
33 111
110 37
71 73
51 42
122 13
63 93
135 124
84 131
77 23
94 70
111 98
66 57
102 133
119 138
63 15
31 45
46 113
97 83
103 61
13 65
58 41
89 107
31 15
93 8
108 124
34 44
12 52
145 26
85 11
78 16
136 4
78 63
119 125
127 118
138 121
127 87
32 23
95 21
18 111
82 99
71 75
97 132
103 113
116 86
80 66
87 72
145 141
139 74
18 86
110 69
76 133
3 52
115 90
84 79
85 60
73 27
104 146
85 97
135 93
110 82
1 144
25 51
42 123
84 104
60 100
95 1
7 21
134 144
24 74
48 0
3 22
126 75
116 47
74 132
125 6
57 68
125 9
143 142
28 94
31 114
19 124
79 41
0 120
104 141
80 42
84 94
22 65
19 46
34 10
107 38
34 62
20 61
32 49
64 81
127 22
122 25
78 30
78 24
4 25
123 39
10 53
11 26
37 133
109 87
60 16
137 81
71 57
70 96
68 99
54 47
135 134
64 91
31 65
132 16
133 17
122 86
54 141
8 144
92 49
42 113
35 75
114 118
45 108
58 69
64 114
50 147
2 91
128 1
142 111
39 55
10 103
28 55
117 73
20 44
141 112
48 59
106 29
66 40
20 54
119 65
129 49
112 36
35 110
146 67
35 76
38 6
5 2
51 12
116 142
47 100
68 120
52 9
63 108
62 108
127 43
16 8
61 116
53 72
23 70
"""
waters="""
0.52958 0.71109 0.0
0.83485 0.01207 0.30681
0.02958 0.78892 0.0
0.15417 0.64074 0.18181
0.15913 0.37609 0.30681
0.07948 0.90876 0.0
0.34046 0.76284 0.81819
0.03974 0.95438 0.18181
0.94491 0.15486 0.5
0.21941 0.73328 0.5
0.65955 0.23716 0.18181
0.28 0.96012 0.81819
0.3502 0.59717 0.375
0.22 0.46012 0.81819
0.44491 0.34514 0.5
0.03454 0.41081 0.625
0.02439 0.13189 0.69319
0.96546 0.77092 0.18181
0.15955 0.26284 0.18181
0.64981 0.40283 0.625
0.53455 0.27092 0.81819
0.97561 0.86811 0.30681
0.12458 0.53355 0.125
0.28 0.96012 0.18181
0.97042 0.21109 0.0
0.22 0.46012 0.18181
0.37542 0.03355 0.875
0.84087 0.62391 0.69319
0.54452 0.82053 0.30681
0.83485 0.01207 0.69319
0.15955 0.26284 0.81819
0.0 0.5 0.75
0.16516 0.98793 0.30681
0.14981 0.09717 0.375
0.72042 0.2214 0.0
0.76577 0.64173 0.5
0.72 0.03988 0.81819
0.76493 0.82345 0.30681
0.34087 0.87609 0.69319
0.47561 0.63189 0.30681
0.52439 0.36811 0.69319
0.62458 0.96645 0.875
0.46026 0.45438 0.18181
0.03454 0.41081 0.375
0.65955 0.23716 0.81819
0.87542 0.46645 0.875
0.64981 0.40283 0.375
0.34584 0.14074 0.81819
0.65458 0.79078 0.0
0.16515 0.91966 0.5
0.33485 0.48793 0.69319
0.33485 0.48793 0.30681
0.26493 0.67655 0.30681
0.73507 0.32345 0.30681
0.45548 0.17947 0.69319
0.46546 0.72908 0.18181
0.40559 0.23104 0.5
0.53974 0.54562 0.81819
0.65417 0.85926 0.81819
0.65417 0.85926 0.18181
0.14981 0.09717 0.625
0.47042 0.28892 0.0
0.84542 0.29078 0.0
0.95548 0.32053 0.69319
0.04452 0.67947 0.69319
0.12458 0.53355 0.875
0.46026 0.45438 0.81819
0.72 0.03988 0.18181
0.47561 0.63189 0.69319
0.76493 0.82345 0.69319
0.34087 0.87609 0.30681
0.66516 0.51207 0.69319
0.84584 0.35926 0.18181
0.78 0.53988 0.81819
0.92053 0.09124 0.0
0.66515 0.58034 0.5
0.84087 0.62391 0.30681
0.23406 0.90361 0.0
0.03455 0.22908 0.81819
0.62458 0.96645 0.125
0.42053 0.40876 0.0
0.09441 0.73104 0.5
0.59441 0.76896 0.5
0.97561 0.86811 0.69319
0.5 0.0 0.25
0.16516 0.98793 0.69319
0.22042 0.27861 0.0
0.87542 0.46645 0.125
0.33485 0.41966 0.5
0.54452 0.82053 0.69319
0.02439 0.13189 0.30681
0.96546 0.77092 0.81819
0.05509 0.84514 0.5
0.90559 0.26896 0.5
0.46546 0.91081 0.375
0.8502 0.90283 0.375
0.34046 0.76284 0.18181
0.03974 0.95438 0.81819
0.28059 0.23328 0.5
0.55509 0.65486 0.5
0.23507 0.17655 0.69319
0.15913 0.37609 0.69319
0.77958 0.7214 0.0
0.53455 0.27092 0.18181
0.53454 0.08919 0.375
0.57948 0.59124 0.0
0.8502 0.90283 0.625
0.46546 0.91081 0.625
0.84584 0.35926 0.81819
0.78 0.53988 0.18181
0.71941 0.76673 0.5
0.23507 0.17655 0.30681
0.65913 0.12391 0.69319
0.52439 0.36811 0.30681
0.96546 0.58919 0.625
0.03455 0.22908 0.18181
0.34542 0.20922 0.0
0.73406 0.5964 0.0
0.96546 0.58919 0.375
0.15417 0.64074 0.81819
0.46546 0.72908 0.81819
0.27958 0.77861 0.0
0.26594 0.40361 0.0
0.53974 0.54562 0.18181
0.73507 0.32345 0.69319
0.26493 0.67655 0.69319
0.66516 0.51207 0.30681
0.0 0.5 0.25
0.96026 0.04562 0.18181
0.26577 0.85828 0.5
0.84046 0.73716 0.81819
0.37542 0.03355 0.125
0.96026 0.04562 0.81819
0.84046 0.73716 0.18181
0.73424 0.14173 0.5
0.78059 0.26673 0.5
0.23424 0.35828 0.5
0.04452 0.67947 0.30681
0.15458 0.70922 0.0
0.76594 0.0964 0.0
0.95548 0.32053 0.30681
0.53454 0.08919 0.625
0.34584 0.14074 0.18181
0.45548 0.17947 0.30681
0.83485 0.08034 0.5
0.5 0.0 0.75
0.65913 0.12391 0.30681
0.3502 0.59717 0.625
"""
coord= "relative"
cages="""
14 0.45938 0.88877 0.0
12 0.0 0.0 0.5
15 0.34271 0.04359 0.5
16 -0.15896 -0.09061 0.0
12 0.84105 0.18248 -0.22725
12 0.34105 0.31752 0.22725
12 -0.09755 -0.25447 0.5
16 0.34104 0.59061 0.0
12 0.09755 0.25447 0.5
12 0.5 0.5 -0.5
15 0.15729 0.54359 -0.5
14 -0.04062 -0.38877 0.0
16 0.65896 0.40939 0.0
12 -0.84105 -0.18248 0.22725
12 -0.34105 -0.31752 -0.22725
12 -0.15895 -0.81752 0.22725
15 0.84271 0.45641 -0.5
12 0.59755 0.24553 -0.5
14 0.04062 0.38877 0.0
15 -0.34271 -0.04359 0.5
12 0.15895 0.81752 -0.22725
14 0.54062 0.11123 0.0
12 -0.34105 -0.31752 0.22725
12 0.34105 0.31752 -0.22725
12 0.40245 0.75447 -0.5
16 0.15896 0.09061 0.0
"""
bondlen = 3
celltype = 'rect'
cell = """
22.29787574334263 26.193104175639082 13.5197850518029
"""
density = 0.5602388491410102
|
[
"vitroid@gmail.com"
] |
vitroid@gmail.com
|
36337609b3b76fe6dbe417039e389f275608fd31
|
fcc01cfc2476e0a88eca1a616285a4664517260a
|
/utils/plotting.py
|
1a75482237267dd4832c75bb4c8008a614a94189
|
[
"MIT"
] |
permissive
|
AlessandroStaffolani/reinforcement-learning-openai-gym
|
8411883107086b7aa237cfc7e0439bbab7768f6b
|
0b918701ab190f22c5973bd798961b389d43a2d1
|
refs/heads/master
| 2023-01-14T05:19:48.290206
| 2019-10-04T12:46:35
| 2019-10-04T12:46:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,261
|
py
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from collections import namedtuple
import torch
EpisodeStats = namedtuple("Stats", ["episode_lengths", "episode_rewards", "episode_epsilon", "episode_alpha"])
def plot_episode_stats(stats, n_episodes, smoothing_window=10, noshow=False, goal_value=None, fig_size=(15, 8), ada_divisor=25, show_params=False):
# Plot the episode length over time
fig1 = plt.figure(figsize=fig_size)
plt.plot(stats.episode_lengths)
plt.xlabel("Episode")
plt.ylabel("Episode Length")
plt.title("Episode Length over Time")
if noshow:
plt.close(fig1)
else:
        plt.show()  # plt.show() takes no positional figure argument
# Plot the episode reward over time
fig2 = plt.figure(figsize=fig_size)
rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()
plt.plot(rewards_smoothed)
plt.xlabel("Episode")
plt.ylabel("Episode Reward (Smoothed)")
title = "Episode Reward over Time (Smoothed over window size {})".format(smoothing_window)
if goal_value is not None:
plt.axhline(goal_value, color='g', linestyle='dashed')
title = "Episode Reward over Time (Smoothed over window size" \
" " + str(smoothing_window) + ", goal value " + str(goal_value) + ")"
plt.title(title)
if noshow:
plt.close(fig2)
else:
        plt.show()
# Plot time steps and episode number
fig3 = plt.figure(figsize=fig_size)
plt.plot(np.cumsum(stats.episode_lengths), np.arange(len(stats.episode_lengths)))
plt.xlabel("Time Steps")
plt.ylabel("Episode")
plt.title("Episode per time step")
if noshow:
plt.close(fig3)
else:
        plt.show()
if show_params:
# Plot Epsilon over episode
fig4 = plt.figure(figsize=(15, 8))
plt.plot(np.arange(n_episodes), stats.episode_epsilon)
plt.xlabel("Episode t")
plt.ylabel("Epsilon")
plt.title("Epsilon over episode using ada_divisor of {}".format(ada_divisor))
if noshow:
plt.close(fig4)
else:
            plt.show()
# Plot Epsilon over episode
fig5 = plt.figure(figsize=(15, 8))
plt.plot(np.arange(n_episodes), stats.episode_alpha)
plt.xlabel("Episode t")
plt.ylabel("Alpha")
plt.title("Alpha over episode using ada_divisor of {}".format(ada_divisor))
if noshow:
plt.close(fig5)
else:
            plt.show()
return fig1, fig2, fig3, fig4, fig5
def plot_durations(episode_durations, is_ipython):
plt.figure(2)
plt.clf()
durations_t = torch.tensor(episode_durations, dtype=torch.float)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
# Take 100 episode averages and plot them too
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.pause(0.001) # pause a bit so that plots are updated
if is_ipython:
from IPython import display
display.clear_output(wait=True)
display.display(plt.gcf())
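# A minimal usage sketch (synthetic stats; a real training loop would fill the
# namedtuple as it runs):
# n = 100
# stats = EpisodeStats(episode_lengths=np.random.randint(10, 200, n),
#                      episode_rewards=np.random.randn(n).cumsum(),
#                      episode_epsilon=np.linspace(1.0, 0.1, n),
#                      episode_alpha=np.linspace(0.5, 0.1, n))
# plot_episode_stats(stats, n_episodes=n, noshow=True, show_params=True)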
|
[
"alestam93@gmail.com"
] |
alestam93@gmail.com
|
7325633f451eb4099de253832dc888fc40c95c22
|
4a58ba9f8922fcc5a5c17658bb3ac420e2d12a2f
|
/Ddmall001/jktest.py
|
8191c56867623a939e63beddfc9db2b3a7cf63c6
|
[] |
no_license
|
xixils520/guest
|
92838da49a7087f997eead32da25a810c3edbabf
|
177b2415ddcb393c7e7c7c5c5b97b3b5deca73f5
|
refs/heads/master
| 2021-04-26T22:07:40.134536
| 2018-03-06T05:31:50
| 2018-03-06T05:31:50
| 124,025,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# coding=utf-8
import requests
import json
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
    'Content-Type': 'application/json;charset=UTF-8',
}
login_url = 'http://192.168.1.101:52400/user/login'
login_data = '{"username":"12345678910","password":"123456"}'
login_response = requests.post(url=login_url, data=login_data, headers=headers)
data = json.loads(login_response.text)
print(json.dumps(data, sort_keys=True, indent=2, ensure_ascii=False))
|
[
"xixils520@gmail.com"
] |
xixils520@gmail.com
|
0af80fed1a7126442b14af7f6a1141a480477074
|
747f759311d404af31c0f80029e88098193f6269
|
/extra-addons/training/migrations/0.3/post-10-update_course_wkf.py
|
f69865fe29f835caa11d7778b4ad27f4cf733649
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958
| 2013-09-15T13:03:36
| 2013-09-15T13:03:36
| 9,648,444
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
__name__ = "Update Course Workflow"
def migrate(cr, v):
cr.execute("UPDATE wkf_instance w SET state = 'active' WHERE res_type = 'training.course' AND EXISTS (SELECT 1 FROM training_course WHERE id = w.res_id AND state_course = 'validated')")
|
[
"geerish@omerp.net"
] |
geerish@omerp.net
|
f484491335cc03a29a5d5869e624a809b2129b33
|
06ef2a332d2d0f4679a3bcd4c1c22b884afc8ef0
|
/wafers/docs/conf.py
|
7eb4dea27602f2016c34b3bd1f03755bff13a305
|
[
"MIT"
] |
permissive
|
anmol101093AIML/mlops_main
|
64d5c054cef924ad66cedef17babb03b9017146e
|
087be03bb6f651d766924deeee7ee09e7185a194
|
refs/heads/main
| 2023-08-04T19:32:32.859214
| 2021-09-23T09:36:18
| 2021-09-23T09:36:18
| 409,082,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,780
|
py
|
# -*- coding: utf-8 -*-
#
# wafers documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wafers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wafersdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'wafers.tex',
u'wafers Documentation',
u"anmol_dhankhar", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wafers', u'wafers Documentation',
[u"anmol_dhankhar"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wafers', u'wafers Documentation',
u"anmol_dhankhar", 'wafers',
     'A wafer project using MLOps to detect whether a semiconductor wafer is okay or not', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
[
"er.anmol.dhankhar93@gmail.com"
] |
er.anmol.dhankhar93@gmail.com
|
1f5f4d9aafa3c3abb5ad2c92c1f47e4ff5fe4d89
|
bfd75153048a243b763614cf01f29f5c43f7e8c9
|
/1906101111-柏春宇/day0306/作业1.py
|
0d355bdd03882010d791f76d1c732540f475436f
|
[] |
no_license
|
gschen/sctu-ds-2020
|
d2c75c78f620c9246d35df262529aa4258ef5787
|
e1fd0226b856537ec653c468c0fbfc46f43980bf
|
refs/heads/master
| 2021-01-01T11:06:06.170475
| 2020-07-16T03:12:13
| 2020-07-16T03:12:13
| 239,245,834
| 17
| 10
| null | 2020-04-18T13:46:24
| 2020-02-09T04:22:05
|
Python
|
UTF-8
|
Python
| false
| false
| 157
|
py
|
class PersonInfo():
    name = '柏春宇'
    age = 18
    sex = '男'
    def P(self, name, age, sex):
        print(name, age, sex)
X = PersonInfo()
X.P(X.name, X.age, X.sex)  # call the method; a bare print(X.P) would only show the bound-method object
|
[
"2785504109qq.com"
] |
2785504109qq.com
|
718d9eea88040e361a023abf020c4fc138312f0d
|
3e306d0ec56608259e36c9fe28c95ab5bd58147c
|
/keras/tests/keras_doctest.py
|
139432849685fe3ff49519a92016c6bd872d1936
|
[
"Apache-2.0"
] |
permissive
|
Alan-love/keras
|
8012319eb3f88bfb3806e9df913f62b442701137
|
6c392b5ad96fb47a05019e6dda42d2af1f1ec08e
|
refs/heads/master
| 2023-08-22T17:44:36.217261
| 2022-03-29T23:06:19
| 2022-03-29T23:06:50
| 209,978,278
| 0
| 0
|
Apache-2.0
| 2022-03-31T03:09:20
| 2019-09-21T12:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,534
|
py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run doctests for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
from absl.testing import absltest
from keras.testing_infra import keras_doctest_lib
import numpy as np
import tensorflow.compat.v2 as tf  # a plain `import tensorflow as tf` here would be immediately shadowed
tf.compat.v1.enable_v2_behavior()
# We put doctest after absltest so that it picks up the unittest monkeypatch.
# Otherwise doctest tests aren't runnable at all.
import doctest # pylint: disable=g-import-not-at-top,g-bad-import-order
FLAGS = flags.FLAGS
flags.DEFINE_string('module', None, 'A specific module to run doctest on.')
flags.DEFINE_boolean('list', None,
'List all the modules in the core package imported.')
flags.DEFINE_string('file', None, 'A specific file to run doctest on.')
flags.mark_flags_as_mutual_exclusive(['module', 'file'])
flags.mark_flags_as_mutual_exclusive(['list', 'file'])
PACKAGE = 'keras.'
def find_modules():
"""Finds all the modules in the core package imported.
Returns:
A list containing all the modules in tensorflow.python.
"""
tf_modules = []
for name, module in sys.modules.items():
if name.startswith(PACKAGE):
tf_modules.append(module)
return tf_modules
def filter_on_submodules(all_modules, submodule):
"""Filters all the modules based on the module flag.
The module flag has to be relative to the core package imported.
For example, if `submodule=keras.layers` then, this function will return
all the modules in the submodule.
Args:
all_modules: All the modules in the core package.
submodule: Submodule to filter from all the modules.
Returns:
All the modules in the submodule.
"""
filtered_modules = [
mod for mod in all_modules if PACKAGE + submodule in mod.__name__
]
return filtered_modules
def get_module_and_inject_docstring(file_path):
"""Replaces the docstring of the module with the changed file's content.
Args:
file_path: Path to the file
Returns:
A list containing the module changed by the file.
"""
file_path = os.path.abspath(file_path)
mod_index = file_path.find(PACKAGE.replace('.', os.sep))
file_mod_name, _ = os.path.splitext(file_path[mod_index:])
file_module = sys.modules[file_mod_name.replace(os.sep, '.')]
with open(file_path, 'r') as f:
content = f.read()
file_module.__doc__ = content
return [file_module]
class TfTestCase(tf.test.TestCase):
def set_up(self, _):
self.setUp()
def tear_down(self, _):
self.tearDown()
def load_tests(unused_loader, tests, unused_ignore):
"""Loads all the tests in the docstrings and runs them."""
tf_modules = find_modules()
if FLAGS.module:
tf_modules = filter_on_submodules(tf_modules, FLAGS.module)
if FLAGS.list:
print('**************************************************')
for mod in tf_modules:
print(mod.__name__)
print('**************************************************')
return tests
if FLAGS.file:
tf_modules = get_module_and_inject_docstring(FLAGS.file)
for module in tf_modules:
testcase = TfTestCase()
tests.addTests(
doctest.DocTestSuite(
module,
test_finder=doctest.DocTestFinder(exclude_empty=False),
extraglobs={
'tf': tf,
'np': np,
'os': os
},
setUp=testcase.set_up,
tearDown=testcase.tear_down,
checker=keras_doctest_lib.KerasDoctestOutputChecker(),
optionflags=(doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
| doctest.IGNORE_EXCEPTION_DETAIL
| doctest.DONT_ACCEPT_BLANKLINE),
))
return tests
if __name__ == '__main__':
absltest.main()
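# Typical invocations (the module/file flags are mutually exclusive, per the flag
# definitions above):
#   python keras_doctest.py --module=layers
#   python keras_doctest.py --file=keras/layers/core.py
#   python keras_doctest.py --list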
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
0bf4b60409f8963c2d70cea39cd3c61da4cea456
|
252ba0216ce9e38bc4375faf869e7c91fa153c92
|
/Lightwave/materials.py
|
ad16360206707dd42a0a20e7a20f9204e4eb20df
|
[] |
no_license
|
kurainooni/Final_Fantasy_VII_Remake
|
07c45a330858a41a6bad3f0b3fa915ce9fbf7372
|
5f9fac23bb705a3753a51f62ed80ecb5e36b78ab
|
refs/heads/master
| 2022-04-23T00:39:22.816357
| 2020-04-22T19:03:33
| 2020-04-22T19:03:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,750
|
py
|
#PBR Surface Setup by Ryan Roye
import lwsdk
import os
from tempfile import NamedTemporaryFile
from utils import *
CoordinateList = {
"Color": "-600 -220",
"Alpha": "-1280 -740",
"Normal": "-540 -800",
"Opacity": "-280 -100"
}
UV_count = lwsdk.LWObjectFuncs().numVMaps(lwsdk.LWVMAP_TXUV)
def createMaterial(files, vertexGroups, materials, surf):
textures = getMaterialTypes(files)
"""
textures: {
'NP0002_00_Body': {'NP0002_00_Body_C', 'NP0002_00_Body_A', 'NP0002_00_Body_N'},
'NP0002_00_Eye': {'NP0002_00_Eye_C'},
'NP0002_00_Eyelash': {'NP0002_00_Head_N', 'NP0002_00_Head_C'},
'NP0002_00_Hair': {'NP0002_00_Hair_C', 'NP0002_00_Hair_A', 'NP0002_00_Hair_N'},
'NP0002_00_Head': {'NP0002_00_Head_N', 'NP0002_00_Head_C'},
'NP0002_00_Mouth': set(),
'NP0002_00_Skin': {'NP0002_00_Body_C', 'NP0002_00_Body_N'}
}
"""
texture_list = textures[surf]
NodeBuild = []
NodeBuild.append("{ Surface\n")
NodeBuild.append(" \"Default\"\n")
NodeBuild.append(" SmoothingAngle 1.5625\n")
NodeBuild.append(" CompatibilityVersion 77b\n")
NodeBuild.append(" { Nodes\n")
NodeBuild.append(" { Root\n")
NodeBuild.append(" Location 0 0\n")
NodeBuild.append(" Zoom 1\n")
NodeBuild.append(" Disabled 1\n")
NodeBuild.append(" }\n")
NodeBuild.append(" Version 1\n")
NodeBuild.append(" { Nodes\n")
NodeBuild.append(" Server \"Surface\"\n")
NodeBuild.append(" { Tag\n")
NodeBuild.append(" RealName \"Surface\"\n")
NodeBuild.append(" Name \"Surface\"\n")
NodeBuild.append(" Coordinates -1890 -561\n")
NodeBuild.append(" Mode 1\n")
NodeBuild.append(" Selected 0\n")
NodeBuild.append(" { Data\n")
NodeBuild.append(" }\n")
NodeBuild.append(" Preview \"\"\n")
NodeBuild.append(" }\n")
NodeBuild.append(" Server \"Input\"\n")
NodeBuild.append(" { Tag\n")
NodeBuild.append(" RealName \"Input\"\n")
NodeBuild.append(" Name \"Input\"\n")
NodeBuild.append(" Coordinates 0 -289\n")
NodeBuild.append(" Mode 0\n")
NodeBuild.append(" Selected 0\n")
NodeBuild.append(" { Data\n")
NodeBuild.append(" }\n")
NodeBuild.append(" Preview \"Item ID\"\n")
NodeBuild.append(" }\n")
# Add material node
if 'head' in surf.lower() or 'skin' in surf.lower():
NodeBuild.append(" Server \"Skin\"\n")
NodeBuild.append(" { Tag\n")
NodeBuild.append(" RealName \"Skin\"\n")
NodeBuild.append(" Name \"Skin (1)\"\n")
NodeBuild.append(" Coordinates -1260 -480\n")
'''
NodeBuild.append(" { Data\n")
NodeBuild.append(" { Attributes\n")
NodeBuild.append(" { Metadata\n")
NodeBuild.append(" Version 1\n")
NodeBuild.append(" Enumerations 0\n")
NodeBuild.append(" { AttributeData\n")
#color
NodeBuild.append(" { Attr\n")
NodeBuild.append(" Name \"Epidermis Color\"\n")
NodeBuild.append(" Flags 0\n")
NodeBuild.append(" Tag \"ENVELOPE\" \"On\"\n")
NodeBuild.append(" Tag \"FORMAT\" \"Color\"\n")
NodeBuild.append(" Tag \"NodeInputID\" \"\"\n")
NodeBuild.append(" { Value\n")
NodeBuild.append(" \"vparam3\"\n")
NodeBuild.append(" { Value\n")
NodeBuild.append(" 3\n")
NodeBuild.append(" 0.9881 0.9098 0.7685\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
'''
else:
NodeBuild.append(" Server \"Principled BSDF\"\n")
NodeBuild.append(" { Tag\n")
NodeBuild.append(" RealName \"Principled BSDF\"\n")
NodeBuild.append(" Name \"Principled BSDF (1)\"\n")
NodeBuild.append(" Coordinates -894 -393\n")
NodeBuild.append(" { Data\n")
NodeBuild.append(" { Attributes\n")
NodeBuild.append(" { Metadata\n")
NodeBuild.append(" Version 1\n")
NodeBuild.append(" Enumerations 0\n")
NodeBuild.append(" { AttributeData\n")
#color
NodeBuild.append(" { Attr\n")
NodeBuild.append(" Name \"Color\"\n")
NodeBuild.append(" Flags 0\n")
NodeBuild.append(" Tag \"ENVELOPE\" \"On\"\n")
NodeBuild.append(" Tag \"FORMAT\" \"Percent\"\n")
NodeBuild.append(" Tag \"NodeInputID\" \"\"\n")
NodeBuild.append(" { Value\n")
NodeBuild.append(" \"vparam3\"\n")
NodeBuild.append(" { Value\n")
NodeBuild.append(" 3\n")
NodeBuild.append(" 0.5 0.5 0.5\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
#specular
NodeBuild.append(" { Attr\n")
NodeBuild.append(" Name \"Specular\"\n")
NodeBuild.append(" Flags 0\n")
NodeBuild.append(" Tag \"ENVELOPE\" \"On\"\n")
NodeBuild.append(" Tag \"FORMAT\" \"Percent\"\n")
NodeBuild.append(" Tag \"NodeInputID\" \"\"\n")
NodeBuild.append(" { Value\n")
NodeBuild.append(" \"vparam\"\n")
NodeBuild.append(" { Value\n")
NodeBuild.append(" 1\n")
NodeBuild.append(" 0\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
#roughness
NodeBuild.append(" { Attr\n")
NodeBuild.append(" Name \"Roughness\"\n")
NodeBuild.append(" Flags 0\n")
NodeBuild.append(" Tag \"ENVELOPE\" \"On\"\n")
NodeBuild.append(" Tag \"FORMAT\" \"Percent\"\n")
NodeBuild.append(" Tag \"NodeInputID\" \"\"\n")
NodeBuild.append(" { Value\n")
NodeBuild.append(" \"vparam\"\n")
NodeBuild.append(" { Value\n")
NodeBuild.append(" 1\n")
NodeBuild.append(" 1\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
#close
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" Preview \"Material\"\n")
NodeBuild.append(" }\n")
for t in texture_list:
loc = (0,0)
the_file = ""
UVMap = lwsdk.LWObjectFuncs().vmapName(lwsdk.LWVMAP_TXUV, 0)
if t[-2:] == '_C':
the_file = files.data['tex_dir'] + "\\" + t + ".png"
loc = CoordinateList["Color"]
Server = " Server \"Image\"\n"
RealName = " RealName \"Image\"\n"
Name = " Name \"Image (1)\"\n"
elif t[-2:] == '_N':
the_file = files.data['tex_dir'] + "\\" + t + ".png"
loc = CoordinateList["Normal"]
Server = " Server \"NormalMap\"\n"
RealName = " RealName \"NormalMap\"\n"
Name = " Name \"NormalMap (1)\"\n"
elif t[-2:] == '_A':
the_file = files.data['tex_dir'] + "\\" + t + ".png"
loc = CoordinateList["Alpha"]
Server = " Server \"Image\"\n"
RealName = " RealName \"Image\"\n"
Name = " Name \"Image (2)\"\n"
elif t[-2:] == '_O':
the_file = files.data['tex_dir'] + "\\" + t + ".png"
loc = CoordinateList["Opacity"]
if UV_count > 1:
UVMap = lwsdk.LWObjectFuncs().vmapName(lwsdk.LWVMAP_TXUV, 1)
Server = " Server \"Image\"\n"
RealName = " RealName \"Image\"\n"
Name = " Name \"Image (3)\"\n"
NodeBuild.append(Server)
NodeBuild.append(" { Tag\n")
NodeBuild.append(RealName)
NodeBuild.append(Name)
NodeBuild.append(" Coordinates " + loc + "\n")
NodeBuild.append(" { Data\n")
if t[-2:] == '_A':
NodeBuild.append(" InvertColor 1\n")
NodeBuild.append(" { Image\n")
if os.path.exists(the_file):
NodeBuild.append(" { Clip\n")
NodeBuild.append(" { Still\n")
NodeBuild.append(" \"" + the_file + "\"\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" Mapping 5\n")
NodeBuild.append(" UV \"" + UVMap + "\"\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
#make them there connections
NodeBuild.append(" { Connections\n")
NodeBuild.append(" NodeName \"Surface\"\n")
NodeBuild.append(" InputName \"Material\"\n")
if 'head' in surf.lower() or 'skin' in surf.lower():
NodeBuild.append(" InputNodeName \"Skin (1)\"\n")
else:
NodeBuild.append(" InputNodeName \"Principled BSDF (1)\"\n")
NodeBuild.append(" InputOutputName \"Material\"\n")
for t in texture_list:
if t[-2:] == '_A':
NodeBuild.append(" NodeName \"Surface\"\n")
NodeBuild.append(" InputName \"Clip\"\n")
NodeBuild.append(" InputNodeName \"Image (2)\"\n")
NodeBuild.append(" InputOutputName \"Color\"\n")
elif t[-2:] == '_C':
if 'head' in surf.lower() or 'skin' in surf.lower():
NodeBuild.append(" NodeName \"Skin (1)\"\n")
NodeBuild.append(" InputName \"Epidermis Color\"\n")
NodeBuild.append(" InputNodeName \"Image (1)\"\n")
NodeBuild.append(" InputOutputName \"Color\"\n")
NodeBuild.append(" NodeName \"Skin (1)\"\n")
NodeBuild.append(" InputName \"Dermis Color\"\n")
NodeBuild.append(" InputNodeName \"Image (1)\"\n")
NodeBuild.append(" InputOutputName \"Color\"\n")
else:
NodeBuild.append(" NodeName \"Principled BSDF (1)\"\n")
NodeBuild.append(" InputName \"Color\"\n")
NodeBuild.append(" InputNodeName \"Image (1)\"\n")
NodeBuild.append(" InputOutputName \"Color\"\n")
elif t[-2:] == '_N':
if 'head' in surf.lower() or 'skin' in surf.lower():
NodeBuild.append(" NodeName \"Skin (1)\"\n")
else:
NodeBuild.append(" NodeName \"Principled BSDF (1)\"\n")#
NodeBuild.append(" InputName \"Normal\"\n")
NodeBuild.append(" InputNodeName \"NormalMap (1)\"\n")
NodeBuild.append(" InputOutputName \"Normal\"\n")
elif t[-2:] == '_O':
NodeBuild.append(" NodeName \"Image (1)\"\n")
NodeBuild.append(" InputName \"Opacity\"\n")
NodeBuild.append(" InputNodeName \"Image (3)\"\n")
NodeBuild.append(" InputOutputName \"Color\"\n")
# End connections segment
NodeBuild.append(" }\n")
NodeBuild.append(" }\n")
NodeBuild.append("}\n")
# Complete the node build by putting it together
NodeBuild = "".join(NodeBuild)
f = NamedTemporaryFile(mode='w+', delete=False)
f.write(NodeBuild)
f.close()
lwsdk.command("Surf_LoadText \"" + f.name + "\"")
os.unlink(f.name) # delete the file
def process_surfaces(files, vertexGroups, materials):
for k in range(len(vertexGroups)):
shader = materials[k]['name']
surface_funcs = lwsdk.LWSurfaceFuncs()
surface = surface_funcs.first()
while surface:
if surface_funcs.name(surface) == shader:
#print "\r"
#print surface_funcs.name(surface)
lwsdk.command('Surf_SetSurf ' + shader)
createMaterial(files, vertexGroups, materials, shader)
break
surface = surface_funcs.next(surface)
|
[
"noreply@github.com"
] |
kurainooni.noreply@github.com
|
279ccefd70a9115811b5d371952be5c52f8d5260
|
1bfad01139237049eded6c42981ee9b4c09bb6de
|
/RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/ldpv6loopbackinterface.py
|
084aae0f6e61055ee45471d7538e4d9e4c44f3f6
|
[
"MIT"
] |
permissive
|
kakkotetsu/IxNetwork
|
3a395c2b4de1488994a0cfe51bca36d21e4368a5
|
f9fb614b51bb8988af035967991ad36702933274
|
refs/heads/master
| 2020-04-22T09:46:37.408010
| 2019-02-07T18:12:20
| 2019-02-07T18:12:20
| 170,284,084
| 0
| 0
|
MIT
| 2019-02-12T08:51:02
| 2019-02-12T08:51:01
| null |
UTF-8
|
Python
| false
| false
| 14,034
|
py
|
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Ldpv6LoopbackInterface(Base):
"""The Ldpv6LoopbackInterface class encapsulates a user managed ldpv6LoopbackInterface node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Ldpv6LoopbackInterface property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'ldpv6LoopbackInterface'
def __init__(self, parent):
super(Ldpv6LoopbackInterface, self).__init__(parent)
@property
def Active(self):
"""Activate/Deactivate Configuration
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('active')
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def Errors(self):
"""A list of errors that have occurred
Returns:
list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))
"""
return self._get_attribute('errors')
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def SessionStatus(self):
"""Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
Returns:
list(str[down|notStarted|up])
"""
return self._get_attribute('sessionStatus')
@property
def StateCounts(self):
"""A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Returns:
dict(total:number,notStarted:number,down:number,up:number)
"""
return self._get_attribute('stateCounts')
@property
def Status(self):
"""Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
str(configured|error|mixed|notStarted|started|starting|stopping)
"""
return self._get_attribute('status')
def add(self, Name=None):
"""Adds a new ldpv6LoopbackInterface node on the server and retrieves it in this instance.
Args:
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns:
self: This instance with all currently retrieved ldpv6LoopbackInterface data using find and the newly added ldpv6LoopbackInterface data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the ldpv6LoopbackInterface data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Count=None, DescriptiveName=None, Errors=None, Name=None, SessionStatus=None, StateCounts=None, Status=None):
"""Finds and retrieves ldpv6LoopbackInterface data from the server.
All named parameters support regex and can be used to selectively retrieve ldpv6LoopbackInterface data from the server.
By default the find method takes no parameters and will retrieve all ldpv6LoopbackInterface data from the server.
Args:
Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
			DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context
Errors (list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))): A list of errors that have occurred
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
			SessionStatus (list(str[down|notStarted|up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Status (str(configured|error|mixed|notStarted|started|starting|stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
self: This instance with matching ldpv6LoopbackInterface data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of ldpv6LoopbackInterface data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the ldpv6LoopbackInterface data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None):
"""Base class infrastructure that gets a list of ldpv6LoopbackInterface device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
Active (str): optional regex of active
Returns:
list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def FetchAndUpdateConfigFromCloud(self, Mode):
"""Executes the fetchAndUpdateConfigFromCloud operation on the server.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/globals?deepchild=*|/api/v1/sessions/1/ixnetwork/topology?deepchild=*)): The method internally sets Arg1 to the current href for this instance
Mode (str):
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('FetchAndUpdateConfigFromCloud', payload=locals(), response_object=None)
	def RestartDown(self, SessionIndices=None):
		"""Executes the restartDown operation on the server.
		Stop and start interfaces and sessions that are in Down state.
		Args:
			Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
			SessionIndices (list(number)|str): Optional. Either an array of session numbers (e.g. [0, 1, 2, 3]) or a string of session ranges (e.g. '1-4;6;7-12')
		Raises:
			NotFoundError: The requested resource does not exist on the server
			ServerError: The server has encountered an uncategorized error condition
		"""
		Arg1 = self
		if SessionIndices is None:
			# omit the optional argument from the payload when it is not supplied
			del SessionIndices
		return self._execute('RestartDown', payload=locals(), response_object=None)
	def Start(self, SessionIndices=None):
		"""Executes the start operation on the server.
		Start selected protocols.
		Args:
			Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
			SessionIndices (list(number)|str): Optional. Either an array of session numbers (e.g. [0, 1, 2, 3]) or a string of session ranges (e.g. '1-4;6;7-12')
		Raises:
			NotFoundError: The requested resource does not exist on the server
			ServerError: The server has encountered an uncategorized error condition
		"""
		Arg1 = self
		if SessionIndices is None:
			del SessionIndices
		return self._execute('Start', payload=locals(), response_object=None)
	def Stop(self, SessionIndices=None):
		"""Executes the stop operation on the server.
		Stop selected protocols.
		Args:
			Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
			SessionIndices (list(number)|str): Optional. Either an array of session numbers (e.g. [0, 1, 2, 3]) or a string of session ranges (e.g. '1-4;6;7-12')
		Raises:
			NotFoundError: The requested resource does not exist on the server
			ServerError: The server has encountered an uncategorized error condition
		"""
		Arg1 = self
		if SessionIndices is None:
			del SessionIndices
		return self._execute('Stop', payload=locals(), response_object=None)
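# A minimal usage sketch (not part of the generated file; 'parent' stands for
# whichever NGPF object exposes the Ldpv6LoopbackInterface property, and the
# interface name below is illustrative):
#
#     intf = parent.Ldpv6LoopbackInterface.add(Name='ldpv6-loopback-1')
#     intf.find(Name='ldpv6-loopback-1')
#     intf.Start()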
|
[
"hubert.gee@keysight.com"
] |
hubert.gee@keysight.com
|
74befc7ccc398c0d5a49d7623ec83e2117e95fb6
|
94b6393ada217c41ee27eb85e0278586076e24af
|
/upload_media/migrations/0004_music.py
|
43482fe8f971d1ac1bc6c217e8bb58adf3e4dc47
|
[] |
no_license
|
almubarok/django-media
|
305467b96da0bb5cfaeee62b87fd8ee127eccf86
|
9989dc433f7c1efec392b5268235e4129e497a90
|
refs/heads/master
| 2022-11-24T17:04:03.348725
| 2020-04-08T08:00:18
| 2020-04-08T08:00:18
| 227,626,067
| 0
| 0
| null | 2022-11-22T05:50:17
| 2019-12-12T14:30:58
|
Python
|
UTF-8
|
Python
| false
| false
| 582
|
py
|
# Generated by Django 3.0 on 2019-12-12 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('upload_media', '0003_video'),
]
operations = [
migrations.CreateModel(
name='Music',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='', max_length=255)),
('music', models.FileField(upload_to='musics/')),
],
),
]
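# For reference, a model definition consistent with this migration would look
# like the sketch below (the real upload_media/models.py is not shown here):
#
#     class Music(models.Model):
#         title = models.CharField(default='', max_length=255)
#         music = models.FileField(upload_to='musics/')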
|
[
"arafat@alterra.id"
] |
arafat@alterra.id
|
6a8033396d71abdc7d3752df82ae435b7241f1f7
|
e5cce5ed1a6718e1f2dccc7b50d0a391c856eb64
|
/DeleteBook.py
|
13a737530c11eaff8512dec5709f947b9f94181b
|
[] |
no_license
|
dhruv2504/Minor-Project
|
6b32039e4957d89dde6c9552ae5fb39be6a63974
|
6b89929c0bcba95a1c84ab4ca3d999882d809087
|
refs/heads/main
| 2023-01-30T04:02:42.505016
| 2020-12-05T22:35:40
| 2020-12-05T22:35:40
| 318,790,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,022
|
py
|
from tkinter import *
import PIL
from PIL import ImageTk, Image
from tkinter import messagebox
# import pymysql
# Add your own database name and password here to reflect in the code
import mysql.connector
from mysql.connector import Error
try:
connection = mysql.connector.connect(host="localhost",
database="db",
user="root",
password="aryabhatt2488",
auth_plugin='mysql_native_password'
)
if connection.is_connected():
db_Info = connection.get_server_info()
print("Connected to MySQL Server version ", db_Info)
cursor = connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
print("You're connected to database: ", record)
except Error as e:
print("Error while connecting to MySQL", e)
# Enter Table Names here
issueTable = "books_issued"
bookTable = "books" # Book Table
def deleteBook():
    bid = bookInfo1.get()
    # use parameterized queries so the Book ID cannot inject SQL
    deleteSql = "delete from " + bookTable + " where bid = %s"
    deleteIssue = "delete from " + issueTable + " where bid = %s"
    try:
        cursor.execute(deleteSql, (bid,))
        connection.commit()
        cursor.execute(deleteIssue, (bid,))
        connection.commit()
        messagebox.showinfo('Success', "Book Record Deleted Successfully")
    except Error as e:
        print(e)
        messagebox.showerror('Error', "Please check Book ID")
print(bid)
bookInfo1.delete(0, END)
root.destroy()
def delete():
global bookInfo1, bookInfo2, bookInfo3, bookInfo4, Canvas1, connection, cursor, bookTable, root
root = Tk()
root.title("Library")
root.minsize(width=400, height=400)
root.geometry("600x500")
Canvas1 = Canvas(root)
Canvas1.config(bg="#006B38")
Canvas1.pack(expand=True, fill=BOTH)
headingFrame1 = Frame(root, bg="#FFBB00", bd=5)
headingFrame1.place(relx=0.25, rely=0.1, relwidth=0.5, relheight=0.13)
headingLabel = Label(headingFrame1, text="Delete Book", bg='black', fg='white', font=('Courier', 15))
headingLabel.place(relx=0, rely=0, relwidth=1, relheight=1)
labelFrame = Frame(root, bg='black')
labelFrame.place(relx=0.1, rely=0.3, relwidth=0.8, relheight=0.5)
# Book ID to Delete
lb2 = Label(labelFrame, text="Book ID : ", bg='black', fg='white')
lb2.place(relx=0.05, rely=0.5)
bookInfo1 = Entry(labelFrame)
bookInfo1.place(relx=0.3, rely=0.5, relwidth=0.62)
# Submit Button
SubmitBtn = Button(root, text="SUBMIT", bg='#d1ccc0', fg='black', command=deleteBook)
SubmitBtn.place(relx=0.28, rely=0.9, relwidth=0.18, relheight=0.08)
quitBtn = Button(root, text="Quit", bg='#f7f1e3', fg='black', command=root.destroy)
quitBtn.place(relx=0.53, rely=0.9, relwidth=0.18, relheight=0.08)
root.mainloop()
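# Usage sketch: the MySQL connection above is opened at import time, so this
# module is meant to be imported and driven from a main menu, e.g.
#
#     import DeleteBook
#     DeleteBook.delete()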
|
[
"noreply@github.com"
] |
dhruv2504.noreply@github.com
|
1e86b553f4146e59a4c536f829f36cbc0ab0f2c4
|
9996e690148176151e694944cce144e865d6c45e
|
/webapp/news/views.py
|
04c2c1a137d3ac16213de36f3ff4da5a1f9f4c5c
|
[] |
no_license
|
r2d2-lex/flaskapp1
|
fc3d53ce1bfca67d91c95b5c38eee2867069829a
|
4483d3aca054ea8954c38453c1d4e0fdf52b9f03
|
refs/heads/master
| 2022-12-10T18:54:42.157389
| 2020-08-17T10:58:48
| 2020-08-17T10:58:48
| 227,061,321
| 0
| 0
| null | 2022-12-08T03:21:12
| 2019-12-10T08:00:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,865
|
py
|
from flask import abort, Blueprint, flash, current_app, render_template, redirect, request, url_for
from flask_login import current_user, login_required
from webapp import db
from webapp.news.forms import CommentForm
from webapp.weather import wether_by_city
from webapp.news.models import Comment, News
from webapp.utils import get_redirect_target
blueprint = Blueprint('news', __name__)
@blueprint.route('/')
def index():
weather = wether_by_city(current_app.config['WEATHER_CITY'])
page_title = "Прогноз погоды"
news_list = News.query.filter(News.text.isnot(None)).order_by(News.published.desc()).all()
print(news_list)
return render_template('news/index.html', weather=weather, page_title=page_title, news_list=news_list)
@blueprint.route('/news/<int:news_id>')
def single_news(news_id):
my_news = News.query.filter(News.id == news_id).first()
if not my_news:
abort(404)
comments_form = CommentForm(news_id=my_news.id)
return render_template('news/view_one.html', page_title=my_news.title, news=my_news, comments_form=comments_form)
@blueprint.route('/news/comment', methods=['POST'])
@login_required
def add_comment():
form = CommentForm()
if form.validate_on_submit():
comment = Comment(text=form.comment_text.data, news_id=form.news_id.data, user_id=current_user.id)
db.session.add(comment)
db.session.commit()
        flash('Comment added')
    else:
        for field, errors in form.errors.items():
            for error in errors:
                flash('Error in the "{}" field: {}'.format(getattr(form, field).label.text, error))
        print(form.errors)
        flash('Please fix the errors in the form')
#return redirect(request.referrer)
return redirect(get_redirect_target())
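# For context, get_redirect_target (imported from webapp.utils above) typically
# follows the well-known Flask "safe redirect" snippet; a sketch of one common
# implementation (assumed, not taken from this repository):
#
#     from urllib.parse import urlparse, urljoin
#     from flask import request, url_for
#
#     def get_redirect_target():
#         for target in (request.args.get('next'), request.referrer):
#             if not target:
#                 continue
#             ref_url = urlparse(request.host_url)
#             test_url = urlparse(urljoin(request.host_url, target))
#             if test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc:
#                 return target
#         return url_for('news.index')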
|
[
"r2d2-lex@yandex.ru"
] |
r2d2-lex@yandex.ru
|
c343c3e9b33bc01ae4d7ba6fb1d5b3911a661cbd
|
5d2ad50bdf4e25b4b99c089a0589a1188235613e
|
/loop_practice.py
|
e410d3eb6b0cef4d38600efecbdda0b91ea015a0
|
[] |
no_license
|
Rashine/pythonpractice
|
7def4e23ad25adb8c6e643d5e6cf566674b3b31c
|
4bb7b405b540f5b07b915d412a19061bb8fa36d4
|
refs/heads/master
| 2020-04-11T22:25:43.039141
| 2019-06-01T05:45:55
| 2019-06-01T05:45:55
| 162,135,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
# x = 0
# for a in range(10):
# x = x + a
# print (x)
# for c in "coooooffffff":
# print(c)
# x = int(input("How many times did you yell?"))
# if x:
# for i in range(0, x):
# print("Clean up your room!")
# for n in range(1,21):
# if n%2 == 1:
# if n == 13:
# print(f"{n} is odd and unlucky")
# else:
# print(f"{n} is odd")
# else:
# if n == 4:
# print(f"{n} is even and unlucky")
# else:
# print(F"{n} is even")
# user_resp = None
# while user_resp != "yeah":
# user_resp = input ("Majajajajajajajajaja\n")
# n = 1
# while n <= 9:
# print("\U0001f600"*n)
# print("\n")
# n += 1
# user_resp = input ("Hey how's it going?\n")
# while user_resp != "Stop copying me":
# user_resp = input(f"{user_resp}\n")
# print("UGH FINE YOU WIN")
|
[
"w9w5w1w7w3w@gmail.com"
] |
w9w5w1w7w3w@gmail.com
|
3a8632abdfa8159d10e1ece8ba820c1d36b218a6
|
ad850de9c58a629c92f41522880c6e8df16501b6
|
/intellidata-sam-app/employees/employeepost/employeepost.py
|
195b3b825164376c0cb9acb244d53c724b637b5a
|
[] |
no_license
|
svjt78/intellidata-sam-app
|
0639e3a054d62901a60e66954e9088b89c89cbfb
|
9004b41a149df68ad99810b951baac0d1bfcea4a
|
refs/heads/master
| 2022-12-05T10:43:06.678018
| 2020-09-04T05:18:26
| 2020-09-04T05:18:26
| 285,724,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,323
|
py
|
import boto3
import json
import uuid
import time
def lambda_handler(event, context):
recordId = str(uuid.uuid4())
# ts = datetime.datetime.now().timestamp()
ts = int(round(time.time() * 1000))
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('intellidataEmployeeTable')
data=json.loads(event['body'])
try:
table.put_item(
Item={
'EMPLOYEE_ID': data["employeeid"],
'LOCAL_ID': data["id"],
'ITEM_ID': ts,
'SSN': data["ssn"],
'NAME': data["name"],
'SLUG': data["slug"],
'GENDERCODE': data["gendercode"],
'AGE': data["age"],
'BIRTHDATE': data["birthdate"],
'MARITALSTATUS': data["maritalstatus"],
'HOME_ADDRESS_LINE_1': data["home_address_line_1"],
'HOME_ADDRESS_LINE_2': data["home_address_line_2"],
'HOME_CITY': data["home_city"],
'HOME_STATE': data["home_state"],
'HOME_ZIPCODE': data["home_zipcode"],
'MAIL_ADDRESS_LINE_1': data["mail_address_line_1"],
'MAIL_ADDRESS_LINE_2': data["mail_address_line_2"],
'MAIL_CITY': data["mail_city"],
'MAIL_STATE': data["mail_state"],
'MAIL_ZIPCODE': data["mail_zipcode"],
'WORK_ADDRESS_LINE_1': data["work_address_line_1"],
'WORK_ADDRESS_LINE_2': data["work_address_line_2"],
'WORK_CITY': data["work_city"],
'WORK_STATE': data["work_state"],
'WORK_ZIPCODE': data["work_zipcode"],
'EMAIL': data["email"],
'ALTERNATE_EMAIL': data["alternate_email"],
'HOME_PHONE': data["home_phone"],
'WORK_PHONE': data["work_phone"],
'MOBILE_PHONE': data["mobile_phone"],
'ENROLLMENT_METHOD': data["enrollment_method"],
'EMPLOYMENT_INFORMATION': data["employment_information"],
'EMPLOYER': data["employer"],
'CREATOR': data["creator"],
'EMPLOYEE_DATE': data["employee_date"],
'SMS': data["sms"],
'EMAILER': data["emailer"],
'SOURCE': data["source"],
'EMPLOYERID': data["employerid"],
'ARTEFACT': data["artefact"],
'CONNECTION': data["backend_SOR_connection"],
'RESPONSE': data["response"],
'COMMIT_INDICATOR': data["commit_indicator"],
'RECORD_STATUS': data["record_status"],
}
)
    except Exception as e:
        print(e)
        print('Error writing data to intellidataEmployeeTable')
        raise e
    # return an API Gateway-style success response (a minimal addition; the
    # exact response shape expected by the caller is assumed)
    return {
        'statusCode': 200,
        'body': json.dumps({'recordId': recordId, 'itemId': ts})
    }
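# A hedged local invocation sketch (illustrative only): every field read via
# data["..."] above must be present in the JSON body, otherwise a KeyError is
# raised before put_item runs.
#
#     if __name__ == '__main__':
#         sample = {'employeeid': 'E1', 'id': '1', 'ssn': '000-00-0000'}
#         # ...plus the remaining fields accessed in lambda_handler
#         print(lambda_handler({'body': json.dumps(sample)}, None))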
|
[
"suvojitdutta@suvojits-mbp.myfiosgateway.com"
] |
suvojitdutta@suvojits-mbp.myfiosgateway.com
|
012e27e8cf2851391772be85e3be09f3c2510d71
|
fbe9dbaff2dfa245546a37f43478fd75f5330928
|
/app.py
|
cf064239ff1a44e554fece3cbeb60ac0bb494eab
|
[] |
no_license
|
MrLokans/moonreader_editor
|
ed0eaf988da739e92f79467659c9cf505cd0f3ec
|
16123514e850fde58b624f312d6bd829385f1eee
|
refs/heads/master
| 2020-04-14T23:40:55.369510
| 2016-09-14T18:26:37
| 2016-09-14T18:26:37
| 68,024,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,928
|
py
|
import os
import sys
import logging
from moonreader_tools.handlers import FilesystemDownloader
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
    QApplication,
    QAction,
    QFileDialog,
    QTextEdit,
    QMainWindow,
    QTableWidget,
    QTableWidgetItem,
    QMenu
)
logger = logging.getLogger('GUI')
HOME_DIR = os.path.expanduser('~')
HOME_DIR = '{}/Dropbox/Books/.Moon+/Cache'.format(HOME_DIR)
BOOK_TABLE_COLUMNS = 4
BOOK_TABLE_HEADER = ['title', 'pages', 'percentage', 'notes']
class NumberTableItem(QTableWidgetItem):
# This class should later be used for custom
# table item sorting
def __lt__(self, other):
if isinstance(other, QTableWidgetItem):
value = self.data(Qt.EditRole)
other_value = other.data(Qt.EditRole)
try:
return float(value) < float(other_value)
except (ValueError, TypeError):
pass
return super().__lt__(other)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.books = []
self.initUI()
def initUI(self):
self.booksTable = QTableWidget(1, BOOK_TABLE_COLUMNS, parent=self)
self.booksTable.setHorizontalHeaderLabels(BOOK_TABLE_HEADER)
self.booksTable.setContextMenuPolicy(Qt.CustomContextMenu)
self.textEdit = QTextEdit()
self.setCentralWidget(self.booksTable)
self.statusBar()
self.booksTable.customContextMenuRequested.connect(self.saveBookMenu)
openFile = QAction('Open', self)
openFile.setShortcut('Ctrl+O')
openFile.setStatusTip('Open new File')
openFile.triggered.connect(self.showDialog)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(openFile)
self.setGeometry(450, 450, 450, 400)
self.setWindowTitle('Moonreader editor')
self.show()
def saveBookMenu(self, position):
menu = QMenu()
saveAction = menu.addAction("Save book")
action = menu.exec_(self.booksTable.mapToGlobal(position))
if action == saveAction:
bookTableItem = self.booksTable.itemAt(position)
bookRowNumber = bookTableItem.row()
correspondingBook = self.books[bookRowNumber]
title = self.booksTable.item(bookRowNumber, 0)
pages = self.booksTable.item(bookRowNumber, 1)
percentage = self.booksTable.item(bookRowNumber, 2)
notes = self.booksTable.item(bookRowNumber, 3)
correspondingBook.title = title.text()
correspondingBook.pages = int(pages.text())
correspondingBook.percentage = float(percentage.text())
correspondingBook.notes = int(notes.text())
correspondingBook.save()
def showDialog(self):
search_dir = QFileDialog.getExistingDirectory(self, 'Open dir', HOME_DIR)
handler = FilesystemDownloader()
self.books = [b for b in handler.get_books(path=search_dir)]
self.booksTable.setSortingEnabled(False)
for indx, book in enumerate(self.books):
table_rows = self.booksTable.rowCount()
if indx >= table_rows:
self.booksTable.insertRow(table_rows)
self._fill_book_table_row(self.booksTable, indx, book)
def _fill_book_table_row(self, table, index, book):
title = QTableWidgetItem(book.title)
pages = QTableWidgetItem(str(book.pages))
percentage = QTableWidgetItem(str(book.percentage))
notes = QTableWidgetItem(str(len(book.notes)))
table.setItem(index, 0, title)
table.setItem(index, 1, pages)
table.setItem(index, 2, percentage)
table.setItem(index, 3, notes)
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec_())
|
[
"trikster1911@gmail.com"
] |
trikster1911@gmail.com
|
d1a0d48c5751d3fe5081350e717a512bffb7c337
|
e2969ce860cba03add07d3b6412397d337b8b0a1
|
/algorithms/sorting/3_sort_students/solution/solution.py
|
7d56b3942e0aa44c166ed7ae14f7dac7ebe3c637
|
[] |
no_license
|
AshRavi7/Exercises
|
cfe16ef1d7a248b1fead12e3fb4dd0b26205de07
|
0a2738771e03b36e353cc25edfc7d8b8a0e4b2fe
|
refs/heads/master
| 2022-12-20T08:30:04.104476
| 2020-09-11T15:58:46
| 2020-09-11T15:58:46
| 290,986,365
| 0
| 0
| null | 2020-08-28T07:56:53
| 2020-08-28T07:56:52
| null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
class Student:
def __init__(self, name=None, avg=None, grade=None):
self.name = name
self.avg = avg
self.grade = grade
def sort_students(slst):
    # Solution completed under an assumed criterion (the exercise statement is
    # not included here): sort by average score, highest first.
    return sorted(slst, key=lambda s: s.avg, reverse=True)
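# A hedged usage sketch (names, averages, and grades are illustrative):
if __name__ == '__main__':
    students = [Student('Bea', 78.0, 'C'), Student('Ada', 92.5, 'A')]
    for s in sort_students(students):
        print(s.name, s.avg, s.grade)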
|
[
"avelikevitch@gmail.com"
] |
avelikevitch@gmail.com
|
0e07d871bc76f4c86b6bcfe3db5c70a61ecf2acb
|
5317266e4fd836aef20427092d0d734dd0344bb7
|
/ScanSsqrtB160_100.py
|
038e04209379e6c805e7b1c98abc34e91f6c0746
|
[] |
no_license
|
lopezzot/myanalysis
|
5c29c6b2aef2a543af8718a1f88e6385dec44114
|
75a708bacb9e660b0505a85275fc68b863ea0f17
|
refs/heads/master
| 2021-07-01T16:20:26.090382
| 2020-11-07T20:06:47
| 2020-11-07T20:06:47
| 189,423,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,176
|
py
|
#!/usr/bin/env python
#code to show 2d histo of s/sqrt(b)
#to be used with /home/delphesresults/3abg_fcc90_nocut for background samples
#to be used with /home/delphesresults/alpscan for signal samples
#created on 15/5/2019
#from 3a_cutMALPvariables
import sys
import ROOT
import includeme
from array import array
from ROOT import TGraph2D
if len(sys.argv) < 5:
    print " Usage: python ScanSsqrtB160_100.py inputBgPrefix inputSg.root output.root testmass"
    sys.exit(1)
ROOT.gSystem.Load("libDelphes")
try:
ROOT.gInterpreter.Declare('#include "classes/SortableObject.h"')
ROOT.gInterpreter.Declare('#include "classes/DelphesClasses.h"')
ROOT.gInterpreter.Declare('#include "external/ExRootAnalysis/ExRootTreeReader.h"')
except:
pass
global BgFile
BgFile = sys.argv[1] #background file
SgFile = sys.argv[2] #signal file
outputFile = sys.argv[3]
test_testmass = sys.argv[4]
# Create chain of root trees
Bgchain = ROOT.TChain("Delphes")
BgFile0 = BgFile+"0_delphes.root"
Bgchain.Add(BgFile0)
Sgchain = ROOT.TChain("Delphes")
Sgchain.Add(SgFile)
# Create object of class ExRootTreeReader
BgtreeReader = ROOT.ExRootTreeReader(Bgchain)
global BgnumberOfEntries
BgnumberOfEntries = BgtreeReader.GetEntries()
print "bg entries: "+str(BgnumberOfEntries)
SgtreeReader = ROOT.ExRootTreeReader(Sgchain)
global SgnumberOfEntries
SgnumberOfEntries = SgtreeReader.GetEntries()
print "sg entries: "+str(SgnumberOfEntries)
# Get pointers to branches used in this analysis
global BgbranchPhoton
global BgbranchTrack
BgbranchPhoton = BgtreeReader.UseBranch("Photon")
BgbranchTrack = BgtreeReader.UseBranch("Track")
global SgbranchPhoton
global SgbranchTrack
SgbranchPhoton = SgtreeReader.UseBranch("Photon")
SgbranchTrack = SgtreeReader.UseBranch("Track")
#testmass for ass3l()
global testmass
#testmass = 80
testmass = float(test_testmass)
#ecm
global ecm
ecm = 80.*2 #GeV
#cross-section to rescale graphs
#xsec scanalp90_80 eta=10 = 3.294275e-06 pb
global xsec_scanalp160_100_eta10
global xsec_3abg160_eta10
global luminosity
#couplings theoretical from xsecttocyy.py eta10 ecm=90
#etaa=10.
#[0.00010584473200797015, 0.00011195767438563123, 0.00012333718369766136, 0.00014287565957617757, 0.0001768077913388226, 0.00024087176728341438, 0.0003875681881371373, 0.0008997139223959913, 0.016127450112823835]
#xsec10 = [0.0002380292, 0.0002127458, 0.0001752995, 0.0001306328, 8.530332e-05, 4.596179e-05, 1.775303e-05, 3.294275e-06]
xsec_scanalp160_100_eta10 = 0.496069 #ab #from lorenzo/alp/lorenzo/xsectocyy.py
xsec_3abg160_eta10 = 1.2534e06 #ab #from polesell/work/alpbg/fcc3a/Events/3abf_90_1
luminosity = 10.0 #ab^-1
out_root = ROOT.TFile(outputFile,"RECREATE")
#Book histograms
histSgsqrtBEphoDR = ROOT.TH2F("Sg/sqrt(B)", "Sg/sqrt(B)", 100, 0.0, 2.0, 100, 0., 10.)
def funcs(cutDR, cutEpho):
s_counter = 0
# Loop over signal events
for entry in range(0, SgnumberOfEntries):
# Load selected branches with data from specified event
SgtreeReader.ReadEntry(entry)
threephotons_vec = [] #array with 3 TLorentzVector from 3 photons
#use only events with 3 photons and not tracks
if SgbranchPhoton.GetEntries() != 3:
continue
if SgbranchTrack.GetEntries() != 0:
continue
        #add Lorentz-vector photons to the list
for i in range(0, SgbranchPhoton.GetEntries()):
photon = SgbranchPhoton.At(i)
Photon_TLV = photon.P4() #TLorentzVector
threephotons_vec.append(Photon_TLV)
#find two photons
ipalp1, ipalp2, imind, egtest = includeme.ass_3l(threephotons_vec, ecm, testmass)
        #assign ipalp1 to the photon with max energy between ipalp1 and ipalp2
if threephotons_vec[ipalp1].E() < threephotons_vec[ipalp2].E():
ipalp1_temporary = ipalp2
ipalp2 = ipalp1
ipalp1 = ipalp1_temporary
del ipalp1_temporary
MALP = (threephotons_vec[ipalp1]+threephotons_vec[ipalp2]).M()
epho1 = threephotons_vec[ipalp1].E()
sigmaepho1 =(0.1*0.1*epho1+0.01*0.01*epho1*epho1)**0.5
epho2 = threephotons_vec[ipalp2].E()
sigmaepho2 =(0.1*0.1*epho2+0.01*0.01*epho2*epho2)**0.5
epho3 = threephotons_vec[imind].E()
sigmaepho3 =(0.1*0.1*epho3+0.01*0.01*epho3*epho3)**0.5
#sigmaalp = 1.057 #from sigma of reconstructed MALP
sigmaalp=MALP*0.5*((sigmaepho1/epho1)*(sigmaepho1/epho1)+(sigmaepho2/epho2)*(sigmaepho2/epho2))**0.5
ephotest = (ecm*ecm-testmass*testmass)/2./ecm
MALPcut = ((MALP-testmass)**2/(sigmaalp**2)+(epho3-ephotest)**2/(sigmaepho3**2))**0.5
epho2epho1 = threephotons_vec[ipalp2].E()/threephotons_vec[ipalp1].E()
Sg_DeltaR = threephotons_vec[ipalp1].DeltaR(threephotons_vec[ipalp2])
if MALPcut < 1.5 and ((0.4<epho2epho1<0.5 and Sg_DeltaR<3.2) or (epho2epho1>0.5 and Sg_DeltaR<3.0)):
s_counter = s_counter+1
#print s_counter
return s_counter
def funcb(cutDR, cutEpho):
b_counter = 0
for filenumber in range(0,10):
Bgchain = ROOT.TChain("Delphes")
print BgFile+str(filenumber)+"_delphes.root"
Bgchain.Add(BgFile+str(filenumber)+"_delphes.root")
# Create object of class ExRootTreeReader
BgtreeReader = ROOT.ExRootTreeReader(Bgchain)
# Get pointers to branches used in this analysis
BgbranchPhoton = BgtreeReader.UseBranch("Photon")
BgbranchTrack = BgtreeReader.UseBranch("Track")
# Loop over background events
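        # note: BgnumberOfEntries was taken from file 0 only; this loop assumes
        # all ten background files contain the same number of events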
for entry in range(0, BgnumberOfEntries):
# Load selected branches with data from specified event
BgtreeReader.ReadEntry(entry)
threephotons_vec = [] #array with 3 TLorentzVector from 3 photons
#use only events with 3 photons and not tracks
if BgbranchPhoton.GetEntries() != 3:
continue
if BgbranchTrack.GetEntries() != 0:
continue
            #add Lorentz-vector photons to the list
for i in range(0, BgbranchPhoton.GetEntries()):
photon = BgbranchPhoton.At(i)
Photon_TLV = photon.P4() #TLorentzVector
threephotons_vec.append(Photon_TLV)
#find two photons
ipalp1, ipalp2, imind, egtest = includeme.ass_3l(threephotons_vec, ecm, testmass)
            #assign ipalp1 to the photon with max energy between ipalp1 and ipalp2
if threephotons_vec[ipalp1].E() < threephotons_vec[ipalp2].E():
ipalp1_temporary = ipalp2
ipalp2 = ipalp1
ipalp1 = ipalp1_temporary
del ipalp1_temporary
MALP = (threephotons_vec[ipalp1]+threephotons_vec[ipalp2]).M()
epho1 = threephotons_vec[ipalp1].E()
sigmaepho1 =(0.1*0.1*epho1+0.01*0.01*epho1*epho1)**0.5
epho2 = threephotons_vec[ipalp2].E()
sigmaepho2 =(0.1*0.1*epho2+0.01*0.01*epho2*epho2)**0.5
epho3 = threephotons_vec[imind].E()
sigmaepho3 =(0.1*0.1*epho3+0.01*0.01*epho3*epho3)**0.5
#sigmaalp = 1.057 #from sigma of reconstructed MALP
sigmaalp=MALP*0.5*((sigmaepho1/epho1)*(sigmaepho1/epho1)+(sigmaepho2/epho2)*(sigmaepho2/epho2))**0.5
ephotest = (ecm*ecm-testmass*testmass)/2./ecm
MALPcut = ((MALP-testmass)**2/(sigmaalp**2)+(epho3-ephotest)**2/(sigmaepho3**2))**0.5
epho2epho1 = threephotons_vec[ipalp2].E()/threephotons_vec[ipalp1].E()
Bg_DeltaR = threephotons_vec[ipalp1].DeltaR(threephotons_vec[ipalp2])
if MALPcut < 1.5 and ((0.4<epho2epho1<0.5 and Bg_DeltaR<3.2) or (epho2epho1>0.5 and Bg_DeltaR<3.0)):
b_counter = b_counter+1
print "bkg file "+str(filenumber)+", sum of events in sg: "+str(b_counter)
return b_counter
def funcssqrtb(cutDR, cutEpho):
s = funcs(cutDR, cutEpho)
b = funcb(cutDR, cutEpho)
s1 = s*xsec_scanalp160_100_eta10*luminosity/SgnumberOfEntries
b1 = b*xsec_3abg160_eta10 * luminosity/(BgnumberOfEntries*10)
print "out-> "+str(cutDR)+" "+str(cutEpho)+" "+str(s)+" "+str(b)+" "+str(s1)+" "+str(b1)+" "+str(s1/(b1**0.5))+"\n"
coupling = (((b1**0.5)*2*0.01**2)/s1)**0.5
print "coupling no systematics: "+str(coupling)
testcoupling = []
testsign05 = []
testsign01 = []
testsign001 = []
for i in range(2000):
coupling = coupling+0.005
observed = b1+s1*coupling**2/(0.01**2)
pull05 = includeme.plotSignificance(observed, b1, 0.05)
pull01 = includeme.plotSignificance(observed, b1, 0.01)
pull001 = includeme.plotSignificance(observed, b1, 0.001)
testcoupling.append(coupling)
testsign05.append(pull05)
testsign01.append(pull01)
testsign001.append(pull001)
testsign05 = [abs(x-2.) for x in testsign05]
print testcoupling[testsign05.index(min(testsign05))]
testsign01 = [abs(x-2.) for x in testsign01]
print testcoupling[testsign01.index(min(testsign01))]
testsign001 = [abs(x-2.) for x in testsign001]
print testcoupling[testsign001.index(min(testsign001))]
return s,b,s1/(b1**0.5)
def funcshisto():
#Book histograms
histSgepho2epho1 = ROOT.TH1F("Sg_Epho2/Epho1", "Sg_Epho2/Epho1", 150, 0.0, 1.1)
histSgdeltar = ROOT.TH1F("Sg_DeltaR", "Sg_DeltaR", 100, 0., 10.)
histSgMALP = ROOT.TH1F("Sg_MALP", "Sg", 100, 0., 300.)
histSgMALPcut = ROOT.TH1F("Sg_MALPcut", "Sg", 100, 0., 50.)
histSgEphoDR = ROOT.TH2F("Sg", "Sg", 100, 0.0, 1.1, 100, 0., 6.)
histSgEphoMalp = ROOT.TH2F("Sg_2","Sg_2", 100, -4, 4, 100, -5, 5)
histSgEphoDR_afterMALPcut = ROOT.TH2F("Sg_afterMALPcut", "Sg_afterMALPcut", 100, 0.0, 1.1, 100, 0., 6.)
# Loop over signal events
for entry in range(0, SgnumberOfEntries):
# Load selected branches with data from specified event
SgtreeReader.ReadEntry(entry)
threephotons_vec = [] #array with 3 TLorentzVector from 3 photons
#use only events with 3 photons and not tracks
if SgbranchPhoton.GetEntries() != 3:
continue
if SgbranchTrack.GetEntries() != 0:
continue
        #add Lorentz-vector photons to the list
for i in range(0, SgbranchPhoton.GetEntries()):
photon = SgbranchPhoton.At(i)
Photon_TLV = photon.P4() #TLorentzVector
threephotons_vec.append(Photon_TLV)
#find two photons
ipalp1, ipalp2, imind, egtest = includeme.ass_3l(threephotons_vec, ecm, testmass)
        #assign ipalp1 to the photon with max energy between ipalp1 and ipalp2
if threephotons_vec[ipalp1].E() < threephotons_vec[ipalp2].E():
ipalp1_temporary = ipalp2
ipalp2 = ipalp1
ipalp1 = ipalp1_temporary
del ipalp1_temporary
MALP = (threephotons_vec[ipalp1]+threephotons_vec[ipalp2]).M()
#print MALP
epho1 = threephotons_vec[ipalp1].E()
sigmaepho1 =(0.1*0.1*epho1+0.01*0.01*epho1*epho1)**0.5
epho2 = threephotons_vec[ipalp2].E()
sigmaepho2 =(0.1*0.1*epho2+0.01*0.01*epho2*epho2)**0.5
epho3 = threephotons_vec[imind].E()
sigmaepho3 =(0.1*0.1*epho3+0.01*0.01*epho3*epho3)**0.5
#sigmaalp = 1.057 #from sigma of reconstructed MALP
sigmaalp=MALP*0.5*((sigmaepho1/epho1)*(sigmaepho1/epho1)+(sigmaepho2/epho2)*(sigmaepho2/epho2))**0.5
ephotest = (ecm*ecm-testmass*testmass)/2./ecm
epho2epho1 = threephotons_vec[ipalp2].E()/threephotons_vec[ipalp1].E()
Sg_DeltaR = threephotons_vec[ipalp1].DeltaR(threephotons_vec[ipalp2])
MALPcut = ((MALP-testmass)**2/(sigmaalp**2)+(epho3-ephotest)**2/(sigmaepho3**2))**0.5
histSgEphoMalp.Fill((epho3-ephotest)/sigmaepho3, (MALP-testmass)/sigmaalp )
histSgepho2epho1.Fill(epho2epho1)
histSgdeltar.Fill(Sg_DeltaR)
histSgMALP.Fill(MALP)
histSgMALPcut.Fill(MALPcut)
histSgEphoDR.Fill(epho2epho1, Sg_DeltaR)
if MALPcut<1.5:
histSgEphoDR_afterMALPcut.Fill(epho2epho1, Sg_DeltaR)
histSgEphoDR_afterMALPcut.Write()
histSgEphoMalp.Write()
histSgepho2epho1.Write()
histSgdeltar.Write()
histSgMALP.Write()
histSgMALPcut.Write()
histSgEphoDR.Write()
def funcbhisto():
Bgchain = ROOT.TChain("Delphes")
Bgchain.Add(BgFile+"0_delphes.root")
# Create object of class ExRootTreeReader
BgtreeReader = ROOT.ExRootTreeReader(Bgchain)
# Get pointers to branches used in this analysis
BgbranchPhoton = BgtreeReader.UseBranch("Photon")
BgbranchTrack = BgtreeReader.UseBranch("Track")
histBgepho2epho1 = ROOT.TH1F("Bg_Epho2/Epho1", "Bg_Epho2/Epho1", 150, 0.0, 1.1)
histBgdeltar = ROOT.TH1F("Bg_DeltaR", "Bg_DeltaR", 100, 0., 10.)
histBgMALP = ROOT.TH1F("Bg_MALP", "Bg", 100, 0., 300.)
histBgMALPcut = ROOT.TH1F("Bg_MALPcut", "Bg", 100, 0., 50.)
histBgEphoDR = ROOT.TH2F("Bg", "Bg", 100, 0.00, 1.1, 100, 0., 6.)
histBgEphoMalp = ROOT.TH2F("Bg_2","Bg_2", 100, -70, 35, 100, -90, 60)
histBgEphoDR_afterMALPcut = ROOT.TH2F("Bg_afterMALPcut", "Bg_afterMALPcut", 100, 0.0, 1.1, 100, 0.0, 6.0)
    # Loop over background events
for entry in range(0, BgnumberOfEntries):
# Load selected branches with data from specified event
BgtreeReader.ReadEntry(entry)
threephotons_vec = [] #array with 3 TLorentzVector from 3 photons
#use only events with 3 photons and not tracks
if BgbranchPhoton.GetEntries() != 3:
continue
if BgbranchTrack.GetEntries() != 0:
continue
        #add Lorentz-vector photons to the list
for i in range(0, BgbranchPhoton.GetEntries()):
photon = BgbranchPhoton.At(i)
Photon_TLV = photon.P4() #TLorentzVector
threephotons_vec.append(Photon_TLV)
#find two photons
ipalp1, ipalp2, imind, egtest = includeme.ass_3l(threephotons_vec, ecm, testmass)
        #assign ipalp1 to the photon with max energy between ipalp1 and ipalp2
if threephotons_vec[ipalp1].E() < threephotons_vec[ipalp2].E():
ipalp1_temporary = ipalp2
ipalp2 = ipalp1
ipalp1 = ipalp1_temporary
del ipalp1_temporary
MALP = (threephotons_vec[ipalp1]+threephotons_vec[ipalp2]).M()
#print MALP
epho1 = threephotons_vec[ipalp1].E()
sigmaepho1 =(0.1*0.1*epho1+0.01*0.01*epho1*epho1)**0.5
epho2 = threephotons_vec[ipalp2].E()
sigmaepho2 =(0.1*0.1*epho2+0.01*0.01*epho2*epho2)**0.5
epho3 = threephotons_vec[imind].E()
sigmaepho3 =(0.1*0.1*epho3+0.01*0.01*epho3*epho3)**0.5
#sigmaalp = 1.057 #from sigma of reconstructed MALP
sigmaalp=MALP*0.5*((sigmaepho1/epho1)*(sigmaepho1/epho1)+(sigmaepho2/epho2)*(sigmaepho2/epho2))**0.5
ephotest = (ecm*ecm-testmass*testmass)/2./ecm
epho2epho1 = threephotons_vec[ipalp2].E()/threephotons_vec[ipalp1].E()
Bg_DeltaR = threephotons_vec[ipalp1].DeltaR(threephotons_vec[ipalp2])
MALPcut = ((MALP-testmass)**2/(sigmaalp**2)+(epho3-ephotest)**2/(sigmaepho3)**2)**0.5
#print (epho3-ephotest)/sigmaepho3, (MALP-testmass)/sigmaalp
histBgEphoMalp.Fill((epho3-ephotest)/sigmaepho3, (MALP-testmass)/sigmaalp )
histBgepho2epho1.Fill(epho2epho1)
histBgdeltar.Fill(Bg_DeltaR)
histBgMALP.Fill(MALP)
histBgMALPcut.Fill(MALPcut)
histBgEphoDR.Fill(epho2epho1, Bg_DeltaR)
if MALPcut<1.5:
histBgEphoDR_afterMALPcut.Fill(epho2epho1, Bg_DeltaR)
histBgEphoDR_afterMALPcut.Write()
histBgEphoMalp.Write()
histBgepho2epho1.Write()
histBgdeltar.Write()
histBgMALP.Write()
histBgMALPcut.Write()
histBgEphoDR.Write()
cutDRarray = array('d',[])
cutEphoarray=array('d',[])
sbarray = array('d',[])
sarray = array('d',[])
barray = array('d',[])
funcssqrtb(100,100)
'''
for j in range(5):
cutEpho = 0.8+0.05*j
for i in range(10):
cutDR = 3.0+(0.05*i)
s,b,sb = funcssqrtb(cutDR, cutEpho)
cutDRarray.append(cutDR)
cutEphoarray.append(cutEpho)
sbarray.append(sb)
sarray.append(s)
barray.append(b)
n = len(sarray)
SGraph = TGraph2D(n, cutDRarray, cutEphoarray, sarray)
SGraph.SetTitle("Signal")
BGraph = TGraph2D(n, cutDRarray, cutEphoarray, barray)
BGraph.SetTitle("Bg")
SBGraph = TGraph2D(n, cutDRarray, cutEphoarray, sbarray)
SBGraph.SetTitle("S/sqrt(B)")
SBGraph.Write()
BGraph.Write()
SGraph.Write()
'''
funcshisto()
funcbhisto()
out_root.Close()
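#example invocation (a sketch; paths are illustrative): the first argument is a
#background file prefix (the code appends "<N>_delphes.root" to it), followed by
#the signal ROOT file, the output file, and the test mass in GeV
#  python ScanSsqrtB160_100.py /path/to/3abg160_ /path/to/signal.root out.root 100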
|
[
"lorenzo.pezzotti01@gmail.com"
] |
lorenzo.pezzotti01@gmail.com
|
74998906d71c7dab68fe9ff9b6a07a3e816e084e
|
f35fa4c446c3b6eee91820eca94bfb3f1eb62e84
|
/migrations/versions/c76b0a2ed74e_user_roles.py
|
54744d4bd8a3aed4bfad794d43ae4b4669b27efd
|
[] |
no_license
|
jience/Flask_Web
|
f8ba44ccebb3f0a984bed819611366c5c2a4334a
|
a8bcd45cff7a163e1a5ffb9245bc993a02e8b034
|
refs/heads/master
| 2022-12-10T04:20:29.107311
| 2017-12-08T09:10:37
| 2017-12-08T09:10:37
| 92,914,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
"""user_roles
Revision ID: c76b0a2ed74e
Revises: 6748971a9397
Create Date: 2017-11-30 17:43:59.300218
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c76b0a2ed74e'
down_revision = '6748971a9397'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('roles', sa.Column('default', sa.Boolean(), nullable=True))
op.add_column('roles', sa.Column('permissions', sa.Integer(), nullable=True))
op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_roles_default'), table_name='roles')
op.drop_column('roles', 'permissions')
op.drop_column('roles', 'default')
# ### end Alembic commands ###
|
[
"zhangxiaojie@cynovo.com.cn"
] |
zhangxiaojie@cynovo.com.cn
|
cbeebd70c2d6b12bbebbad2ce9762e7f5c9097fc
|
a4fa177f7694f6cd55c1440d1146acd48f38a6c9
|
/.ycm_extra_conf.py
|
452e5f6c949aa4b0fe2bc44c26890bd054df6c80
|
[] |
no_license
|
gysevvlad/systemc_labs
|
c47b9076be149d88424e679cf59987cf0b96138f
|
400af5e76a9b872a2835087401807205619c737b
|
refs/heads/master
| 2020-08-01T00:11:45.599820
| 2016-12-09T11:53:26
| 2016-12-09T11:53:26
| 73,587,643
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,146
|
py
|
import os
import ycm_core
flags = [
'-std=c++14',
'-I', '/usr/bin/../lib/clang/3.8.0/include',
'-I', '/usr/include',
'-I', '/usr/include/c++/v1',
'-I', './systemc/include',
'-I', '/usr/local/include',
'-DUSE_CLANG_COMPLETER'
]
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return { 'flags': final_flags }
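# Usage note (a sketch): YouCompleteMe picks up a .ycm_extra_conf.py found in
# the project root automatically and calls FlagsForFile(filename) above to
# obtain the compile flags for each buffer.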
|
[
"gysevvlad@gmail.com"
] |
gysevvlad@gmail.com
|
32026ca193a23d8420475f8c2d62152fdd0084db
|
4841bce0a21e92fe7de7fde0cdd94b4b881e07c1
|
/source/binarytree.py
|
e2b3194771263f8abac69454bed2f0d2b18c31ac
|
[] |
no_license
|
ajboxjr/CS-3-Data-Structures
|
e7872fccca16020b50b5650c7cad794c573eea21
|
234a069a935363fe1d48b3a2b22b98b0bbb36a92
|
refs/heads/master
| 2021-09-27T14:53:27.096150
| 2018-02-25T05:56:57
| 2018-02-25T05:56:57
| 115,154,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,079
|
py
|
#!python
from collections import deque
class BinaryTreeNode(object):
def __init__(self, data):
"""Initialize this binary tree node with the given data."""
self.data = data
self.left = None
self.right = None
def __repr__(self):
"""Return a string representation of this binary tree node."""
return 'BinaryTreeNode({!r})'.format(self.data)
def is_leaf(self):
"""Return True if this node is a leaf (has no children)."""
# TODO: Check if both left child and right child have no value
return self.left == None and self.right == None
def is_branch(self):
"""Return True if this node is a branch (has at least one child)."""
# TODO: Check if either left child or right child has a value
return self.left != None or self.right != None
def height(self):
"""Return the height of this node (the number of edges on the longest
downward path from this node to a descendant leaf node).
TODO: Best and worst case running time: ??? under what conditions?"""
max_height = 0
#Base Case
node = self
if node: # if node exists
#Base Case
if node.is_leaf(): # has no left & right
print("leaf: {}: {}".format(node, max_height))
return max_height
            elif node.is_branch(): # has left or right
#add to height for branches
max_height +=1
left_height = 0
right_height = 0
if node.left:
left_height = node.left.height()
if node.right:
right_height = node.right.height()
print("parent {} (left: {} h:{}) (right {} h:{})".format(node, node.left, left_height, node.right, right_height))
if left_height > right_height: # choose the longer height (left)
node = node.left
max_height +=left_height
elif left_height < right_height:
node = node.right
max_height += right_height
else:
max_height += right_height# equal so chose on to return
return max_height
class BinarySearchTree(object):
def __init__(self, items=None):
"""Initialize this binary search tree and insert the given items."""
self.root = None
self.size = 0
if items is not None:
for item in items:
self.insert(item)
def __repr__(self):
"""Return a string representation of this binary search tree."""
return 'BinarySearchTree({} nodes)'.format(self.size)
def is_empty(self):
"""Return True if this binary search tree is empty (has no nodes)."""
return self.root is None
def height(self):
"""Return the height of this tree (the number of edges on the longest
downward path from this tree's root node to a descendant leaf node).
        Best and worst case running time: O(n) for a tree with n nodes, since
        every node is visited to compute subtree heights; O(1) when the tree
        is empty."""
# TODO: Check if root node has a value and if so calculate its height
        if self.is_empty():
return 0
else:
return self.root.height()
def contains(self, item):
"""Return True if this binary search tree contains the given item.
TODO: Best case running time: ??? under what conditions?
TODO: Worst case running time: ??? under what conditions?"""
# Find a node with the given item, if any
node = self._find_node(item)
# Return True if a node was found, or False
return node is not None
def search(self, item):
"""Return an item in this binary search tree matching the given item,
or None if the given item is not found.
TODO: Best case running time: ??? under what conditions?
TODO: Worst case running time: ??? under what conditions?"""
# Find a node with the given item, if any
node = self._find_node(item)
# TODO: Return the node's data if found, or None
return node.data if node else None
def insert(self, item):
"""Insert the given item in order into this binary search tree.
TODO: Best case running time: ??? under what conditions?
TODO: Worst case running time: ??? under what conditions?"""
# Handle the case where the tree is empty
if self.is_empty():
# TODO: Create a new root node
self.root = BinaryTreeNode(item)
# TODO: Increase the tree size
self.size +=1
return
# Find the parent node of where the given item should be inserted
parent = self._find_parent_node(item)
if parent:
# TODO: Check if the given item should be inserted left of parent node
if parent.data > item:
parent.left = BinaryTreeNode(item)
# TODO: Create a new node and set the parent's left child
# TODO: Check if the given item should be inserted right of parent node
if parent.data < item:
parent.right = BinaryTreeNode(item)
# TODO: Create a new node and set the parent's right child
# TODO: Increase the tree size
else:
if self.root is not None:
if self.root.data > item:
self.root.left = BinaryTreeNode(item)
if self.root is not None:
if self.root.data < item:
self.root.right = BinaryTreeNode(item)
self.size +=1
def _find_node(self, item):
"""Return the node containing the given item in this binary search tree,
or None if the given item is not found.
TODO: Best case running time: ??? under what conditions?
TODO: Worst case running time: ??? under what conditions?"""
# Start with the root node
assert type(item) == type(self.root.data)
node = self.root
# Loop until we descend past the closest leaf node
while node is not None:
# TODO: Check if the given item matches the node's data
if node.data == item:
# Return the found node
return node
# TODO: Check if the given item is less than the node's data
elif node.data > item:
node = node.left
elif node.data < item:
node = node.right
return None
def _find_parent_node(self, item):
"""Return the parent node of the node containing the given item
(or the parent node of where the given item would be if inserted)
in this tree, or None if this tree is empty or has only a root node.
TODO: Best case running time: ??? under what conditions?
TODO: Worst case running time: ??? under what conditions?"""
# Start with the root node and keep track of its parent
node = self.root
parent = None
# Base God Says don't touch this
# if node is None:
# return parent
# Loop until we descend past the closest leaf node
while node is not None:
# TODO: Check if the given item matches the node's data
if node.data == item:
# Return the parent of the found node
return parent
# TODO: Check if the given item is less than the node's data
# if node.left is not None:
elif node.data > item:
# TODO: Update the parent and descend to the node's left child
parent = node # O
node = node.left # / \ IF left is None
# O O
# TODO: Check if the given item is greater than the node's data
# elif node.right is not None:
elif node.data < item:
# TODO: Update the parent and descend to the node's right child
parent = node
node = node.right
return parent
# This space intentionally left blank (please do not delete this comment)
def items_in_order(self):
"""Return an in-order list of all items in this binary search tree."""
items = []
if not self.is_empty():
# Traverse tree in-order from root, appending each node's item
self._traverse_in_order_recursive(self.root, items.append)
# Return in-order list of all items in tree
return items
def _traverse_in_order_recursive(self, node, visit):
"""Traverse this binary tree with recursive in-order traversal (DFS).
Start at the given node and visit each node with the given function.
TODO: Running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
#if it is a leaf append(visit)
#else: check recursive left then right
if node:
if node.is_leaf():
return visit(node.data)
if node.is_branch():
self._traverse_in_order_recursive(node.left, visit)
visit(node.data)
self._traverse_in_order_recursive(node.right, visit)
# def _traverse_in_order_iterative(self, node, visit):
# """Traverse this binary tree with iterative in-order traversal (DFS).
# Start at the given node and visit each node with the given function.
# TODO: Running time: ??? Why and under what conditions?
# TODO: Memory usage: ??? Why and under what conditions?"""
# # TODO: Traverse in-order without using recursion (stretch challenge)
# while node:
#
# if node.is_branch():
# if node.left is not None:
def items_pre_order(self):
"""Return a pre-order list of all items in this binary search tree."""
items = []
if not self.is_empty():
# Traverse tree pre-order from root, appending each node's item
self._traverse_pre_order_recursive(self.root, items.append)
# Return pre-order list of all items in tree
return items
def _traverse_pre_order_recursive(self, node, visit):
"""Traverse this binary tree with recursive pre-order traversal (DFS).
Start at the given node and visit each node with the given function.
TODO: Running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
if node:
visit(node.data)
self._traverse_pre_order_recursive(node.left, visit)
self._traverse_pre_order_recursive(node.right, visit)
def _traverse_pre_order_iterative(self, node, visit):
"""Traverse this binary tree with iterative pre-order traversal (DFS).
Start at the given node and visit each node with the given function.
TODO: Running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
# TODO: Traverse pre-order without using recursion (stretch challenge)
def items_post_order(self):
"""Return a post-order list of all items in this binary search tree."""
items = []
if not self.is_empty():
# Traverse tree post-order from root, appending each node's item
self._traverse_post_order_recursive(self.root, items.append)
# Return post-order list of all items in tree
return items
def _traverse_post_order_recursive(self, node, visit):
"""Traverse this binary tree with recursive post-order traversal (DFS).
Start at the given node and visit each node with the given function.
TODO: Running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
# TODO: Traverse left subtree, if it exists
if node:
self._traverse_post_order_recursive(node.left,visit)
self._traverse_post_order_recursive(node.right, visit)
visit(node.data)
def _traverse_post_order_iterative(self, node, visit):
"""Traverse this binary tree with iterative post-order traversal (DFS).
Start at the given node and visit each node with the given function.
TODO: Running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
# TODO: Traverse post-order without using recursion (stretch challenge)
    def items_level_order(self):
        """Return a level-order list of all items in this binary search tree."""
        items = []
        if not self.is_empty():
            # Traverse tree level-order from root, appending each node's item
            self._traverse_level_order_iterative(self.root, items.append)
        # Return level-order list of all items in tree
        return items
def _traverse_level_order_iterative(self, start_node, visit):
"""Traverse this binary tree with iterative level-order traversal (BFS).
Start at the given node and visit each node with the given function.
TODO: Running time: ??? Why and under what conditions?
TODO: Memory usage: ??? Why and under what conditions?"""
        # Create queue to store nodes not yet traversed in level-order
        queue = deque()
        # Enqueue given starting node
        if start_node is not None:
            queue.append(start_node)
        # Loop until queue is empty
        while queue:
            # Dequeue node at front of queue
            node = queue.popleft()
            # Visit this node's data with given function
            visit(node.data)
            # Enqueue this node's left child, if it exists
            if node.left is not None:
                queue.append(node.left)
            # Enqueue this node's right child, if it exists
            if node.right is not None:
                queue.append(node.right)
def test_binary_search_tree():
# Create a complete binary search tree of 3, 7, or 15 items in level-order
# items = [2, 1, 3]
# items = [4, 2, 6, 1, 3, 5, 7]
items = [8, 4, 12, 2, 6, 10, 14, 1, 3, 5, 7, 9, 11, 13, 15]
# items = ['h','d','i','b','e','a','f','c','j','g']
# items = ['B','A','C']
print('items: {}'.format(items))
tree = BinarySearchTree()
print('tree: {}'.format(tree))
print('root: {}'.format(tree.root))
print('\nInserting items:')
for item in items:
tree.insert(item)
print('insert({}), size: {}'.format(item, tree.size))
print(tree.height())
print('root: {} left: {} right: {}'.format(tree.root.data, tree.root.left,tree.root.right ))
print('\nSearching for items:')
for item in items:
result = tree.search(item)
print('search({}): {}'.format(item, result))
    item = 123
    result = tree.search(item)
    print('search({}): {}'.format(item, result))
    print('\nTraversing items:')
    print('items in-order: {}'.format(tree.items_in_order()))
    print('items pre-order: {}'.format(tree.items_pre_order()))
    print('items post-order: {}'.format(tree.items_post_order()))
    print('items level-order: {}'.format(tree.items_level_order()))
if __name__ == '__main__':
test_binary_search_tree()
|
[
"ajboxjr@gmail.com"
] |
ajboxjr@gmail.com
|