blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e487f15efe7b9383e9f8e34e2b22deb1e0ffbaea | 7ea1beb4e0442cc494b53700a9494c4eb05ad9bb | /flaskboot/ConfigProject/config.py | 54a8a203ae0f7654037344553388184142909fd3 | [] | no_license | liwei123o0/FlaskBBS | 9fd26bcd133a16d57903cbcd5d66c412babb3a1a | 92360a7d8bf8667d314ca6d0839323346f314bf8 | refs/heads/master | 2020-11-26T16:33:02.668878 | 2015-09-28T08:48:14 | 2015-09-28T08:48:14 | 42,153,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | # -*- coding: utf-8 -*-
#! /usr/bin/env python
import os

# Absolute path of the directory containing this config module; used below to
# anchor the SQLite database files next to the source.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base Flask configuration shared by all environments."""

    # NOTE(security): the fallback secret is committed to source control.
    # Prefer an environment-provided SECRET_KEY; the historical literal is
    # kept only as a backward-compatible default for existing deployments.
    SECRET_KEY = os.environ.get('SECRET_KEY', 'liweiCDK')
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    FLASK_MAIL_SUBJECT_PREFIX = '[ConfigProject]'
    FLASK_MAIL_SENDER = 'liweijavakf@163.com'
    FLASK_ADMIN = 'liwei'

    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialisation; no-op in the base class."""
        pass
class DevelopmentConfig(Config):
    """Development settings: debug on, mail via smtp.163.com, local MySQL."""

    DEBUG = True
    MAIL_SERVER = 'smtp.163.com'
    MAIL_PORT = 25
    MAIL_USE_TLS = True
    MAIL_USERNAME = 'liweijavakf@163.com'
    # NOTE(security): the mail password was hard-coded in source control.
    # Read it from the environment when available; the old literal remains
    # only as a backward-compatible fallback.
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD', 'liwei429')
    SQLALCHEMY_DATABASE_URI = 'mysql://root:root@127.0.0.1/flask'
class TestingConfig(Config):
    """Test settings: testing mode with an on-disk SQLite database next to this file."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'configdata.sqlite')
class ProductionConfig(Config):
    """Production settings.

    NOTE(review): uses the same SQLite file as TestingConfig — presumably a
    placeholder until a real production database is configured; verify before
    deploying.
    """
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'configdata.sqlite')
# Registry mapping a configuration name (e.g. chosen via an environment
# variable by the app factory) to its class; 'default' falls back to the
# development configuration.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig,
}
| [
"877129310@qq.com"
] | 877129310@qq.com |
e44fc1f322c7a90cc9b2e7b0aaace1324bb3de3f | 6bb80d482bfd0cd5feb6f2d37c7235a27b3466d6 | /pretrained-model/tts/fastspeech2/fastspeech2-female.py | 56bbeb54f559a80c3b1728ea952fe26a7fa3979b | [
"MIT"
] | permissive | dakmatt/malaya-speech | deadb00e1aa8a03593721c26457f35158e67d96d | 957cfb1952760c30d3b4a2a2e60b7f142394cbd3 | refs/heads/master | 2023-04-03T13:56:53.675046 | 2021-04-19T03:31:40 | 2021-04-19T03:31:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,872 | py | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import tensorflow as tf
import numpy as np
from glob import glob
from itertools import cycle
import tensorflow as tf
import malaya_speech
import malaya_speech.train
import malaya_speech.config
import malaya_speech.train as train
from malaya_speech.train.model import fastspeech2
from malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss
from functools import partial
import json
import re
# Training manifest: presumably maps 'train'/'test' split names to lists of
# mel-spectrogram .npy paths (indexed as files['train'] / files['test'] below).
with open('mels-female.json') as fopen:
    files = json.load(fopen)
def norm_mean_std(x, mean, std):
    """Z-score normalise *x*, keeping exact zeros (e.g. unvoiced frames) at zero."""
    zero_positions = np.where(x == 0.0)[0]
    normalised = (x - mean) / std
    normalised[zero_positions] = 0.0
    return normalised
def average_by_duration(x, durs):
    """Average frame-level values of *x* over each duration segment.

    Zeros in *x* are excluded from each segment's mean; a segment containing
    only zeros yields 0.0. Returns a float32 array with one value per segment.
    """
    total_frames = durs.sum()
    boundaries = np.cumsum(np.pad(durs, (1, 0)))
    averaged = np.zeros((durs.shape[0],), dtype=np.float32)
    for seg, start, end in zip(range(total_frames), boundaries[:-1], boundaries[1:]):
        segment = x[start:end]
        nonzero = segment[np.where(segment != 0.0)[0]]
        averaged[seg] = np.mean(nonzero) if len(nonzero) > 0 else 0.0
    return averaged.astype(np.float32)
def get_alignment(f):
    """Load the tacotron2 alignment paired with mel file *f*, or None if absent."""
    alignment_path = 'tacotron2-female-alignment/' + f.split('/')[-1]
    if not os.path.exists(alignment_path):
        return None
    return np.load(alignment_path)
# Precomputed dataset statistics (mean, std) for pitch and energy normalisation.
f0_stat = np.load('../speech-bahasa/female-stats-v2/stats_f0.npy')
energy_stat = np.load('../speech-bahasa/female-stats-v2/stats_energy.npy')

# Data-pipeline hyperparameters.
reduction_factor = 1
maxlen = 1008        # drop utterances with more mel frames than this
minlen = 32          # drop utterances with fewer mel frames than this
pad_to = 8           # pad sequence lengths to a multiple of this
data_min = 1e-2      # mel padding uses log(data_min)

# Character inventory for text encoding; ids 0/1/2 are pad/start/eos.
_pad = 'pad'
_start = 'start'
_eos = 'eos'
_punctuation = "!'(),.:;? "
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_rejected = '\'():;"'  # characters stripped from transcripts
MALAYA_SPEECH_SYMBOLS = (
    [_pad, _start, _eos] + list(_special) + list(_punctuation) + list(_letters)
)
total_steps = 300_000
def generate(files):
    """Infinite generator of training examples for tf.data.Dataset.from_generator.

    Cycles over mel .npy paths, skipping utterances outside [minlen, maxlen]
    or without a tacotron2 alignment, and yields dicts of padded features.
    """
    file_cycle = cycle(files)
    while True:
        f = next(file_cycle).decode()  # paths arrive as bytes from tf.data args
        mel = np.load(f)
        mel_length = len(mel)
        if mel_length > maxlen or mel_length < minlen:
            continue
        alignment = get_alignment(f)
        if alignment is None:
            continue
        stop_token_target = np.zeros([len(mel)], dtype = np.float32)
        # Sibling .npy files share the mel filename with a different directory.
        text_ids = np.load(f.replace('mels', 'text_ids'), allow_pickle = True)[0]
        # Keep only characters in the symbol table, minus rejected punctuation.
        text_ids = ''.join(
            [c for c in text_ids if c in MALAYA_SPEECH_SYMBOLS and c not in _rejected]
        )
        text_ids = re.sub(r'[ ]+', ' ', text_ids).strip()
        text_input = np.array([MALAYA_SPEECH_SYMBOLS.index(c) for c in text_ids])
        # Wrap with start (1) / eos (2), then right-pad to a multiple of pad_to.
        num_pad = pad_to - ((len(text_input) + 2) % pad_to)
        text_input = np.pad(
            text_input, ((1, 1)), 'constant', constant_values = ((1, 2))
        )
        text_input = np.pad(
            text_input, ((0, num_pad)), 'constant', constant_values = 0
        )
        # Pad mel frames with log(data_min); stop token is 1 over the padding.
        num_pad = pad_to - ((len(mel) + 1) % pad_to) + 1
        pad_value_mel = np.log(data_min)
        mel = np.pad(
            mel, ((0, num_pad), (0, 0)), 'constant', constant_values = pad_value_mel,
        )
        stop_token_target = np.pad(
            stop_token_target, ((0, num_pad)), 'constant', constant_values = 1
        )
        len_mel = [len(mel)]
        len_text_ids = [len(text_input)]
        # Pitch: normalise then average per phoneme duration (one value/char).
        f0 = np.load(f.replace('mels', 'f0s'))
        f0 = norm_mean_std(f0, f0_stat[0], f0_stat[1])
        f0 = average_by_duration(f0, alignment)
        len_f0 = [len(f0)]
        # Energy: same treatment as pitch.
        energy = np.load(f.replace('mels', 'energies'))
        energy = norm_mean_std(energy, energy_stat[0], energy_stat[1])
        energy = average_by_duration(energy, alignment)
        len_energy = [len(energy)]
        yield {
            'mel': mel,
            'text_ids': text_input,
            'len_mel': len_mel,
            'len_text_ids': len_text_ids,
            'stop_token_target': stop_token_target,
            'f0': f0,
            'len_f0': len_f0,
            'energy': energy,
            'len_energy': len_energy,
            'f': [f],
            'alignment': alignment,
        }
def get_dataset(files, batch_size = 32, shuffle_size = 32, thread_count = 24):
    """Return a zero-arg factory building the padded-batch tf.data pipeline.

    NOTE(review): batches use `shuffle_size`, not `batch_size`, and
    `thread_count` is unused — presumably leftovers; confirm intent.
    """
    def get():
        dataset = tf.data.Dataset.from_generator(
            generate,
            {
                'mel': tf.float32,
                'text_ids': tf.int32,
                'len_mel': tf.int32,
                'len_text_ids': tf.int32,
                'stop_token_target': tf.float32,
                'f0': tf.float32,
                'len_f0': tf.int32,
                'energy': tf.float32,
                'len_energy': tf.int32,
                'f': tf.string,
                'alignment': tf.int32,
            },
            output_shapes = {
                'mel': tf.TensorShape([None, 80]),
                'text_ids': tf.TensorShape([None]),
                'len_mel': tf.TensorShape([1]),
                'len_text_ids': tf.TensorShape([1]),
                'stop_token_target': tf.TensorShape([None]),
                'f0': tf.TensorShape([None]),
                'len_f0': tf.TensorShape([1]),
                'energy': tf.TensorShape([None]),
                'len_energy': tf.TensorShape([1]),
                'f': tf.TensorShape([1]),
                'alignment': tf.TensorShape([None]),
            },
            args = (files,),
        )
        # Pad every variable-length feature to the longest example in the batch.
        dataset = dataset.padded_batch(
            shuffle_size,
            padded_shapes = {
                'mel': tf.TensorShape([None, 80]),
                'text_ids': tf.TensorShape([None]),
                'len_mel': tf.TensorShape([1]),
                'len_text_ids': tf.TensorShape([1]),
                'stop_token_target': tf.TensorShape([None]),
                'f0': tf.TensorShape([None]),
                'len_f0': tf.TensorShape([1]),
                'energy': tf.TensorShape([None]),
                'len_energy': tf.TensorShape([1]),
                'f': tf.TensorShape([1]),
                'alignment': tf.TensorShape([None]),
            },
            padding_values = {
                'mel': tf.constant(0, dtype = tf.float32),
                'text_ids': tf.constant(0, dtype = tf.int32),
                'len_mel': tf.constant(0, dtype = tf.int32),
                'len_text_ids': tf.constant(0, dtype = tf.int32),
                'stop_token_target': tf.constant(0, dtype = tf.float32),
                'f0': tf.constant(0, dtype = tf.float32),
                'len_f0': tf.constant(0, dtype = tf.int32),
                'energy': tf.constant(0, dtype = tf.float32),
                'len_energy': tf.constant(0, dtype = tf.int32),
                'f': tf.constant('', dtype = tf.string),
                'alignment': tf.constant(0, dtype = tf.int32),
            },
        )
        return dataset

    return get
def model_fn(features, labels, mode, params):
    """tf.estimator model_fn: builds FastSpeech2 and its multi-term loss.

    Loss = duration + mel (before/after postnet) + energy + f0, each masked
    by the corresponding sequence lengths.
    """
    input_ids = features['text_ids']
    input_lengths = features['len_text_ids'][:, 0]
    mel_outputs = features['mel']
    mel_lengths = features['len_mel'][:, 0]
    energies = features['energy']
    energies_lengths = features['len_energy'][:, 0]
    f0s = features['f0']
    f0s_lengths = features['len_f0'][:, 0]
    batch_size = tf.shape(f0s)[0]
    alignment = features['alignment']
    config = malaya_speech.config.fastspeech2_config
    config = fastspeech2.Config(
        vocab_size = len(MALAYA_SPEECH_SYMBOLS), **config
    )
    model = fastspeech2.Model(config)
    mel_before, mel_after, duration_outputs, f0_outputs, energy_outputs = model(
        input_ids, alignment, f0s, energies, training = True
    )
    mse = tf.losses.mean_squared_error
    mae = tf.losses.absolute_difference
    # Durations are regressed in log domain; +1 avoids log(0) on zero durations.
    log_duration = tf.math.log(tf.cast(tf.math.add(alignment, 1), tf.float32))
    duration_loss = mse(log_duration, duration_outputs)
    # Mel losses, masked over valid frames only.
    max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
    mask = tf.sequence_mask(
        lengths = mel_lengths, maxlen = max_length, dtype = tf.float32
    )
    mask = tf.expand_dims(mask, axis = -1)
    mel_loss_before = mae(
        labels = mel_outputs, predictions = mel_before, weights = mask
    )
    mel_loss_after = mae(
        labels = mel_outputs, predictions = mel_after, weights = mask
    )
    # Energy loss, masked per character.
    max_length = tf.cast(tf.reduce_max(energies_lengths), tf.int32)
    mask = tf.sequence_mask(
        lengths = energies_lengths, maxlen = max_length, dtype = tf.float32
    )
    energies_loss = mse(
        labels = energies, predictions = energy_outputs, weights = mask
    )
    # Pitch loss, masked per character.
    max_length = tf.cast(tf.reduce_max(f0s_lengths), tf.int32)
    mask = tf.sequence_mask(
        lengths = f0s_lengths, maxlen = max_length, dtype = tf.float32
    )
    f0s_loss = mse(labels = f0s, predictions = f0_outputs, weights = mask)
    loss = (
        duration_loss
        + mel_loss_before
        + mel_loss_after
        + energies_loss
        + f0s_loss
    )
    # Named identities/summaries so LoggingTensorHook and TensorBoard find them.
    tf.identity(loss, 'loss')
    tf.identity(duration_loss, name = 'duration_loss')
    tf.identity(mel_loss_before, name = 'mel_loss_before')
    tf.identity(mel_loss_after, name = 'mel_loss_after')
    tf.identity(energies_loss, name = 'energies_loss')
    tf.identity(f0s_loss, name = 'f0s_loss')
    tf.summary.scalar('duration_loss', duration_loss)
    tf.summary.scalar('mel_loss_before', mel_loss_before)
    tf.summary.scalar('mel_loss_after', mel_loss_after)
    tf.summary.scalar('energies_loss', energies_loss)
    tf.summary.scalar('f0s_loss', f0s_loss)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # AdamW with linear warmup (2% of total steps) and decay to 5e-5.
        train_op = train.optimizer.adamw.create_optimizer(
            loss,
            init_lr = 0.001,
            num_train_steps = total_steps,
            num_warmup_steps = int(0.02 * total_steps),
            end_learning_rate = 0.00005,
            weight_decay_rate = 0.001,
            beta_1 = 0.9,
            beta_2 = 0.98,
            epsilon = 1e-6,
            clip_norm = 1.0,
        )
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = mode, loss = loss, train_op = train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = tf.estimator.ModeKeys.EVAL, loss = loss
        )
    return estimator_spec
# Log every loss component at each step.
train_hooks = [
    tf.train.LoggingTensorHook(
        [
            'loss',
            'duration_loss',
            'mel_loss_before',
            'mel_loss_after',
            'energies_loss',
            'f0s_loss',
        ],
        every_n_iter = 1,
    )
]
train_dataset = get_dataset(files['train'])
dev_dataset = get_dataset(files['test'])

# Kick off estimator training; checkpoints go to 'fastspeech2-female'.
train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = 'fastspeech2-female',
    num_gpus = 1,
    log_step = 1,
    save_checkpoint_step = 2000,
    max_steps = total_steps,
    eval_fn = dev_dataset,
    train_hooks = train_hooks,
)
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
885459dea8753a0fa00de87c86459898fdf2ae8c | 491f9ca49bbb275c99248134c604da9fb43ee9fe | /P4_Mini_Project_NEURON/CaHVA_Allen/SK_Allen/plot_caproperties.py | 341397e1b2367ed75c37814b6a32e6e17ed72645 | [] | no_license | KineOdegardHanssen/PhD-subprojects | 9ef0facf7da4b2a80b4bea9c890aa04f0ddcfd1a | c275539689b53b94cbb85c0fdb3cea5885fc40e9 | refs/heads/Windows | 2023-06-08T13:32:15.179813 | 2023-06-05T08:40:10 | 2023-06-05T08:40:10 | 195,783,664 | 2 | 0 | null | 2020-08-18T14:42:21 | 2019-07-08T09:49:14 | Python | UTF-8 | Python | false | false | 4,369 | py | import os
from os.path import join
import sys
import matplotlib.pyplot as plt
import json
import neuron
import time as tm
import numpy as np
from matplotlib import gridspec

# Simulation parameters used to reconstruct the recorded file's name.
iamp = 0.006            # injected current amplitude (nA)
idur = 1000             # current duration (ms)
dtexp = -7              # timestep exponent
v_init = -86            # initial membrane potential (mV)
somasize = 10
cm_factor = 1.0
t_before_rec = -600.0
conc_at_halfopen = 0.00043287612810830614  # [Ca2+]_in at half-open SK (mM)
gcahva = 0.2
gsk = 1.0
namestring = ''
namestring = namestring + '_gSK'+str(gsk)+'p'
namestring = namestring + '_gCaHVA'+str(gcahva)+'p'
namestring = namestring +'_'
folder = 'Results/Soma%i/current_idur%i_iamp'%(somasize,idur)+str(iamp)+'/'
if os.path.exists(folder)==False:
    os.mkdir(folder)
# Recorded columns: t, v, eca, cai, cao, I_SK, I_CaHVA, g_SK, g_CaHVA
filename = folder+namestring+'somaonly_cm'+str(cm_factor)+'_idur%i_iamp'%idur+str(iamp)+'_dtexp%i_vinit' % dtexp+str(v_init)+'_trec'+str(t_before_rec)+'_V_eca.txt'
data = np.loadtxt(filename)
t = data[:, 0]
v = data[:, 1]
eca = data[:, 2]
cai = data[:, 3]
cao = data[:, 4]
I_SK = data[:, 5]
I_Ca_HVA = data[:, 6]
g_SK = data[:, 7]
g_Ca_HVA = data[:, 8]

# Figure layout: 8 panels in a 4x4 grid (two columns of four).
fig = plt.figure(figsize=(10,8),dpi=300)
gs = gridspec.GridSpec(4, 4)
ax1 = plt.subplot(gs[0, 0:2])
ax2 = plt.subplot(gs[1, 0:2])
ax3 = plt.subplot(gs[2, 0:2])
ax4 = plt.subplot(gs[3, 0:2])
ax5 = plt.subplot(gs[0, 2:4])
ax6 = plt.subplot(gs[1, 2:4])
ax7 = plt.subplot(gs[2, 2:4])
ax8 = plt.subplot(gs[3, 2:4])

# Vertical dotted lines at x=100/1100 mark current onset/offset in all panels.
# Panel: membrane potential.
ax1.plot(t,v)
ax1.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax1.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax1.set_ylabel(r'$V$ (mV)',fontsize=12)
ax1.set_title(r'$I=$ %s nA' % str(iamp),fontsize=16)
ax1.set_title('A', loc='left',fontsize=18)

# Panel: calcium reversal potential.
# NOTE(review): this panel reuses label 'A' (same as ax1); later panels are
# offset by one letter — likely should run A..H. Confirm intended labels.
ax2.plot(t,eca,color='k')
ax2.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax2.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax2.set_ylabel(r'$E_\mathregular{Ca}$',fontsize=12)
ax2.set_title(r'$E_\mathregular{Ca}$',fontsize=16)
ax2.set_title('A', loc='left',fontsize=18)

# Panel: intracellular [Ca2+]; dashed line marks SK half-open concentration.
ax3.plot(t,cai,color='tab:brown')
ax3.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax3.axhline(y=conc_at_halfopen,color='k',linestyle='--',linewidth=0.75)
ax3.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax3.set_ylabel(r'Concentration (mM)',fontsize=12)
ax3.set_title(r'$\left[\mathregular{Ca}^{2+}\right]_\mathregular{in}$',fontsize=16)
ax3.set_title('B', loc='left',fontsize=18)

# Panel: extracellular [Ca2+].
ax4.plot(t,cao,color='tab:brown')
ax4.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax4.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax4.set_xlabel(r'$t$ (ms)',fontsize=12)
ax4.set_ylabel(r'Concentration (mM)',fontsize=12)
ax4.set_title(r'$\left[\mathregular{Ca}^{2+}\right]_\mathregular{out}$',fontsize=16)
ax4.set_title('C', loc='left',fontsize=18)

# Panel: SK current.
ax5.plot(t,I_SK,color='tab:gray')
ax5.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax5.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax5.set_ylabel(r'$I_\mathregular{SK}$ (nA)',fontsize=12)
ax5.set_title(r'$I_\mathregular{SK}$',fontsize=16)
ax5.set_title('D', loc='left',fontsize=18)

# Panel: CaHVA current.
ax6.plot(t,I_Ca_HVA,color='tab:gray')
ax6.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax6.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax6.set_ylabel(r'$I_\mathregular{CaHVA}$ (nA)',fontsize=12)
ax6.set_title(r'$I_\mathregular{CaHVA}$',fontsize=16)
ax6.set_title('E', loc='left',fontsize=18)

# Panel: SK conductance.
ax7.plot(t,g_SK,color='tab:purple')
ax7.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax7.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax7.set_ylabel(r'$g_\mathregular{SK}$ (S/cm$^2$)',fontsize=12)
ax7.set_title(r'$g_\mathregular{SK}$',fontsize=16)
ax7.set_title('F', loc='left',fontsize=18)

# Panel: CaHVA conductance.
ax8.plot(t,g_Ca_HVA,color='tab:purple')
ax8.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax8.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax8.set_xlabel(r'$t$ (ms)',fontsize=12)
ax8.set_ylabel(r'$g_\mathregular{CaHVA}$ (S/cm$^2$)',fontsize=12)
ax8.set_title(r'$g_\mathregular{CaHVA}$',fontsize=16)
ax8.set_title('G', loc='left',fontsize=18)

plt.tight_layout()
plt.savefig('Results/Soma%i/Ca-properties/Ca_E_and_concentrations_iamp'%somasize+str(iamp)+'_idur'+str(idur)+'.png')
plt.show()
| [
"noreply@github.com"
] | KineOdegardHanssen.noreply@github.com |
8632d96a8605aa4d7038e5a4711dc6e00361121a | e3765def4a180f1d51eaef3884448b0bb9be2cd3 | /example/09.4.6_nested_moudle/my_car.py | 01d9bc8bea5bdf6af985b22f91b6c3e5552edeab | [] | no_license | spearfish/python-crash-course | cbeb254efdf0c1ab37d8a7d2fa0409194f19fa2b | 66bc42d41395cc365e066a597380a96d3282d30b | refs/heads/master | 2023-07-14T11:04:49.276764 | 2021-08-20T10:02:27 | 2021-08-20T10:02:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | #!/usr/bin/env python3
# Demonstrates importing a class from a nested module layout (chapter 9.4.6).
from electric_car import ElectricCar

# Build one electric car and print its formatted description.
tesla = ElectricCar('tesla', 'model s', 2020)
print(tesla.get_descriptive_name())
| [
"jingchen@tutanota.com"
] | jingchen@tutanota.com |
a9475d4d9623e92ea5a6ec88961e95168f7ea56e | 50db76c3c6f1d56d454d9d8411f2c7969ce906a8 | /scrapeNews/scrapeNews/spiders/oneindiaHindi.py | 86d4846bda58de2ba9506653c15dad404951b9cf | [] | no_license | anujaagarwal/scrape | 4199ec4ea353235b8f9e254215210a3783480365 | 6c2e70920b40bb99f7fe287f8dce8179d68cad99 | refs/heads/master | 2021-09-04T14:43:54.752275 | 2018-01-19T15:53:05 | 2018-01-19T15:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,828 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapeNews.items import ScrapenewsItem
import logging

# Dedicated logger for scrape failures (presumably configured in project settings).
loggerError = logging.getLogger("scrapeNewsError")
class OneindiahindiSpider(scrapy.Spider):
    """Crawl hindi.oneindia.com India-news listing pages and scrape articles."""

    name = 'oneindiaHindi'
    allowed_domains = ['oneindia.com']

    def __init__(self, pages=4, *args, **kwargs):
        """Queue `pages` listing pages (?page-no=1..pages) as start URLs."""
        super(OneindiahindiSpider, self).__init__(*args, **kwargs)
        for count in range(1, int(pages) + 1):
            self.start_urls.append(
                'https://hindi.oneindia.com/news/india/?page-no=' + str(count)
            )

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url, self.parse)

    def parse(self, response):
        """Extract article links from a listing page and schedule each article."""
        newsContainer = response.xpath('//div[@id="collection-wrapper"]/article')
        for newsBox in newsContainer:
            link = 'https://hindi.oneindia.com/news/india/' + newsBox.xpath(
                'div/h2/a/@href'
            ).extract_first()
            yield scrapy.Request(url=link, callback=self.parse_article)

    def parse_article(self, response):
        """Build a ScrapenewsItem from an article page; yield if any field parsed."""
        item = ScrapenewsItem()
        item['image'] = self.getPageImage(response)
        item['title'] = self.getPageTitle(response)
        item['content'] = self.getPageContent(response)
        item['newsDate'] = self.getPageDate(response)
        item['link'] = response.url
        item['source'] = 110
        # BUG FIX: the original used `is not 'Error'` — an identity check on a
        # string literal, which is unreliable; compare by value instead.
        if (
            item['image'] != 'Error'
            or item['title'] != 'Error'
            or item['content'] != 'Error'
            or item['newsDate'] != 'Error'
        ):
            yield item

    def getPageContent(self, response):
        """Return the first ~40 words of the article body, or 'Error'."""
        try:
            data = ' '.join(
                (
                    ''.join(
                        response.xpath(
                            "//div[contains(@class,'io-article-body')]/p/text()"
                        ).extract()
                    )
                ).split(' ')[:40]
            )
        except Exception:  # narrowed from a bare except
            loggerError.error(response.url)
            data = 'Error'
        return data

    def getPageTitle(self, response):
        """Return the article headline, or 'Error'."""
        data = response.xpath("//h1[contains(@class,'heading')]/text()").extract_first()
        if data is None:
            loggerError.error(response.url)
            data = 'Error'
        return data

    def getPageImage(self, response):
        """Return the absolute article-image URL, or 'Error'.

        BUG FIX: the original concatenated the base URL with extract_first()
        *before* checking for None, so a missing image raised TypeError and the
        None checks were unreachable.
        """
        src = response.xpath(
            "//img[contains(@class,'image_listical')]/@src"
        ).extract_first()
        if src is None:
            # Lazy-loaded images keep the URL in a pagespeed attribute.
            src = response.xpath(
                "//img[contains(@class,'image_listical')]/@data-pagespeed-lazy-src"
            ).extract_first()
        if src is None:
            loggerError.error(response.url)
            return 'Error'
        return 'https://hindi.oneindia.com' + src

    def getPageDate(self, response):
        """Return the <time datetime=...> value with its timezone suffix stripped, or 'Error'.

        BUG FIX: the original returned from `finally`, which would also have
        swallowed any exception escaping the handler itself.
        """
        try:
            data = (response.xpath("//time/@datetime").extract_first()).rsplit('+', 1)[0]
        except Exception as Error:
            loggerError.error(str(Error) + ' occured at: ' + response.url)
            data = 'Error'
        return data
| [
"ajay39in@gmail.com"
] | ajay39in@gmail.com |
ea63686d38514e03cb78a5213fb4e6ce1e1402d7 | 677c21c723a6d6003f8e8804bbd98d42992301c9 | /oz/plugins/json_api/options.py | 896060d8262e74682a27fd63e5da9c1f9c88436a | [] | no_license | xoxoj/oz | ebd95f53f6a34ac8c7e9f2210411d852492328b3 | 70b6b64c87c9f06e6edce2736b4c7d1394ca5cb5 | refs/heads/master | 2021-01-20T16:34:32.050599 | 2014-08-28T20:11:07 | 2014-08-28T20:11:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
import oz
oz.option("allow_jsonp", type=bool, default=True, help="Whether to allow for JSONP requests")
| [
"simonson@gmail.com"
] | simonson@gmail.com |
000de5eb783d8e4b5a0eaca28d77da1405077226 | 20860030d52b5be62cb797e396a5a6b83f45dc44 | /bravehub_shared/utils/dynamic_object.py | 8ca5b1b1dbcae64bd938af10720c6743fdebedf5 | [] | no_license | rcosnita/bravehub | 189d30c46224dd80d6fbf41c50a33559ec2f44ae | 960bcfdb3c2e53e81aa75f7a48980e4918cfd4bb | refs/heads/master | 2022-12-21T11:28:16.626690 | 2018-02-17T10:43:09 | 2018-02-17T10:43:09 | 98,259,347 | 0 | 1 | null | 2022-12-19T13:27:11 | 2017-07-25T03:17:44 | Python | UTF-8 | Python | false | false | 514 | py | """Provides a very simple implementation which creates dynamic objects starting from a dictionary.
"""
class DynamicObject(dict):
  """A dict subclass whose keys are also reachable as attributes.

  Nested dict values are wrapped in DynamicObject on attribute access, so
  chained lookups (obj.a.b) work; missing keys resolve to None.
  """

  def __init__(self, data=None):
    super().__init__()
    self._data = data or {}
    self.update(self._data)

  def __getattr__(self, name):
    entry = dict.get(self, name)
    if isinstance(entry, dict):
      entry = DynamicObject(entry)
    return entry
| [
"radu.cosnita@gmail.com"
] | radu.cosnita@gmail.com |
c267fba6099b86c7555a9dd5b18cd541c76b8015 | 2da6c42227de4c414dffa9dfd2da97862847e147 | /Algorismica avançada - UB/Pau/(ALGA)Algorismica avançada/(ALGA)Algorismica avançada/cristhian/practicas/p3/ex3_CarmonaTorresCristhian.py | 626de3d785184d8fadc5c76a0613b49a6de32bff | [] | no_license | fitigf15/PYTHON-VICTOR | 9aa4d0233532d5a58c4c9ec9ca02f069a5a5c2cc | 864ee3136839f2d507efae5c18a455a9f392020f | refs/heads/master | 2020-03-30T19:17:33.179271 | 2015-04-15T19:18:18 | 2015-04-15T19:18:18 | 24,456,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,478 | py | # Cristhian Carmona Torres
def leerFichero(nombre):
    """Read the input sudoku file and load it into the global 9x9 matrix `ma`.

    Each of the first 9 lines holds 9 characters; '.' marks an empty cell
    (stored as 0), any other character is parsed as a digit.
    """
    file = open(nombre, "r")
    text = file.readlines()
    file.close()
    global ma
    ma = [[0] * 9 for x in xrange(9)]
    for fila in range(len(text)):        # rows of the input file
        for colum in range(9):           # first 9 characters per row
            if text[fila][colum] == '.':
                ma[fila][colum] = 0      # empty cell
            else:
                ma[fila][colum] = int(text[fila][colum])
def sudokuVer(i, j, sol, inicial):
    """Backtracking solver: fill `sol` in reading order, print each solution.

    `inicial[i][j]` is True for cells fixed by the puzzle, which are skipped.
    """
    if (i == 9) and (j == 0):
        # Walked past the last row: the board is fully consistent — print it.
        print '***SUDOKU RESUELTO***'
        for fi in sol:
            print fi
    else:
        if not (inicial[i][j]):
            # Free cell: try every candidate 1..9.
            for k in range(1, 10):
                sol[i][j] = k                       # tentative placement
                if satisfact(i, j, sol):            # consistent so far?
                    if (j == 8):                    # end of row: wrap to next
                        sudokuVer(i + 1, 0, sol, inicial)
                    else:
                        sudokuVer(i, j + 1, sol, inicial)
                sol[i][j] = 0                       # undo (backtrack)
        else:
            # Fixed puzzle cell: just advance to the next position.
            if (j == 8):
                sudokuVer(i + 1, 0, sol, inicial)
            else:
                sudokuVer(i, j + 1, sol, inicial)
    return False
def sudoku(sol):
    """Print the initial board, then solve it in place via sudokuVer."""
    # inicial[i][j] is True when the puzzle fixes that cell (non-zero input),
    # so the solver knows which cells it may not change.
    inicial = [[False] * 9 for x in xrange(9)]
    print '***INICIAL SIN RESOLVER***'
    for a in sol:
        print a
    for i in range(9):
        for j in range(9):
            inicial[i][j] = sol[i][j] != 0
    # Start the backtracking search at the top-left corner.
    sudokuVer(0, 0, sol, inicial)
def satisfact(i, j, sol):
    """Return True if the candidate at sol[i][j] respects all sudoku rules."""
    cand = sol[i][j]
    # Column rule: no other row may hold the candidate in column j.
    for row in range(9):
        if row != i and sol[row][j] == cand:
            return False
    # Row rule: no other column may hold the candidate in row i.
    for col in range(9):
        if col != j and sol[i][col] == cand:
            return False
    # Region rule: scan the 3x3 box; cells sharing the row or column with
    # (i, j) are skipped, exactly as the original did (they are already
    # covered by the row/column checks above).
    row_start = obtieneRegion(i)
    col_start = obtieneRegion(j)
    for row in range(row_start, row_start + 3):
        for col in range(col_start, col_start + 3):
            if row != i and col != j and sol[row][col] == cand:
                return False
    return True
def obtieneRegion(i):
    """Return the starting index (0, 3 or 6) of the 3x3 region containing index i.

    Replaces the original branching arithmetic: the region start is simply the
    index rounded down to the nearest multiple of 3. Floor division keeps the
    result identical under both Python 2 and Python 3.
    """
    return (i // 3) * 3
# ---- Script entry point: load the puzzle file and solve it ----
leerFichero("sudoku1.txt")
sudoku(ma)
'''
sudo1 = [[1,2,0,0,5,0,7,0,0],
[8,4,0,3,7,0,5,0,1],
[9,0,0,0,4,2,0,6,8],
[5,0,8,0,2,0,9,0,0],
[6,0,2,8,3,0,1,5,4],
[7,3,4,0,1,0,0,8,0],
[0,0,0,0,0,0,4,0,0],
[4,6,1,0,0,3,8,7,5],
[3,5,9,0,8,4,6,1,0]]
-----------------------------
SOLUCION SUDOKU 1
[1,2,3,6,5,8,7,4,9]
[8,4,6,3,7,9,5,2,1]
[9,7,5,1,4,2,3,6,8]
[5,1,8,4,2,6,9,3,7]
[6,9,2,8,3,7,1,5,4]
[7,3,4,9,1,5,2,8,6]
[2,8,7,5,6,1,4,9,3]
[4,6,1,2,9,3,8,7,5]
[3,5,9,7,8,4,6,1,2]
'''
| [
"fiti.gf15@gmail.com"
] | fiti.gf15@gmail.com |
a9028b7299ff6a4d6d81002e6ae5b4392b63bfe8 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /mt4E3MYkoJASY8TE6_5.py | 3d585d3deedacfff68f630c5e47a071f21f443c9 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py |
from itertools import product
# Keypad adjacency table: each digit maps to itself plus its horizontal and
# vertical neighbours on a phone keypad. Built once at module level instead of
# rebuilding the dict literal on every call.
_ADJACENT = {
    '0': ('0', '8'),
    '1': ('1', '2', '4'),
    '2': ('1', '2', '3', '5'),
    '3': ('2', '3', '6'),
    '4': ('1', '4', '5', '7'),
    '5': ('2', '4', '5', '6', '8'),
    '6': ('3', '5', '6', '9'),
    '7': ('4', '7', '8'),
    '8': ('0', '5', '7', '8', '9'),
    '9': ('6', '8', '9'),
}


def adjacent(key):
    """Return the tuple of keypad digits adjacent to *key* (including *key*)."""
    return _ADJACENT[key]
def crack_pincode(pincode):
    """Return every code formed by replacing each digit with an adjacent keypad digit."""
    neighbor_sets = [adjacent(digit) for digit in pincode]
    return [''.join(combo) for combo in product(*neighbor_sets)]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
2e5868c9ca64c5e782be4e32e551b32d93c088f3 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/snooz/test_config_flow.py | d8bac9ea18c5ba84d897872cd512bb1979645857 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 11,912 | py | """Test the Snooz config flow."""
from __future__ import annotations
import asyncio
from asyncio import Event
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.snooz import DOMAIN
from homeassistant.const import CONF_ADDRESS, CONF_NAME, CONF_TOKEN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
from . import (
NOT_SNOOZ_SERVICE_INFO,
SNOOZ_SERVICE_INFO_NOT_PAIRING,
SNOOZ_SERVICE_INFO_PAIRING,
TEST_ADDRESS,
TEST_PAIRING_TOKEN,
TEST_SNOOZ_DISPLAY_NAME,
)
from tests.common import MockConfigEntry
async def test_async_step_bluetooth_valid_device(hass: HomeAssistant) -> None:
    """Test discovery via bluetooth with a valid device."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_BLUETOOTH},
        data=SNOOZ_SERVICE_INFO_PAIRING,
    )
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "bluetooth_confirm"
    # _test_setup_entry (helper defined elsewhere in this module) presumably
    # confirms the form and asserts the created entry.
    await _test_setup_entry(hass, result["flow_id"])


async def test_async_step_bluetooth_waits_to_pair(hass: HomeAssistant) -> None:
    """Test discovery via bluetooth with a device that's not in pairing mode, but enters pairing mode to complete setup."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_BLUETOOTH},
        data=SNOOZ_SERVICE_INFO_NOT_PAIRING,
    )
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "bluetooth_confirm"
    await _test_pairs(hass, result["flow_id"])


async def test_async_step_bluetooth_retries_pairing(hass: HomeAssistant) -> None:
    """Test discovery via bluetooth with a device that's not in pairing mode, times out waiting, but eventually complete setup."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_BLUETOOTH},
        data=SNOOZ_SERVICE_INFO_NOT_PAIRING,
    )
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "bluetooth_confirm"
    # First attempt times out; the returned flow id is then retried to success.
    retry_id = await _test_pairs_timeout(hass, result["flow_id"])
    await _test_pairs(hass, retry_id)


async def test_async_step_bluetooth_not_snooz(hass: HomeAssistant) -> None:
    """Test discovery via bluetooth not Snooz."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_BLUETOOTH},
        data=NOT_SNOOZ_SERVICE_INFO,
    )
    assert result["type"] == FlowResultType.ABORT
    assert result["reason"] == "not_supported"


async def test_async_step_user_no_devices_found(hass: HomeAssistant) -> None:
    """Test setup from service info cache with no devices found."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_USER},
    )
    assert result["type"] == FlowResultType.ABORT
    assert result["reason"] == "no_devices_found"


async def test_async_step_user_with_found_devices(hass: HomeAssistant) -> None:
    """Test setup from service info cache with devices found."""
    with patch(
        "homeassistant.components.snooz.config_flow.async_discovered_service_info",
        return_value=[SNOOZ_SERVICE_INFO_PAIRING],
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_USER},
        )
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "user"
    assert result["data_schema"]
    # ensure discovered devices are listed as options
    assert result["data_schema"].schema["name"].container == [TEST_SNOOZ_DISPLAY_NAME]
    await _test_setup_entry(
        hass, result["flow_id"], {CONF_NAME: TEST_SNOOZ_DISPLAY_NAME}
    )


async def test_async_step_user_with_found_devices_waits_to_pair(
    hass: HomeAssistant,
) -> None:
    """Test setup from service info cache with devices found that require pairing mode."""
    with patch(
        "homeassistant.components.snooz.config_flow.async_discovered_service_info",
        return_value=[SNOOZ_SERVICE_INFO_NOT_PAIRING],
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_USER},
        )
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "user"
    await _test_pairs(hass, result["flow_id"], {CONF_NAME: TEST_SNOOZ_DISPLAY_NAME})


async def test_async_step_user_with_found_devices_retries_pairing(
    hass: HomeAssistant,
) -> None:
    """Test setup from service info cache with devices found that require pairing mode, times out, then completes."""
    with patch(
        "homeassistant.components.snooz.config_flow.async_discovered_service_info",
        return_value=[SNOOZ_SERVICE_INFO_NOT_PAIRING],
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_USER},
        )
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "user"
    user_input = {CONF_NAME: TEST_SNOOZ_DISPLAY_NAME}
    retry_id = await _test_pairs_timeout(hass, result["flow_id"], user_input)
    await _test_pairs(hass, retry_id, user_input)


async def test_async_step_user_device_added_between_steps(hass: HomeAssistant) -> None:
    """Test the device gets added via another flow between steps."""
    with patch(
        "homeassistant.components.snooz.config_flow.async_discovered_service_info",
        return_value=[SNOOZ_SERVICE_INFO_PAIRING],
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_USER},
        )
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "user"
    # Simulate a parallel flow registering the same device before this flow
    # confirms, so the confirmation step must abort as already_configured.
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=TEST_ADDRESS,
        data={CONF_NAME: TEST_SNOOZ_DISPLAY_NAME, CONF_TOKEN: TEST_PAIRING_TOKEN},
    )
    entry.add_to_hass(hass)
    with patch("homeassistant.components.snooz.async_setup_entry", return_value=True):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            user_input={CONF_NAME: TEST_SNOOZ_DISPLAY_NAME},
        )
    assert result2["type"] == FlowResultType.ABORT
    assert result2["reason"] == "already_configured"
async def test_async_step_user_with_found_devices_already_setup(
hass: HomeAssistant,
) -> None:
"""Test setup from service info cache with devices found."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_ADDRESS,
data={CONF_NAME: TEST_SNOOZ_DISPLAY_NAME, CONF_TOKEN: TEST_PAIRING_TOKEN},
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.snooz.config_flow.async_discovered_service_info",
return_value=[SNOOZ_SERVICE_INFO_PAIRING],
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
)
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "no_devices_found"
async def test_async_step_bluetooth_devices_already_setup(hass: HomeAssistant) -> None:
"""Test we can't start a flow if there is already a config entry."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_ADDRESS,
data={CONF_NAME: TEST_SNOOZ_DISPLAY_NAME, CONF_TOKEN: TEST_PAIRING_TOKEN},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_BLUETOOTH},
data=SNOOZ_SERVICE_INFO_PAIRING,
)
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "already_configured"
async def test_async_step_bluetooth_already_in_progress(hass: HomeAssistant) -> None:
"""Test we can't start a flow for the same device twice."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_BLUETOOTH},
data=SNOOZ_SERVICE_INFO_PAIRING,
)
assert result["type"] == FlowResultType.FORM
assert result["step_id"] == "bluetooth_confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_BLUETOOTH},
data=SNOOZ_SERVICE_INFO_PAIRING,
)
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "already_in_progress"
async def test_async_step_user_takes_precedence_over_discovery(
hass: HomeAssistant,
) -> None:
"""Test manual setup takes precedence over discovery."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_BLUETOOTH},
data=SNOOZ_SERVICE_INFO_PAIRING,
)
assert result["type"] == FlowResultType.FORM
assert result["step_id"] == "bluetooth_confirm"
with patch(
"homeassistant.components.snooz.config_flow.async_discovered_service_info",
return_value=[SNOOZ_SERVICE_INFO_PAIRING],
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
)
assert result["type"] == FlowResultType.FORM
await _test_setup_entry(
hass, result["flow_id"], {CONF_NAME: TEST_SNOOZ_DISPLAY_NAME}
)
# Verify the original one was aborted
assert not hass.config_entries.flow.async_progress()
async def _test_pairs(
hass: HomeAssistant, flow_id: str, user_input: dict | None = None
) -> None:
pairing_mode_entered = Event()
async def _async_process_advertisements(
_hass, _callback, _matcher, _mode, _timeout
):
await pairing_mode_entered.wait()
service_info = SNOOZ_SERVICE_INFO_PAIRING
assert _callback(service_info)
return service_info
with patch(
"homeassistant.components.snooz.config_flow.async_process_advertisements",
_async_process_advertisements,
):
result = await hass.config_entries.flow.async_configure(
flow_id,
user_input=user_input or {},
)
assert result["type"] == FlowResultType.SHOW_PROGRESS
assert result["step_id"] == "wait_for_pairing_mode"
pairing_mode_entered.set()
await hass.async_block_till_done()
await _test_setup_entry(hass, result["flow_id"], user_input)
async def _test_pairs_timeout(
hass: HomeAssistant, flow_id: str, user_input: dict | None = None
) -> str:
with patch(
"homeassistant.components.snooz.config_flow.async_process_advertisements",
side_effect=asyncio.TimeoutError(),
):
result = await hass.config_entries.flow.async_configure(
flow_id, user_input=user_input or {}
)
assert result["type"] == FlowResultType.SHOW_PROGRESS
assert result["step_id"] == "wait_for_pairing_mode"
await hass.async_block_till_done()
result2 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result2["type"] == FlowResultType.FORM
assert result2["step_id"] == "pairing_timeout"
return result2["flow_id"]
async def _test_setup_entry(
hass: HomeAssistant, flow_id: str, user_input: dict | None = None
) -> None:
with patch("homeassistant.components.snooz.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
flow_id,
user_input=user_input or {},
)
assert result["type"] == FlowResultType.CREATE_ENTRY
assert result["data"] == {
CONF_ADDRESS: TEST_ADDRESS,
CONF_TOKEN: TEST_PAIRING_TOKEN,
}
assert result["result"].unique_id == TEST_ADDRESS
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
0baa3f08c72a3132e3304442ac9fc9bf099e582e | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008-EOL/desktop/kde/autostart/actions.py | 5576e5aca75b62900a5da327c29d6bfaba4e4595 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import kde
from pisi.actionsapi import pisitools
def setup():
kde.make("-f admin/Makefile.common")
kde.configure()
def build():
kde.make()
def install():
kde.install()
pisitools.remove("/usr/kde/3.5/share/applications/kde/autostart.desktop")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
4e2a40798b083b2083363cb89d385a8e009f8911 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_210/ch41_2020_03_05_10_24_22_440616.py | bae73a3be87a4021f3fe122e005c570cb255fec7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | def zera_negativos(lista):
for j, each in enumerate(lista):
if each < 0:
lista[j] = 0
return lista | [
"you@example.com"
] | you@example.com |
25642237bc42a97e075903094c6cd7f7410f8680 | b99ccdd014aaa364053f8aaec062963efc0d0e71 | /reconfigure/configs/squid.py | bfd164219cd08ffc4e3fae97dbb7e3a83465538b | [] | no_license | Eugeny/reconfigure | c6eac546b9b50aaf33290f8cedf61dd55c77e9a3 | ff1115dede4b80222a2618d0e7657cafa36a2573 | refs/heads/master | 2020-12-24T17:55:04.489144 | 2020-11-25T16:10:28 | 2020-11-25T16:10:28 | 5,739,146 | 63 | 18 | null | 2020-09-03T14:15:22 | 2012-09-09T16:14:15 | Python | UTF-8 | Python | false | false | 419 | py | from reconfigure.configs.base import Reconfig
from reconfigure.parsers import SquidParser
from reconfigure.builders import BoundBuilder
from reconfigure.items.squid import SquidData
class SquidConfig (Reconfig):
def __init__(self, **kwargs):
k = {
'parser': SquidParser(),
'builder': BoundBuilder(SquidData),
}
k.update(kwargs)
Reconfig.__init__(self, **k)
| [
"e@ajenti.org"
] | e@ajenti.org |
2cb70f597c0ac62ed2b36ad1c6c4716587f40acf | 64eb6c8e01e3cc0a149e9d93ee1dd103660c8156 | /Hack2-master/dih/bin/pip3.5 | f94c6ed7430a160b7a592d6479e72fa904a6abc3 | [] | no_license | Saifinbox/MVP1 | 41a58c6cbd067d29bfb5b2cc4610667490c27bc6 | e2bd14f8ab010b34d1a6a924caf390688e184026 | refs/heads/master | 2021-06-26T07:48:26.877118 | 2017-09-14T04:26:59 | 2017-09-14T04:26:59 | 103,485,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | 5 | #!/home/inbox-dih/Desktop/Hack2/dih/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"muhammad.saif@inboxbiz.com"
] | muhammad.saif@inboxbiz.com |
25308f8a5496cc03cee6fff110e023d4129c69ed | 122306a7e284774ac80c6fc381e4db2f26ea0c30 | /emission/analysis/modelling/tour_model_first_only/evaluation_pipeline.py | dddcd7d9529bf77f50cbdc1e830d94582ab15d67 | [
"BSD-3-Clause"
] | permissive | e-mission/e-mission-server | 4da028e34eaac32c0d27ec929f68b23905e6ca1e | 94e7478e627fa8c171323662f951c611c0993031 | refs/heads/master | 2023-09-01T06:53:33.926971 | 2023-08-15T23:39:00 | 2023-08-15T23:39:00 | 26,307,245 | 26 | 131 | BSD-3-Clause | 2023-09-14T05:08:59 | 2014-11-07T06:39:02 | Jupyter Notebook | UTF-8 | Python | false | false | 8,870 | py | # Standard imports
import numpy as np
import pandas as pd
import jsonpickle as jpickle
import logging
# Our imports
import emission.storage.timeseries.abstract_timeseries as esta
import emission.analysis.modelling.tour_model.similarity as similarity
import emission.analysis.modelling.tour_model.label_processing as lp
import emission.analysis.modelling.tour_model.data_preprocessing as preprocess
import emission.analysis.modelling.tour_model.second_round_of_clustering as sr
import emission.analysis.modelling.tour_model.get_users as gu
def second_round(bin_trips,filter_trips,first_labels,track,low,dist_pct,sim,kmeans):
sec = sr.SecondRoundOfClustering(bin_trips,first_labels)
first_label_set = list(set(first_labels))
for l in first_label_set:
sec.get_sel_features_and_trips(first_labels,l)
sec.hierarcial_clustering(low, dist_pct)
if kmeans:
sec.kmeans_clustering()
new_labels = sec.get_new_labels(first_labels)
track = sec.get_new_track(track)
# get request percentage for the subset for the second round
percentage_second = grp.get_req_pct(new_labels, track, filter_trips, sim)
# get homogeneity score for the second round
homo_second = gs.score(bin_trips, new_labels)
return percentage_second,homo_second
# we use functions in similarity to build the first round of clustering
def first_round(data,radius):
sim = similarity.similarity(data, radius, shouldFilter=False, cutoff=False)
filter_trips = sim.data
sim.fit()
bins = sim.bins
bin_trips = sim.data
return sim, bins, bin_trips, filter_trips
def get_first_label(bins):
# get first round labels
# the labels from the first round are the indices of bins
# e.g. in bin 0 [trip1, trip2, trip3], the labels of this bin is [0,0,0]
first_labels = []
for b in range(len(bins)):
for trip in bins[b]:
first_labels.append(b)
return first_labels
def get_track(bins, first_labels):
# create a list idx_labels_track to store indices and labels
# the indices of the items will be the same in the new label list after the second round clustering
# item[0] is the original index of the trip in filter_trips
# item[1] is the label from the first round of clustering
idx_labels_track = []
for bin in bins:
for ori_idx in bin:
idx_labels_track.append([ori_idx])
# store first round labels in idx_labels_track list
for i in range(len(first_labels)):
idx_labels_track[i].append(first_labels[i])
return idx_labels_track
def get_first_label_and_track(bins,bin_trips,filter_trips):
gs.compare_trip_orders(bins, bin_trips, filter_trips)
first_labels = get_first_label(bins)
track = get_track(bins, first_labels)
return first_labels,track
def tune(data,radius,kmeans):
sim, bins, bin_trips, filter_trips = first_round(data, radius)
# it is possible that we don't have common trips for tuning or testing
# bins contain common trips indices
if len(bins) is not 0:
first_labels, track = get_first_label_and_track(bins,bin_trips,filter_trips)
# collect tuning scores and parameters
tune_score = {}
for dist_pct in np.arange(0.15, 0.6, 0.02):
for low in range(250, 600):
percentage_second, homo_second = second_round(bin_trips,filter_trips,first_labels,track,low,dist_pct,
sim,kmeans)
curr_score = gs.get_score(homo_second, percentage_second)
if curr_score not in tune_score:
tune_score[curr_score] = (low, dist_pct)
best_score = max(tune_score)
sel_tradeoffs = tune_score[best_score]
low = sel_tradeoffs[0]
dist_pct = sel_tradeoffs[1]
else:
low = 0
dist_pct = 0
return low,dist_pct
def test(data,radius,low,dist_pct,kmeans):
sim, bins, bin_trips, filter_trips = first_round(data, radius)
# it is possible that we don't have common trips for tuning or testing
# bins contain common trips indices
if len(bins) is not 0:
first_labels, track = get_first_label_and_track(bins,bin_trips,filter_trips)
# new_labels temporary stores the labels from the first round, but later the labels in new_labels will be
# updated with the labels after two rounds of clustering.
new_labels = first_labels.copy()
# get request percentage for the subset for the first round
percentage_first = grp.get_req_pct(new_labels, track, filter_trips, sim)
# get homogeneity score for the subset for the first round
homo_first = gs.score(bin_trips, first_labels)
percentage_second, homo_second = second_round(bin_trips, filter_trips, first_labels, track, low, dist_pct,
sim, kmeans)
else:
percentage_first = 1
homo_first = 1
percentage_second = 1
homo_second = 1
scores = gs.get_score(homo_second, percentage_second)
return homo_first,percentage_first,homo_second,percentage_second,scores
def main(all_users):
radius = 100
all_filename = []
for a, user in enumerate(all_users):
logging.info(f"Starting evaluation for {user}")
df = pd.DataFrame(columns=['user','user_id','percentage of 1st round','homogeneity socre of 1st round',
'percentage of 2nd round','homogeneity socre of 2nd roun','scores','lower boundary',
'distance percentage'])
logging.info(f"At stage: Reading data")
trips = preprocess.read_data(user)
logging.info(f"At stage: Filtering data")
filter_trips = preprocess.filter_data(trips, radius)
# filter out users that don't have enough valid labeled trips
if not gu.valid_user(filter_trips, trips):
logging.warn(f"User {user} is invalid, early return")
continue
logging.info(f"At stage: Splitting data")
tune_idx, test_idx = preprocess.split_data(filter_trips)
# choose tuning/test set to run the model
# this step will use KFold (5 splits) to split the data into different subsets
# - tune: tuning set
# - test: test set
# Here we user a bigger part of the data for testing and a smaller part for tuning
tune_data = preprocess.get_subdata(filter_trips, test_idx)
test_data = preprocess.get_subdata(filter_trips, tune_idx)
# tune data
for i, curr_tune in enumerate(tune_data):
logging.info(f"At stage: starting tuning for stage {i}")
# for tuning, we don't add kmeans for re-clustering. We just need to get tuning parameters
# - low: the lower boundary of the dendrogram. If the final distance of the dendrogram is lower than "low",
# this bin no need to be re-clutered.
# - dist_pct: the higher boundary of the dendrogram. If the final distance is higher than "low",
# the cutoff of the dendrogram is (the final distance of the dendrogram * dist_pct)
low, dist_pct = tune(curr_tune, radius, kmeans=False)
df.loc[i,'lower boundary']=low
df.loc[i,'distance percentage']=dist_pct
# testing
for i, curr_test in enumerate(test_data):
logging.info(f"At stage: starting testing for stage {i}")
low = df.loc[i,'lower boundary']
dist_pct = df.loc[i,'distance percentage']
# for testing, we add kmeans to re-build the model
homo_first, percentage_first, homo_second, percentage_second, scores = test(curr_test,radius,low,
dist_pct,kmeans=True)
df.loc[i, 'percentage of 1st round'] = percentage_first
df.loc[i, 'homogeneity socre of 1st round'] = homo_first
df.loc[i, 'percentage of 2nd round'] = percentage_second
df.loc[i, 'homogeneity socre of 2nd round'] = homo_second
df.loc[i, 'scores'] = scores
df['user_id'] = user
df['user']='user'+str(a+1)
logging.info(f"At stage: parameter selection outputs complete")
filename = "user_" + str(user) + ".csv"
all_filename.append(filename)
df.to_csv(filename, index=True, index_label='split')
# collect filename in a file, use it to plot the scatter
collect_filename = jpickle.dumps(all_filename)
with open("collect_filename", "w") as fd:
fd.write(collect_filename)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
level=logging.DEBUG)
all_users = esta.TimeSeries.get_uuid_list()
main(all_users)
| [
"shankari@eecs.berkeley.edu"
] | shankari@eecs.berkeley.edu |
71fab5d2fe8bbbe019765dfe164587d5dc579854 | 8259dd9ee47ed3cfa75315ccb6ab04859432b049 | /speaker/bosch.py | 7e4434d9cae41582a2c6a91f4ebcdee17663524d | [
"MIT"
] | permissive | shannon-jia/speaker | ef83572bf64d766d8abd21c3ed6483cdbc6ff7e0 | 31c642f018725dd4878ef6a4e7a19b12b05774c8 | refs/heads/master | 2020-03-22T02:30:25.199727 | 2018-07-02T00:59:03 | 2018-07-02T00:59:03 | 139,374,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,271 | py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import asyncio
import logging
log = logging.getLogger(__name__)
class TcpClientProtocol(asyncio.Protocol):
def __init__(self, master):
self.master = master
def connection_made(self, transport):
self.transport = transport
self.master.connected = True
def data_received(self, data):
log.debug('Data received: {!r}'.format(data.decode()))
def connection_lost(self, exc):
log.error('The server closed the connection')
self.master.connected = None
class Bosch(object):
TYPE_OIP_Login = b'\x02\x70\x44\x00\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x61\x64\x6d\x69\x6e\x05\x00\x00\x00\x61\x64\x6d\x69\x6e'
TYPE_OIP_StartCall = b'\x03\x70\x44\x00\x39\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x50\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x41\x4c\x4c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x78\x69\x61\x6f\x66\x61\x6e\x67'
TYPE_OIP_KeepAlive = b'\x27\x70\x44\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def __init__(self, loop, host, port,
user='admin', passwd='admin'):
self.loop = loop
self.host = host
self.port = port
self.user = user
self.passwd = passwd
self.connected = None
self.loop.create_task(self._do_connect())
self.transport = None
self.loop.call_later(6, self.keepAlive)
async def _do_connect(self):
while True:
await asyncio.sleep(5)
if self.connected:
continue
try:
xt, _ = await self.loop.create_connection(
lambda: TcpClientProtocol(self),
self.host,
self.port)
log.info('Connection create on {}'.format(xt))
self.transport = xt
self.login()
except OSError:
log.error('Server not up retrying in 5 seconds...')
except Exception as e:
log.error('Error when connect to server: {}'.format(e))
def call(self, cmd):
if self.transport:
self.transport.write(cmd)
log.debug('send cmd to server: {}'.format(cmd))
else:
log.error('Invalid server transport.')
def login(self):
log.info('send cmd to server: [login]')
self.call(self.TYPE_OIP_Login)
def keepAlive(self):
log.info('send cmd to server: [keepAlive]')
self.call(self.TYPE_OIP_KeepAlive)
self.loop.call_later(5, self.keepAlive)
def startCall(self):
log.info('send cmd to server: [startCall]')
self.call(self.TYPE_OIP_StartCall)
class EchoServerClientProtocol(asyncio.Protocol):
def connection_made(self, transport):
peername = transport.get_extra_info('peername')
print('======== Server =========: Connection from {}'.format(peername))
self.transport = transport
def data_received(self, data):
message = data.decode()
print('======== Server =========: Data received: {!r}'.format(message))
print('======== Server =========: Send: {!r}'.format(message))
self.transport.write(data)
#
# print('Close the client socket')
# self.transport.close()
if __name__ == '__main__':
log = logging.getLogger("")
formatter = logging.Formatter("%(asctime)s %(levelname)s " +
"[%(module)s:%(lineno)d] %(message)s")
# log the things
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
log.addHandler(ch)
loop = asyncio.get_event_loop()
bosch = Bosch(loop,
'127.0.0.1',
8888)
coro = loop.create_server(EchoServerClientProtocol, '127.0.0.1', 8888)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
| [
"lishengchen@mingvale.com"
] | lishengchen@mingvale.com |
70f27d8290cfbcd65cf2f995d061568351e6eb90 | e3e6fc037f47527e6bc43f1d1300f39ac8f0aabc | /google/appengine/v1/appengine_pb2_grpc.py | 60fb23ba9b3aba51f3ec15d92a40ea11c42f8de6 | [] | no_license | msachtler/bazel-event-protocol-parser | 62c136cb1f60f4ee3316bf15e1e5a5e727445536 | d7424d21aa0dc121acc4d64b427ba365a3581a20 | refs/heads/master | 2021-07-05T15:13:19.502829 | 2017-09-24T04:15:16 | 2017-09-24T04:15:16 | 102,999,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,239 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.appengine.v1 import appengine_pb2 as google_dot_appengine_dot_v1_dot_appengine__pb2
from google.appengine.v1 import application_pb2 as google_dot_appengine_dot_v1_dot_application__pb2
from google.appengine.v1 import instance_pb2 as google_dot_appengine_dot_v1_dot_instance__pb2
from google.appengine.v1 import service_pb2 as google_dot_appengine_dot_v1_dot_service__pb2
from google.appengine.v1 import version_pb2 as google_dot_appengine_dot_v1_dot_version__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
class InstancesStub(object):
  """Client-side stub for the google.appengine.v1.Instances service.

  Manages instances of a version.
  """

  def __init__(self, channel):
    """Bind every RPC of the Instances service to the given channel.

    Args:
      channel: A grpc.Channel.
    """
    _appengine = google_dot_appengine_dot_v1_dot_appengine__pb2
    # (attribute name, full RPC path, request serializer, response deserializer)
    _rpc_table = (
        ('ListInstances',
         '/google.appengine.v1.Instances/ListInstances',
         _appengine.ListInstancesRequest.SerializeToString,
         _appengine.ListInstancesResponse.FromString),
        ('GetInstance',
         '/google.appengine.v1.Instances/GetInstance',
         _appengine.GetInstanceRequest.SerializeToString,
         google_dot_appengine_dot_v1_dot_instance__pb2.Instance.FromString),
        ('DeleteInstance',
         '/google.appengine.v1.Instances/DeleteInstance',
         _appengine.DeleteInstanceRequest.SerializeToString,
         google_dot_longrunning_dot_operations__pb2.Operation.FromString),
        ('DebugInstance',
         '/google.appengine.v1.Instances/DebugInstance',
         _appengine.DebugInstanceRequest.SerializeToString,
         google_dot_longrunning_dot_operations__pb2.Operation.FromString),
    )
    for attr_name, rpc_path, req_serializer, resp_deserializer in _rpc_table:
      setattr(self, attr_name, channel.unary_unary(
          rpc_path,
          request_serializer=req_serializer,
          response_deserializer=resp_deserializer,
      ))
class InstancesServicer(object):
  """Server-side handler base class for the google.appengine.v1.Instances
  service.

  Manages instances of a version.  Subclass and override the RPC methods
  below; every default implementation reports UNIMPLEMENTED.
  """

  def _unimplemented(self, context):
    # Shared default behaviour: flag the RPC as unimplemented on the gRPC
    # context, then raise so in-process callers also see a hard failure.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ListInstances(self, request, context):
    """Lists the instances of a version."""
    self._unimplemented(context)

  def GetInstance(self, request, context):
    """Gets instance information."""
    self._unimplemented(context)

  def DeleteInstance(self, request, context):
    """Stops a running instance."""
    self._unimplemented(context)

  def DebugInstance(self, request, context):
    """Enables debugging on a VM instance. This allows you to use the SSH
    command to connect to the virtual machine where the instance lives.
    While in "debug mode", the instance continues to serve live traffic.
    You should delete the instance when you are done debugging and then
    allow the system to take over and determine if another instance
    should be started.

    Only applicable for instances in App Engine flexible environment.
    """
    self._unimplemented(context)
def add_InstancesServicer_to_server(servicer, server):
  """Register an Instances servicer's RPC handlers with a grpc.Server."""
  _appengine = google_dot_appengine_dot_v1_dot_appengine__pb2
  # method name -> (behaviour, request deserializer, response serializer)
  method_table = {
      'ListInstances': (
          servicer.ListInstances,
          _appengine.ListInstancesRequest.FromString,
          _appengine.ListInstancesResponse.SerializeToString),
      'GetInstance': (
          servicer.GetInstance,
          _appengine.GetInstanceRequest.FromString,
          google_dot_appengine_dot_v1_dot_instance__pb2.Instance.SerializeToString),
      'DeleteInstance': (
          servicer.DeleteInstance,
          _appengine.DeleteInstanceRequest.FromString,
          google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString),
      'DebugInstance': (
          servicer.DebugInstance,
          _appengine.DebugInstanceRequest.FromString,
          google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString),
  }
  rpc_method_handlers = {
      name: grpc.unary_unary_rpc_method_handler(
          behaviour,
          request_deserializer=req_deserializer,
          response_serializer=resp_serializer)
      for name, (behaviour, req_deserializer, resp_serializer)
      in method_table.items()
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'google.appengine.v1.Instances', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
class VersionsStub(object):
  """Client-side stub for the google.appengine.v1.Versions service.

  Manages versions of a service.
  """

  def __init__(self, channel):
    """Bind every RPC of the Versions service to the given channel.

    Args:
      channel: A grpc.Channel.
    """
    _appengine = google_dot_appengine_dot_v1_dot_appengine__pb2
    _operation = google_dot_longrunning_dot_operations__pb2.Operation
    # (attribute name, full RPC path, request serializer, response deserializer)
    _rpc_table = (
        ('ListVersions',
         '/google.appengine.v1.Versions/ListVersions',
         _appengine.ListVersionsRequest.SerializeToString,
         _appengine.ListVersionsResponse.FromString),
        ('GetVersion',
         '/google.appengine.v1.Versions/GetVersion',
         _appengine.GetVersionRequest.SerializeToString,
         google_dot_appengine_dot_v1_dot_version__pb2.Version.FromString),
        ('CreateVersion',
         '/google.appengine.v1.Versions/CreateVersion',
         _appengine.CreateVersionRequest.SerializeToString,
         _operation.FromString),
        ('UpdateVersion',
         '/google.appengine.v1.Versions/UpdateVersion',
         _appengine.UpdateVersionRequest.SerializeToString,
         _operation.FromString),
        ('DeleteVersion',
         '/google.appengine.v1.Versions/DeleteVersion',
         _appengine.DeleteVersionRequest.SerializeToString,
         _operation.FromString),
    )
    for attr_name, rpc_path, req_serializer, resp_deserializer in _rpc_table:
      setattr(self, attr_name, channel.unary_unary(
          rpc_path,
          request_serializer=req_serializer,
          response_deserializer=resp_deserializer,
      ))
class VersionsServicer(object):
  """Server-side handler base class for the google.appengine.v1.Versions
  service.

  Manages versions of a service.  Subclass and override the RPC methods
  below; every default implementation reports UNIMPLEMENTED.
  """

  def _unimplemented(self, context):
    # Shared default behaviour: flag the RPC as unimplemented on the gRPC
    # context, then raise so in-process callers also see a hard failure.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ListVersions(self, request, context):
    """Lists the versions of a service."""
    self._unimplemented(context)

  def GetVersion(self, request, context):
    """Gets the specified Version resource.

    By default, only a `BASIC_VIEW` will be returned.
    Specify the `FULL_VIEW` parameter to get the full resource.
    """
    self._unimplemented(context)

  def CreateVersion(self, request, context):
    """Deploys code and resource files to a new version."""
    self._unimplemented(context)

  def UpdateVersion(self, request, context):
    """Updates the specified Version resource.

    You can specify the following fields depending on the App Engine
    environment and type of scaling that the version resource uses:

    * [`serving_status`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status):
      For Version resources that use basic scaling, manual scaling, or run in
      the App Engine flexible environment.
    * [`instance_class`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.instance_class):
      For Version resources that run in the App Engine standard environment.
    * [`automatic_scaling.min_idle_instances`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling):
      For Version resources that use automatic scaling and run in the App
      Engine standard environment.
    * [`automatic_scaling.max_idle_instances`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling):
      For Version resources that use automatic scaling and run in the App
      Engine standard environment.
    """
    self._unimplemented(context)

  def DeleteVersion(self, request, context):
    """Deletes an existing Version resource."""
    self._unimplemented(context)
def add_VersionsServicer_to_server(servicer, server):
  """Register a Versions servicer's RPC handlers with a grpc.Server."""
  _appengine = google_dot_appengine_dot_v1_dot_appengine__pb2
  _operation = google_dot_longrunning_dot_operations__pb2.Operation
  # method name -> (behaviour, request deserializer, response serializer)
  method_table = {
      'ListVersions': (
          servicer.ListVersions,
          _appengine.ListVersionsRequest.FromString,
          _appengine.ListVersionsResponse.SerializeToString),
      'GetVersion': (
          servicer.GetVersion,
          _appengine.GetVersionRequest.FromString,
          google_dot_appengine_dot_v1_dot_version__pb2.Version.SerializeToString),
      'CreateVersion': (
          servicer.CreateVersion,
          _appengine.CreateVersionRequest.FromString,
          _operation.SerializeToString),
      'UpdateVersion': (
          servicer.UpdateVersion,
          _appengine.UpdateVersionRequest.FromString,
          _operation.SerializeToString),
      'DeleteVersion': (
          servicer.DeleteVersion,
          _appengine.DeleteVersionRequest.FromString,
          _operation.SerializeToString),
  }
  rpc_method_handlers = {
      name: grpc.unary_unary_rpc_method_handler(
          behaviour,
          request_deserializer=req_deserializer,
          response_serializer=resp_serializer)
      for name, (behaviour, req_deserializer, resp_serializer)
      in method_table.items()
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'google.appengine.v1.Versions', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
class ServicesStub(object):
"""Manages services of an application.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListServices = channel.unary_unary(
'/google.appengine.v1.Services/ListServices',
request_serializer=google_dot_appengine_dot_v1_dot_appengine__pb2.ListServicesRequest.SerializeToString,
response_deserializer=google_dot_appengine_dot_v1_dot_appengine__pb2.ListServicesResponse.FromString,
)
self.GetService = channel.unary_unary(
'/google.appengine.v1.Services/GetService',
request_serializer=google_dot_appengine_dot_v1_dot_appengine__pb2.GetServiceRequest.SerializeToString,
response_deserializer=google_dot_appengine_dot_v1_dot_service__pb2.Service.FromString,
)
self.UpdateService = channel.unary_unary(
'/google.appengine.v1.Services/UpdateService',
request_serializer=google_dot_appengine_dot_v1_dot_appengine__pb2.UpdateServiceRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.DeleteService = channel.unary_unary(
'/google.appengine.v1.Services/DeleteService',
request_serializer=google_dot_appengine_dot_v1_dot_appengine__pb2.DeleteServiceRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
class ServicesServicer(object):
"""Manages services of an application.
"""
def ListServices(self, request, context):
"""Lists all the services in the application.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetService(self, request, context):
"""Gets the current configuration of the specified service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateService(self, request, context):
"""Updates the configuration of the specified service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteService(self, request, context):
"""Deletes the specified service and all enclosed versions.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ServicesServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListServices': grpc.unary_unary_rpc_method_handler(
servicer.ListServices,
request_deserializer=google_dot_appengine_dot_v1_dot_appengine__pb2.ListServicesRequest.FromString,
response_serializer=google_dot_appengine_dot_v1_dot_appengine__pb2.ListServicesResponse.SerializeToString,
),
'GetService': grpc.unary_unary_rpc_method_handler(
servicer.GetService,
request_deserializer=google_dot_appengine_dot_v1_dot_appengine__pb2.GetServiceRequest.FromString,
response_serializer=google_dot_appengine_dot_v1_dot_service__pb2.Service.SerializeToString,
),
'UpdateService': grpc.unary_unary_rpc_method_handler(
servicer.UpdateService,
request_deserializer=google_dot_appengine_dot_v1_dot_appengine__pb2.UpdateServiceRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'DeleteService': grpc.unary_unary_rpc_method_handler(
servicer.DeleteService,
request_deserializer=google_dot_appengine_dot_v1_dot_appengine__pb2.DeleteServiceRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.appengine.v1.Services', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class ApplicationsStub(object):
"""Manages App Engine applications.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetApplication = channel.unary_unary(
'/google.appengine.v1.Applications/GetApplication',
request_serializer=google_dot_appengine_dot_v1_dot_appengine__pb2.GetApplicationRequest.SerializeToString,
response_deserializer=google_dot_appengine_dot_v1_dot_application__pb2.Application.FromString,
)
self.RepairApplication = channel.unary_unary(
'/google.appengine.v1.Applications/RepairApplication',
request_serializer=google_dot_appengine_dot_v1_dot_appengine__pb2.RepairApplicationRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
class ApplicationsServicer(object):
"""Manages App Engine applications.
"""
def GetApplication(self, request, context):
"""Gets information about an application.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RepairApplication(self, request, context):
"""Recreates the required App Engine features for the application in your
project, for example a Cloud Storage bucket or App Engine service account.
Use this method if you receive an error message about a missing feature,
for example "*Error retrieving the App Engine service account*".
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ApplicationsServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetApplication': grpc.unary_unary_rpc_method_handler(
servicer.GetApplication,
request_deserializer=google_dot_appengine_dot_v1_dot_appengine__pb2.GetApplicationRequest.FromString,
response_serializer=google_dot_appengine_dot_v1_dot_application__pb2.Application.SerializeToString,
),
'RepairApplication': grpc.unary_unary_rpc_method_handler(
servicer.RepairApplication,
request_deserializer=google_dot_appengine_dot_v1_dot_appengine__pb2.RepairApplicationRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.appengine.v1.Applications', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| [
"matt.sachtler@gmail.com"
] | matt.sachtler@gmail.com |
a338ca98eb0376c40f734caf4fa5facdaac9648d | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /book/oreilly/Mining.the.Social.Web/python_code/mailboxes__jsonify_mbox.py | 2680ac963760cd12584897c178e5e3f7916a77b7 | [
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | # -*- coding: utf-8 -*-
import sys
import mailbox
import email
import quopri
import json
from BeautifulSoup import BeautifulSoup
MBOX = sys.argv[1]
OUT_FILE = sys.argv[2]
def cleanContent(msg):
# Decode message from "quoted printable" format
msg = quopri.decodestring(msg)
# Strip out HTML tags, if any are present
soup = BeautifulSoup(msg)
return ''.join(soup.findAll(text=True))
def jsonifyMessage(msg):
json_msg = {'parts': []}
for (k, v) in msg.items():
json_msg[k] = v.decode('utf-8', 'ignore')
# The To, CC, and Bcc fields, if present, could have multiple items
# Note that not all of these fields are necessarily defined
for k in ['To', 'Cc', 'Bcc']:
if not json_msg.get(k):
continue
json_msg[k] = json_msg[k].replace('\n', '').replace('\t', '').replace('\r'
, '').replace(' ', '').decode('utf-8', 'ignore').split(',')
try:
for part in msg.walk():
json_part = {}
if part.get_content_maintype() == 'multipart':
continue
json_part['contentType'] = part.get_content_type()
content = part.get_payload(decode=False).decode('utf-8', 'ignore')
json_part['content'] = cleanContent(content)
json_msg['parts'].append(json_part)
except Exception, e:
sys.stderr.write('Skipping message - error encountered (%s)\n' % (str(e), ))
finally:
return json_msg
# There's a lot of data to process, so use a generator to do it. See http://wiki.python.org/moin/Generators
# Using a generator requires a trivial custom encoder be passed to json for serialization of objects
class Encoder(json.JSONEncoder):
def default(self, o): return list(o)
# The generator itself...
def gen_json_msgs(mb):
while 1:
msg = mb.next()
if msg is None:
break
yield jsonifyMessage(msg)
mbox = mailbox.UnixMailbox(open(MBOX, 'rb'), email.message_from_file)
json.dump(gen_json_msgs(mbox),open(OUT_FILE, 'wb'), indent=4, cls=Encoder)
| [
"xenron@outlook.com"
] | xenron@outlook.com |
8bb7448cd9061324f96d92191702074a4a55d7e4 | 4b0467a0e75e632a56af2a2ebc0abe257fd88544 | /fasttext_mlp/main.py | 57d72d023b737ef6904698db7b44ab371739eb8f | [] | no_license | sobamchan/simple_roc_cloze | 89f903d0fb100e4ee979c543f586a3223b3eac27 | ed24eeb615d8348acf7ad5c0c112a94c011600bf | refs/heads/master | 2020-03-27T22:15:54.703835 | 2018-09-13T13:01:56 | 2018-09-13T13:01:56 | 147,217,710 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | import os
import argparse
from pathlib import Path
import torch
from fasttext_mlp.trainer import Trainer
from logger import Logger
def getargs():
p = argparse.ArgumentParser()
p.add_argument('--odir', type=str)
p.add_argument('--gpu-id', default=2)
p.add_argument('--no-cuda', action='store_false')
p.add_argument('--epochs', type=int, default=1000)
dpath = '../DATA/ROC/cloze_test_val__spring2016_cloze_test_ALL_val'
p.add_argument('--ddir', type=str,
default=dpath)
dpath = '../DATA/ROC/test_set_spring_2016.csv'
p.add_argument('--test-path', type=str,
default=dpath)
p.add_argument('--ftpath', type=str, default='../DATA/wiki.en.bin')
p.add_argument('--bsz', type=int, default=32)
p.add_argument('--lr', type=float, default=0.1)
p.add_argument('--optim-type', type=str, default='sgd')
p.add_argument('--nlayers', type=int, default=3)
p.add_argument('--nemb', type=int, default=300)
p.add_argument('--nhidden', type=int, default=500)
return p.parse_args()
def main(args, logger):
args.odir = Path(args.odir)
t = Trainer(
args.ddir,
args.bsz,
args.ftpath,
args.nlayers,
args.nemb,
args.nhidden,
args.lr,
args.optim_type,
args.use_cuda
)
best_acc = -1
lr = args.lr
for iepc in range(1, args.epochs + 1):
logger.log('%dth epoch' % iepc)
tr_loss = t.train_one_epoch(iepc)
val_acc, val_loss = t.evaluate()
if best_acc < val_acc:
best_acc = val_acc
logger.log('Best accuracy achived: %f!!!' % val_acc)
t.make_submission(args.test_path, args.odir)
logger.log('Making submission to %s' % args.odir)
else:
for pg in t.optimizer.param_groups:
lr *= 0.8
pg['lr'] = lr
logger.log('Decrease lr to %f' % lr)
logger.dump({
'epoch': iepc,
'tr_loss': tr_loss,
'val_loss': val_loss,
'val_acc': val_acc,
})
if __name__ == '__main__':
args = getargs()
logger = Logger(args.odir)
# GPU usage
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
logger.log('using GPU id: %s' % os.environ['CUDA_VISIBLE_DEVICES'])
if not args.no_cuda and torch.cuda.is_available():
args.use_cuda = True
else:
args.use_cuda = False
logger.log(str(args))
main(args, logger)
| [
"oh.sore.sore.soutarou@gmail.com"
] | oh.sore.sore.soutarou@gmail.com |
d86338a9bec602205cf0f357d8f1b2622b5cb005 | 5051f0d301e1e6ab8cb434c50c95697036d4d7ae | /02_colors/create_image.py | 6824ae559bbf17e842fabedb907ad1ca0f97c841 | [] | no_license | Python-Repository-Hub/image_processing_with_python | 41590e36d466f2e7017a832419ed1827f305c23f | d0b6a32c893e0c0ed9c9638bbf1632d950adabf8 | refs/heads/main | 2023-08-31T10:59:11.551382 | 2021-10-20T14:07:02 | 2021-10-20T14:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # create_image.py
from PIL import Image
from PIL import ImageColor
def create_image(path, size):
image = Image.new("RGBA", size)
red = ImageColor.getcolor("red", "RGBA")
green = ImageColor.getcolor("green", "RGBA")
color = red
count = 0
for y in range(size[1]):
for x in range(size[0]):
if count == 5:
# swap colors
color = red if red != color else green
count = 0
image.putpixel((x, y), color)
count += 1
image.save(path)
if __name__ == "__main__":
create_image("lines.png", (150, 150))
| [
"mike@pythonlibrary.org"
] | mike@pythonlibrary.org |
75d2fa1bf0944c5ed7d6312b208a487ecb4b9b66 | ac617cb51ae396d932ee843af58becad8cf64e53 | /gammapy/utils/distributions/tests/test_general_random.py | f49e4b60262ea41fa93a4b91d61da5d931e5c9a6 | [
"BSD-3-Clause"
] | permissive | dlennarz/gammapy | aef7f14bb84385d017c180b425b5bc3800beb343 | e7e386047f7dfd7d1e50edd5b2615f484b98c664 | refs/heads/master | 2020-04-05T17:58:22.383376 | 2019-05-28T13:28:57 | 2019-05-28T13:28:57 | 40,043,940 | 0 | 0 | null | 2019-05-28T12:39:39 | 2015-08-01T10:59:02 | Python | UTF-8 | Python | false | false | 354 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from ..general_random import GeneralRandom
def f(x):
return x ** 2
def test_general_random():
general_random = GeneralRandom(f, 0, 3)
vals = general_random.draw(random_state=42)
assert_allclose(vals.mean(), 2.229301, atol=1e-4)
| [
"Deil.Christoph@gmail.com"
] | Deil.Christoph@gmail.com |
ef6f0cf116e3e5302a0e5eaa1724bd496d65f9e4 | 0c6baaf9324e5ff2af96e23c44a62e5f6962e502 | /module11_VCF.py | 831f4c9939b1992537447faec6f96db8292bad38 | [] | no_license | friedpine/modules | 8d77c7f88bf09884adf57115d60111afdae6d049 | 49af041990e58fa985dacbaf7c8edaa15fbd74a1 | refs/heads/master | 2016-09-06T17:17:18.556680 | 2015-05-25T07:28:52 | 2015-05-25T07:28:52 | 20,153,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,453 | py | import re, sys, os, copy
import subprocess
import time
import cPickle as pickle
import infra01_pos2info as in1
import MySQLdb as mb
import d00_sample as d00
def read_VCF_file(path,DB_NAME,tablename,limit,counts,samples):
conn=mb.connect(host="localhost",user="root",passwd="123456",db=DB_NAME)
cursor = conn.cursor()
sample_infos = ''
for sample in samples:
sample_info = " %s_G varchar(5) DEFAULT NULL,%s_0 int(11) DEFAULT NULL,%s_1 int(11) DEFAULT NULL,%s_2 int(11) DEFAULT NULL,%s_DP int(11) DEFAULT NULL,%s_GQ int(11) DEFAULT NULL," %(sample,sample,sample,sample,sample,sample)
sample_infos += sample_info
sql = """CREATE TABLE %s (
`chr` varchar(20) NOT NULL DEFAULT '',
`pos` int(11) NOT NULL DEFAULT '0',
`Ref` varchar(30) DEFAULT NULL,
`Alt` varchar(30) NOT NULL DEFAULT '',
`Qual` float DEFAULT NULL,
`DP` int(11) DEFAULT NULL,
`FQ` float DEFAULT NULL,
`AF1` float DEFAULT NULL,
`AC1` float DEFAULT NULL,
%s
PRIMARY KEY (`chr`,`pos`,`Alt`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1""" %(tablename,sample_infos)
try:
cursor.execute(sql)
except:
print "EXISTS"
file = open(path)
values = []
for line in file:
if re.search('#',line):
continue
t = re.split('\s*',line)
info = {}
for i in re.split(';',t[7]):
a = re.split('=',i)
if len(a)>1:
info[a[0]] = a[1]
if len(t[3])>limit:
t[3]=t[3][0:20]
continue
if len(t[4])>limit:
t[4]=t[4][0:limit]
continue
value = (t[0],t[1],t[3],t[4],t[5],info['DP'],info['FQ'],info['AF1'],info['AC1'])
for i in range(counts):
value += tuple(re.split(':|,',t[9+i]))
if len(value)!=9+counts*6:
a = 10
else:
values.append(value)
cmd = "insert into "+tablename+" values(%s"+",%s"*(8+counts*6)+")"
cursor.executemany(cmd,values);
conn.commit()
cursor.close()
conn.close()
def read_VCF_file_single(cursor,conn,DB_NAME,tablename,samples,type):
limit = 30
sample_infos = ''
for sample in samples:
sample_info = " %s_DP varchar(5) DEFAULT '0',%s_alt float DEFAULT '0'," %(sample,sample)
sample_infos += sample_info
sql = """CREATE TABLE %s (
`chr` varchar(20) NOT NULL DEFAULT '',
`pos` int(11) NOT NULL DEFAULT '0',
`Ref` varchar(30) DEFAULT NULL,
`Alt` varchar(30) NOT NULL DEFAULT '',
%s
PRIMARY KEY (`chr`,`pos`,`Alt`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1""" %(tablename,sample_infos)
print sql
try:
cursor.execute(sql)
except:
print "EXISTS"
for sample in samples:
path = d00.get_sample_file(cursor,sample,type)
file = open(path)
values = []
for line in file:
if re.search('#',line):
continue
t = re.split('\s*',line)
info = {}
for i in re.split(';',t[7]):
a = re.split('=',i)
if len(a)>1:
info[a[0]] = a[1]
if 'DP4' not in info:
continue
DP4 = re.split(',',info['DP4'])
if len(t[3])>limit:
t[3]=t[3][0:limit]
continue
if len(t[4])>limit:
t[4]=t[4][0:limit]
continue
value = (t[0],t[1],t[3],t[4],info['DP'],float(int(DP4[2])+int(DP4[3]))/int(info['DP']))
values.append(value)
cmd = "insert into %s (chr,pos,Ref,Alt,%s,%s)values(%%s,%%s,%%s,%%s,%%s,%%s) on duplicate key update %s=values(%s),%s=values(%s)" %(tablename,sample+'_DP',sample+'_alt',sample+'_DP',sample+'_DP',sample+'_alt',sample+'_alt')
print cmd,values[0]
cursor.executemany(cmd,values)
conn.commit()
cursor.close()
conn.close()
class SNP(dict):
def __init__(self):
print "SNP class welcomes you!"
# def read_VCF_file(self,path,sample_names):
# self['samples'] = sample_names
# file = open(path)
# values = []
# for line in file:
# if re.search('#',line):
# continue
# t = re.split('\s*',line)
# info = re.split(t[7]
def find_good_quality_SNP_pos(self,group,names,goodsize,QUAL_off,GQ_off,rec):
self['groupnames'] = names
self[rec] = {}
indexs = []
for i in range(len(names)):
temp = []
for j in range(len(group)):
if group[j] == i:
temp.append(j)
indexs.append(temp)
for chr in self['chrs']:
for pos in self[chr]:
if self[chr][pos]['QUAL'] < QUAL_off:
continue
self[chr][pos]['group_GT'] = ['NA','NA']
for groupid,i in enumerate(indexs):
types = []
number = 0
for j in i:
if self[chr][pos]['GQ'][j] >= GQ_off:
types.append(self[chr][pos]['GT'][j])
counts = dict([(i, types.count(i)) for i in types])
GroupType = 'NA'
for gt in counts:
if counts[gt] >= goodsize[groupid]:
GroupType = gt
self[chr][pos]['group_GT'][groupid] = GroupType
if 'NA' not in self[chr][pos]['group_GT']:
counts = dict([(i, types.count(i)) for i in self[chr][pos]['group_GT']])
if len(counts) == 2:
if chr not in self[rec]:
self[rec][chr] = {}
self[rec][chr][pos] = {}
self[rec][chr][pos]['GT'] = self[chr][pos]['group_GT']
self[rec][chr][pos]['ref'] = self[chr][pos]['ref']
self[rec][chr][pos]['alt'] = self[chr][pos]['alt']
def get_pos_infos(self,rec,db1,db2):
poses = copy.deepcopy(self[rec])
in1.get_infos(db1,db2,poses)
self[rec] = poses
def select_target_genes(self,rec,type,genetypes,file):
outfile = open(file,'w')
for chr in self[rec]:
for pos in self[rec][chr]:
temp = self[rec][chr][pos]
if self[rec][chr][pos][type]['raw'] == []:
continue
if self[rec][chr][pos]['GT'] not in genetypes:
continue
print >>outfile,chr,pos,temp['ref'],temp['alt'],temp[type]['genes'][0],temp[type]['transc'][0]
outfile.close()
| [
"friedpine@gmail.com"
] | friedpine@gmail.com |
1750b6390552b7db3e845e3ee135cadb86fd253f | 3fbd28e72606e5358328bfe4b99eb0349ca6a54f | /.history/a_petya_and _string_20210606143608.py | 1f9b9e235bc58a164b2f94df998cda1b88fac8e7 | [] | no_license | Tarun1001/codeforces | f0a2ef618fbd45e3cdda3fa961e249248ca56fdb | 576b505d4b8b8652a3f116f32d8d7cda4a6644a1 | refs/heads/master | 2023-05-13T04:50:01.780931 | 2021-06-07T21:35:26 | 2021-06-07T21:35:26 | 374,399,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | a= str(input().lower())
b= str(input().lower())
if a==b:
print | [
"tarunsivasai8@gmail.com"
] | tarunsivasai8@gmail.com |
e7c6599fa4227881a3af7c41cf4947529bf81dab | 06eee979fbd6ed21a83e66ba3a81308bc54b946e | /scripts/decompiler_scripts/util.py | 3272bc18a593799a6549b16b8e89401073c24562 | [] | no_license | j4M0Fj1MMy/ghcc | b3e5e7dedc26d844baf74ae293d27993a5ef75bd | e5ed776bd444cc1ba76daa1baba1856b48814f81 | refs/heads/master | 2023-08-04T08:04:09.353865 | 2021-09-24T14:14:30 | 2021-09-24T14:14:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,770 | py | from collections import defaultdict
import ida_hexrays
import ida_lines
import ida_pro
import json
import re
UNDEF_ADDR = 0xFFFFFFFFFFFFFFFF
hexrays_vars = re.compile("^(v|a)[0-9]+$")
def get_expr_name(expr):
name = expr.print1(None)
name = ida_lines.tag_remove(name)
name = ida_pro.str2user(name)
return name
class CFuncGraph:
def __init__(self, highlight):
self.items = [] # list of citem_t
self.reverse = dict() # citem_t -> node #
self.succs = [] # list of lists of next nodes
self.preds = [] # list of lists of previous nodes
self.highlight = highlight
def nsucc(self, n):
return len(self.succs[n]) if self.size() else 0
def npred(self, n):
return len(self.preds[n]) if self.size() else 0
def succ(self, n, i):
return self.succs[n][i]
def pred(self, n, i):
return self.preds[n][i]
def size(self):
return len(self.preds)
def add_node(self):
n = self.size()
def resize(array, new_size):
if new_size > len(array):
while len(array) < new_size:
array.append([])
else:
array = array[:new_size]
return array
self.preds = resize(self.preds, n+1)
self.succs = resize(self.succs, n+1)
return n
def add_edge(self, x, y):
self.preds[y].append(x)
self.succs[x].append(y)
def get_pred_ea(self, n):
if self.npred(n) == 1:
pred = self.pred(n, 0)
pred_item = self.items[pred]
if pred_item.ea == UNDEF_ADDR:
return self.get_pred_ea(pred)
return pred_item.ea
return UNDEF_ADDR
def get_node_label(self, n):
item = self.items[n]
op = item.op
insn = item.cinsn
expr = item.cexpr
parts = [ida_hexrays.get_ctype_name(op)]
if op == ida_hexrays.cot_ptr:
parts.append(".%d" % expr.ptrsize)
elif op == ida_hexrays.cot_memptr:
parts.append(".%d (m=%d)" % (expr.ptrsize, expr.m))
elif op == ida_hexrays.cot_memref:
parts.append(" (m=%d)" % (expr.m,))
elif op in [
ida_hexrays.cot_obj,
ida_hexrays.cot_var]:
name = get_expr_name(expr)
parts.append(".%d %s" % (expr.refwidth, name))
elif op in [
ida_hexrays.cot_num,
ida_hexrays.cot_helper,
ida_hexrays.cot_str]:
name = get_expr_name(expr)
parts.append(" %s" % (name,))
elif op == ida_hexrays.cit_goto:
parts.append(" LABEL_%d" % insn.cgoto.label_num)
elif op == ida_hexrays.cit_asm:
parts.append("<asm statements; unsupported ATM>")
# parts.append(" %a.%d" % ())
parts.append(", ")
parts.append("ea: %08X" % item.ea)
if item.is_expr() and not expr is None and not expr.type.empty():
parts.append(", ")
tstr = expr.type._print()
parts.append(tstr if tstr else "?")
return "".join(parts)
# Puts the tree in a format suitable for JSON
def json_tree(self, n):
# Each node has a unique ID
node_info = { "node_id" : n }
item = self.items[n]
# This is the type of ctree node
node_info["node_type"] = ida_hexrays.get_ctype_name(item.op)
# This is the type of the data (in C-land)
if item.is_expr() and not item.cexpr.type.empty():
node_info["type"] = item.cexpr.type._print()
node_info["address"] = "%08X" % item.ea
if item.ea == UNDEF_ADDR:
node_info["parent_address"] = "%08X" % self.get_pred_ea(n)
# Specific info for different node types
if item.op == ida_hexrays.cot_ptr:
node_info["pointer_size"] = item.cexpr.ptrsize
elif item.op == ida_hexrays.cot_memptr:
node_info.update({
"pointer_size": item.cexpr.ptrsize,
"m": item.cexpr.m
})
elif item.op == ida_hexrays.cot_memref:
node_info["m"] = item.cexpr.m
elif item.op == ida_hexrays.cot_obj:
node_info.update({
"name": get_expr_name(item.cexpr),
"ref_width": item.cexpr.refwidth
})
elif item.op == ida_hexrays.cot_var:
_, var_id, old_name, new_name = get_expr_name(item.cexpr).split("@@")
node_info.update({
"var_id": var_id,
"old_name": old_name,
"new_name": new_name,
"ref_width": item.cexpr.refwidth
})
elif item.op in [ida_hexrays.cot_num,
ida_hexrays.cot_str,
ida_hexrays.cot_helper]:
node_info["name"] = get_expr_name(item.cexpr)
# Get info for children of this node
successors = []
x_successor = None
y_successor = None
z_successor = None
for i in xrange(self.nsucc(n)):
successors.append(self.succ(n, i))
successor_trees = []
if item.is_expr():
if item.x:
for s in successors:
if item.x == self.items[s]:
successors.remove(s)
x_successor = self.json_tree(s)
break
if item.y:
for s in successors:
if item.y == self.items[s]:
successors.remove(s)
y_successor = self.json_tree(s)
break
if item.z:
for s in successors:
if item.z == self.items[s]:
successors.remove(s)
z_successor = self.json_tree(s)
break
if successors:
for succ in successors:
successor_trees.append(self.json_tree(succ))
if successor_trees != []:
node_info["children"] = successor_trees
if x_successor:
node_info["x"] = x_successor
if y_successor:
node_info["y"] = y_successor
if z_successor:
node_info["z"] = z_successor
return node_info
def print_tree(self):
tree = json.dumps(self.json_tree(0))
print(tree)
def dump(self):
print("%d items:" % len(self.items))
for idx, item in enumerate(self.items):
print("\t%d: %s" % (idx, ida_hexrays.get_ctype_name(item.op)))
# print("\t%d: %s" % (idx, self.get_node_label(idx)))
print("succs:")
for parent, s in enumerate(self.succs):
print("\t%d: %s" % (parent, s))
print("preds:")
for child, p in enumerate(self.preds):
print("\t%d: %s" % (child, p))
class GraphBuilder(ida_hexrays.ctree_parentee_t):
    """ctree visitor that mirrors a decompiled function's AST into a CFuncGraph.

    ida_hexrays.ctree_parentee_t maintains ``self.parents`` (the stack of
    enclosing items) while IDA drives the traversal; each visited item is
    added as a node and linked to its nearest recorded parent.
    """
    def __init__(self, cg):
        ida_hexrays.ctree_parentee_t.__init__(self)
        self.cg = cg
    def add_node(self, i):
        """Register citem *i* in the graph and return its node index."""
        n = self.cg.add_node()
        # n normally equals len(items); append first so the indexed store
        # below stays in range.
        if n <= len(self.cg.items):
            self.cg.items.append(i)
        self.cg.items[n] = i
        self.cg.reverse[i] = n
        return n
    def process(self, i):
        """Add item *i* and connect it to its already-visited parent.

        Returns 0 so the Hex-Rays traversal continues.
        """
        n = self.add_node(i)
        if n < 0:
            return n
        if len(self.parents) > 1:
            # Look the parent up by obj_id rather than by the wrapper object
            # itself -- presumably the Python wrappers are not identity-stable
            # across callbacks; confirm against the Hex-Rays SDK docs.
            lp = self.parents.back().obj_id
            for k, v in self.cg.reverse.items():
                if k.obj_id == lp:
                    p = v
                    break
            self.cg.add_edge(p, n)
        return 0
    def visit_insn(self, i):
        # Statement (cinsn_t) callback.
        return self.process(i)
    def visit_expr(self, e):
        # Expression (cexpr_t) callback.
        return self.process(e)
| [
"huzecong@gmail.com"
] | huzecong@gmail.com |
62ea943585b3d94a4bea611c7b24d5c490e7e4ae | ed1cc52f25caa9b57679d5b74a97a99f40ebbb05 | /saveData.py | 0f437123682ef6a17ed6ff7463f265f5c604d362 | [] | no_license | west789/twitterCrawl | fb26400905ec661be61a8b8dbf61f156f0705e25 | 80cb61cff59575844cc6d60e2c9dee712481f082 | refs/heads/master | 2022-12-14T16:58:05.379439 | 2018-07-26T08:29:57 | 2018-07-26T08:29:57 | 139,787,645 | 0 | 1 | null | 2022-01-21T19:24:23 | 2018-07-05T02:57:14 | Python | UTF-8 | Python | false | false | 6,003 | py | import pymysql
from loggingModule import logger
class MysqlDB(object):
def __init__(self):
try:
self.conn = pymysql.connect(
'localhost',
'root',
'123',
'twittershowtest',
charset='utf8mb4')
self.cursor = self.conn.cursor()
except Exception as e:
logger.info('连接数据库失败:%s' % str(e))
def close(self):
self.cursor.close()
self.conn.close()
class TwitterPip(MysqlDB):
    """Persistence layer for crawled Twitter data (account + tweets tables).

    Insert/update methods commit on success and roll back on failure; read
    methods return empty/None fallbacks on error instead of raising.
    """
    def insert_userInfo(self, itemDict):
        """Insert one account row from *itemDict*; returns "error" on failure."""
        sql = """
            INSERT INTO account (accountName, twitterId, screenName, location, description,
            url, statusesCount, friendsCount, followersCount, favoritesCount,
            accountTime, profileImage, bannerUrl) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        try:
            self.cursor.execute(
                sql, (itemDict["accountName"], itemDict["twitterId"],
                      itemDict["screenName"], itemDict["location"],
                      itemDict["description"], itemDict["url"],
                      itemDict["statusesCount"], itemDict["friendsCount"],
                      itemDict["followersCount"], itemDict["favoritesCount"],
                      itemDict["accountTime"], itemDict["profileImage"],
                      itemDict["bannerUrl"]))
            self.conn.commit()
            # print("插入 %s 账户信息成功" % itemDict["screenName"])
        except Exception as e:
            self.conn.rollback()
            logger.info("插入 %s 账户信息失败 %s" % (itemDict["screenName"], str(e)))
            return "error"
    def insert_tweetInfo(self, itemDict, flag):
        """Insert one tweet row; *flag* counts successes and is returned
        incremented on success, unchanged on failure."""
        sql = """
            INSERT INTO tweets (accountId, tweetsText, tweetsUrl, videoUrl, imageUrl, retweetCount,
            tweetFavCount, tweetTime, twitterId) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        try:
            self.cursor.execute(
                sql, (itemDict["accountId"], itemDict["tweetsText"],
                      itemDict["tweetsUrl"], itemDict["videoUrl"],
                      itemDict["imageUrl"], itemDict["retweetCount"],
                      itemDict["favoriteCount"], itemDict["tweetTime"],
                      itemDict["twitterId"]))
            self.conn.commit()
            # print("插入推文信息成功")
            flag += 1
            return flag
        except Exception as e:
            self.conn.rollback()
            logger.info("插入 %s 推文信息失败 %s" % (itemDict["twitterId"], str(e)))
            return flag
    def update_userInfo(self, itemDict, screenName):
        """Overwrite the account row keyed by screenName; "error" on failure.

        NOTE(review): the *screenName* parameter is unused -- the WHERE value
        comes from itemDict["screenName"]; confirm this is intended.
        """
        sql = """
            update account set accountName=%s, screenName=%s,
                               twitterId=%s, location=%s,
                               description=%s, url=%s,
                               statusesCount=%s, friendsCount=%s,
                               followersCount=%s, favoritesCount=%s,
                               accountTime=%s, profileImage=%s,
                               bannerUrl=%s where screenName=%s
        """
        try:
            self.cursor.execute(
                sql, (itemDict["accountName"], itemDict["screenName"],
                      itemDict["twitterId"], itemDict["location"],
                      itemDict["description"], itemDict["url"],
                      itemDict["statusesCount"], itemDict["friendsCount"],
                      itemDict["followersCount"], itemDict["favoritesCount"],
                      itemDict["accountTime"], itemDict["profileImage"],
                      itemDict["bannerUrl"], itemDict["screenName"]))
            self.conn.commit()
            # print("更新 %s 账户信息成功" % itemDict["screenName"])
        except Exception as e:
            self.conn.rollback()
            logger.info("更新 %s 账户信息失败,%s" % (itemDict["screenName"], str(e)))
            return "error"
    # Fetch the list of all twitterId values from the account table.
    def get_twitterIdList(self):
        sql = "select twitterId from account"
        # cursor = self.conn.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            self.cursor.execute(sql)
            idTuple = self.cursor.fetchall()
            idList = [item[0] for item in idTuple]
            return idList
        except Exception as e:
            self.conn.rollback()
            logger.info("执行sql语句失败%s:%s" % (str(e), sql))
            return []
    # Fetch the list of all screenName values from the account table.
    def get_screenName(self):
        sql = "SELECT screenName FROM account"
        try:
            self.cursor.execute(sql)
            nameTuple = self.cursor.fetchall()
            nameList = [item[0] for item in nameTuple]
            return nameList
        except Exception as e:
            self.conn.rollback()
            logger.info("执行sql语句失败%s:%s" % (str(e), sql))
            return []
    # Fetch the accountId for the given twitterId ("" on error).
    def get_accountId(self, twitterId):
        # SECURITY: twitterId is interpolated into the SQL string -- safe only
        # if it always comes from our own database; use a parameterized query
        # if it can ever originate from untrusted input.
        sql = "select accountId from account where twitterId =%s" % twitterId
        try:
            self.cursor.execute(sql)
            accountId = self.cursor.fetchone()
            return accountId[0]
        except Exception as e:
            self.conn.rollback()
            logger.info("执行sql语句失败%s:%s" % (str(e), sql))
            return ""
    # Fetch the twitterId of the most recently inserted tweet for an account.
    def get_sinceId(self, accountId):
        # SECURITY: accountId is interpolated into the SQL string (see note
        # on get_accountId); keep its source trusted.
        sql = "SELECT tweets.twitterId from tweets where accountId=%s ORDER BY tweets.tweetsId desc LIMIT 1" % accountId
        try:
            self.cursor.execute(sql)
            sinceId = self.cursor.fetchone()
            if sinceId != None:
                return sinceId[0]
            else:
                return None
        except Exception as e:
            self.conn.rollback()
            logger.info("执行sql语句失败%s" % str(e))
            return None
| [
"738758058@qq.com"
] | 738758058@qq.com |
4dca708fc3cb0a96329444808619dac71dfb1f5c | f00c168128e47040486af546a0859811c638db3d | /dic32 | ff6395d525f6b1d36b6f9618a46a36b5f1355992 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | sbp/dic32 | 16e7741cbafd42283e866f788f9faa4e16b9f61e | 14745ecee813c2a590b430ed18c4d867848291d4 | refs/heads/master | 2016-09-05T22:45:47.393067 | 2015-02-14T15:59:55 | 2015-02-14T15:59:55 | 30,799,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,951 | #!/usr/bin/env python3
"""
dic32 - Data Integrity Checker
Stores the CRC32 of inodes in an Sqlite3 database
Written by Sean B. Palmer
"How many bits on your disk are corrupted and were propagated to your backups?
You have no way to know. We've had the solution for decades." (@garybernhardt)
NO WARRANTY, NO GUARANTEES
$ dic32 update ~/.dic32.sqlite3 ~/
"""
import os
import sqlite3
import struct
import sys
import time
import zlib
def error(message):
    """Report *message* on stderr with an "Error:" prefix and exit(1)."""
    sys.stderr.write("Error: %s\n" % (message,))
    sys.exit(1)
class PersistentDictionary(object):
    """A minimal dict-like mapping persisted in a single-table SQLite DB.

    Keys and values are stored as BLOBs. Writes are NOT committed
    automatically -- callers batch many updates and call :meth:`commit`
    once (see the sync logic in the main program).

    :param filename: database path (``":memory:"`` also works).
    :param journal_mode: SQLite journal mode; it is interpolated directly
        into a PRAGMA statement, so it must come from trusted code only.
    """

    def __init__(self, filename, *, journal_mode="DELETE"):
        schema = "(key BLOB PRIMARY KEY, value BLOB)"
        self.connection = sqlite3.connect(filename)
        self.connection.execute("PRAGMA journal_mode = %s" % journal_mode)
        self.connection.execute("CREATE TABLE IF NOT EXISTS dict " + schema)
        self.connection.commit()

    def select_one(self, query, arg=()):
        """Run *query* and return its first row, or None if it yields none.

        The default parameter set is an empty tuple: sqlite3 rejects None
        as a parameter collection, so the previous ``arg=None`` default
        made calls that omitted *arg* crash.
        """
        try:
            return next(iter(self.connection.execute(query, arg)))
        except StopIteration:
            return None

    def commit(self):
        """Flush pending writes to the database (no-op after close())."""
        if self.connection is not None:
            self.connection.commit()

    def close(self):
        """Close the database; safe to call more than once.

        Uncommitted writes are discarded -- call commit() first if they
        must survive.
        """
        if self.connection is not None:
            self.connection.close()
            self.connection = None

    def __contains__(self, key):
        query = "SELECT 1 from dict where key = ?"
        # Return an explicit bool instead of a row-tuple-or-None.
        return self.select_one(query, (key,)) is not None

    def __getitem__(self, key):
        query = "SELECT value FROM dict WHERE key = ?"
        item = self.select_one(query, (key,))
        if item is None:
            raise KeyError(key)
        return item[0]

    def __setitem__(self, key, value):
        # REPLACE = insert-or-overwrite; not durable until commit().
        query = "REPLACE INTO dict (key, value) VALUES (?, ?)"
        self.connection.execute(query, (key, value))

    def __delitem__(self, key):
        if key not in self:
            raise KeyError(key)
        query = "DELETE FROM dict WHERE key = ?"
        self.connection.execute(query, (key,))

    def keys(self):
        """Return every key, in insertion (rowid) order."""
        query = "SELECT key FROM dict ORDER BY rowid"
        return [key[0] for key in self.connection.execute(query)]
def walk(directory):
    """Yield the path of every regular file beneath *directory*, recursively."""
    for parent, _dirs, filenames in os.walk(directory):
        yield from (os.path.join(parent, filename) for filename in filenames)
def crc32(path):
    """Return the CRC32 of the file at *path*, streamed in 32 MB chunks."""
    checksum = 0
    with open(path, "rb") as stream:
        # iter() calls read() until it returns the b"" sentinel at EOF.
        for chunk in iter(lambda: stream.read(33554432), b""):
            checksum = zlib.crc32(chunk, checksum)
    return checksum
def pack(integer):
    """Serialise *integer* as 4 big-endian bytes (unsigned 32-bit)."""
    big_endian_u32 = struct.Struct(">I")
    return big_endian_u32.pack(integer)
def dic32_path(db, path, update, force, cache, log):
    """Compare one file's CRC32 against its stored record; return its size.

    The database maps pack(inode) -> pack(mtime) + pack(crc32) (two 4-byte
    big-endian fields).  When *update* is set, staged writes go into
    *cache*; *log* buckets results ("R" = records not re-seen yet, "M" =
    mismatched paths).

    NOTE(review): there is no branch for an inode absent from the
    database, so previously unseen files are apparently never staged
    here -- confirm whether that is intentional.
    """
    stat = os.stat(path)
    inode = pack(stat.st_ino)
    modified = pack(int(os.path.getmtime(path)))
    if inode in db:
        # Seen again this run: keep it out of the "removed" bucket.
        log["R"].discard(inode)
        metadata = db[inode]
        if metadata.startswith(modified):
            # mtime unchanged: content should match, so hash and compare.
            checksum = pack(crc32(path))
            if not metadata.endswith(checksum):
                if update and force:
                    # Forced update: accept the new checksum anyway.
                    cache[inode] = modified + checksum
                log["M"].append(path)  # always reported as a mismatch
            elif update:
                # NOTE(review): the checksum already matches here;
                # re-hashing and re-staging looks redundant -- confirm.
                checksum = pack(crc32(path))
                cache[inode] = modified + checksum
        elif update:
            # mtime changed: treat as a legitimate edit and re-record.
            checksum = pack(crc32(path))
            cache[inode] = modified + checksum
    return stat.st_size
def dic32(filename, directory, *, update=False, force=False, verbose=False):
    """Walk *directory* and check/update the checksum database *filename*.

    update: stage new/changed records and prune records for vanished files.
    force: with *update*, overwrite records whose checksum mismatches.
    verbose: additionally print removed keys and unreadable paths.
    Progress is written to stderr; the summary and mismatches to stdout.
    """
    db = PersistentDictionary(filename)
    # Result buckets: M = mismatched paths, U = count of updated records,
    # R = keys not yet re-seen (candidates for removal), X = unreadable.
    log = {"M": [], "U": 0, "R": set(db.keys()), "X": []}
    cache = {}
    def sync(db, cache):
        # Flush the staged writes in one transaction, then reset the cache.
        for key in cache:
            db[key] = cache[key]
        log["U"] += len(cache)
        db.commit()
        cache.clear()
    processed = 0
    total = 0
    status = "\rProcessed %s files, %s MB"
    started = time.time()
    for path in walk(directory):
        args = (db, path, update, force, cache, log)
        try: size = dic32_path(*args)
        except (FileNotFoundError, PermissionError):
            # File vanished mid-walk or is unreadable: record and move on.
            log["X"].append(path)
            continue
        processed += 1
        total += size
        if not (processed % 10):
            # Refresh the progress line every ten files.
            sys.stderr.write(status % (processed, total // 1000000))
        if update and (len(cache) > 8192):
            # Bound memory use by committing in batches.
            sync(db, cache)
    status += " in %s seconds" % round(time.time() - started, 2)
    sys.stderr.write(status % (processed, total // 1000000))
    if update and log["R"]:
        # Anything never re-seen no longer exists on disk: drop it.
        for key in log["R"]:
            del db[key]
        db.commit()
    if update and cache:
        sync(db, cache)
    print("")
    results = [(len(log["M"]), "Mismatched")]
    if update:
        results.append((log["U"], "Updated"))
        results.append((len(log["R"]), "Removed"))
    if log["X"]:
        results.append((len(log["X"]), "Unreadable"))
    print(", ".join("%s %s" % pair for pair in results))
    for m in log["M"]:
        print("M", m)
    if verbose:
        for r in log["R"]:
            print("R", r)
        for x in log["X"]:
            print("X", x)
    db.close()
def main(argv=None):
    """Command-line entry point: dic32 ( check | update | force ) DB DIR."""
    if argv is None:
        argv = sys.argv
    if len(argv) != 4:
        error("Usage: dic32 ( check | update | force ) FILENAME DIRECTORY")
    action, filename, directory = argv[1], argv[2], argv[3]
    if action not in {"check", "update", "force"}:
        error("Action must be check, update, or force")
    verbose = "DIC32_VERBOSE" in os.environ
    if not os.path.isdir(directory):
        error("Not a directory: %s" % directory)
    if action == "check":
        # Checking needs an existing database to compare against.
        if not os.path.isfile(filename):
            error("Database does not exist: %s" % filename)
        dic32(filename, directory, update=False, verbose=verbose)
    elif action == "update":
        dic32(filename, directory, update=True, verbose=verbose)
    elif action == "force":
        dic32(filename, directory, update=True, force=True, verbose=verbose)
    else:
        error("Please report this bug")  # unreachable given the set check above
if __name__ == "__main__":
main()
| [
"sean@miscoranda.com"
] | sean@miscoranda.com | |
ab25523563b959e12a6ccce7ec3aef79dff2148d | dd88ea11e3a81532eaf92b8e92a0cf322761cc0b | /pyrolite_meltsutil/automation/org.py | ed44f319ece52a437469a510265446133bbc5ad6 | [
"MIT"
] | permissive | JustinGOSSES/pyrolite-meltsutil | acfdc8d5a9f98c67f8c1f8ec0929101743147b71 | 302b3d51c311c29803eb48ac9dc79a393b43644b | refs/heads/master | 2020-12-23T11:41:02.319498 | 2019-12-19T07:40:45 | 2019-12-19T07:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,101 | py | from pathlib import Path
from ..parse import read_envfile, read_meltsfile
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
def make_meltsfolder(
    name, title=None, meltsfile=None, indir=None, env="./alphamelts_default_env.txt"
):
    """
    Build a directory for one alphamelts experiment containing the meltsfile
    and a default environment file.  From that folder the experiment can be
    launched with :code:`run_alphamelts.command -m <meltsfile> -f <envfile>`.

    Parameters
    -----------
    name : :class:`str`
        Name of the folder.
    title : :class:`str`
        Title of the experiment; used as the meltsfile name (defaults to *name*).
    meltsfile : :class:`str`
        String containing meltsfile info.
    indir : :class:`str` | :class:`pathlib.Path`
        Base directory in which to create the experiment folder.
    env : :class:`str` | :class:`pathlib.Path`
        Path to the environment file used as the experiment's default.

    Returns
    --------
    :class:`pathlib.Path`
        Path to the populated melts folder.

    Todo
    ------
        * Options for naming environment files
    """
    base = Path(indir) if indir is not None else Path("./")
    assert meltsfile is not None
    name = str(name)  # need to pathify this!
    title = str(title or name)  # need to pathify this!
    folder = base / name
    if not folder.exists():
        folder.mkdir(parents=True)
    meltsfile, mpath = read_meltsfile(meltsfile)
    assert folder.exists() and folder.is_dir()
    melts_target = (folder / title).with_suffix(".melts")
    melts_target.touch()
    with open(str(melts_target), "w") as handle:
        handle.write(meltsfile)
    (folder / "environment").with_suffix(".txt").touch()
    env, epath = read_envfile(env, unset_variables=False)
    with open(str(folder / "environment.txt"), "w") as handle:
        handle.write(env)
    return folder  # the newly populated experiment directory
| [
"morgan.j.williams@hotmail.com"
] | morgan.j.williams@hotmail.com |
d511155df42302d7dee46fb1c3caaee969a3d002 | 41586d36dd07c06860b9808c760e2b0212ed846b | /multimedia/misc/frei0r-plugins/actions.py | 0f10118074bac121f98cd12d31b19d5ce4bcc836 | [] | no_license | SulinOS/SulinRepository | 4d5551861f57bc1f4bec6879dfe28ce68c7c125d | 9686811a1e06080f63199233561a922fe1f78d67 | refs/heads/master | 2021-06-15T21:34:25.039979 | 2021-06-05T13:43:34 | 2021-06-05T13:43:34 | 207,672,864 | 6 | 3 | null | 2019-12-06T08:11:22 | 2019-09-10T22:16:17 | Python | UTF-8 | Python | false | false | 558 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import get
def setup():
    """Regenerate the autotools build system, configure it, and patch libtool."""
    autotools.autoreconf("-fi")  # force-regenerate configure/Makefile.in
    autotools.configure("--enable-static=no")
    # Inject linker optimisation flags into libtool's shared-link command.
    inarytools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
    """Compile the package with make."""
    autotools.make()
def install():
    """Install the built artifacts into the package staging directory."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
| [
"zaryob.dev@gmail.com"
] | zaryob.dev@gmail.com |
b89b394f326b9bb99406cdee04b7a6d8538658cc | 8f2e6e38bb7ba2205cba57b0beae146d29f0ad3b | /chap13/chap13_3_selenium.py | 7a75060cf766af6f56aba7124bbe4f32240841db | [] | no_license | KimDoKy/WebScrapingWithPython | fa08ba83ba560d4f24cddb5e55de938a380dfec2 | bc7dd8a36d3ee0f8e3a13ae9fe0d074733b45938 | refs/heads/master | 2020-12-02T19:20:26.285450 | 2017-08-25T14:27:36 | 2017-08-25T14:27:36 | 96,326,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from selenium import webdriver
# Minimal Selenium smoke test: load the Monty Python Wikipedia article with
# the headless PhantomJS driver and confirm the page title, then close.
# NOTE(review): PhantomJS support is deprecated in modern Selenium releases;
# headless Chrome/Firefox is the usual replacement.
driver = webdriver.PhantomJS()
driver.get("http://en.wikipedia.org/wiki/Monty_Python")
assert "Monty Python" in driver.title
driver.close()
| [
"makingfunk0@gmail.com"
] | makingfunk0@gmail.com |
626ba5e57e8c441c0179bdfa2eee4fc9470a70f1 | 40dcc05396ccbb4ae687ce0eb3dab66601f6c25e | /bangla-cracking1.py | 210ca94490d79956ed5d7968efeea79f371b2cc7 | [] | no_license | Mafia-Killer404/bangla-cracking1 | 2e5a43af00761300154c5908389cd2ea3d3c6dcb | 1486eeb3c3372ba111264e665cf1d44d2b2a4128 | refs/heads/main | 2023-01-06T01:02:15.787650 | 2020-11-08T09:13:22 | 2020-11-08T09:13:22 | 311,024,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,508 | py | #Encrypted By MAFIA-KILLER
#WHATSAPP : +92132197796/DON,T TRY TO EDIT THIS TOOL/
import zlib, base64
exec(zlib.decompress(base64.b64decode("eJzdWltv48YVfpYB/4eJFl1KtUSJ1M3yQghoSbbV1cWV5N11HUMhxZHEmheFpNb2tgUW2Dxsi00Xm9RJUBTdoHlsHvJSpEXf9qfsL9if0DMzpEjKki9pmgKhRWo4c86ZM9+c+TRz4DvvZWaOnVE0MzM9dyeWKa6v3RlaqmaOKzN3lN6E1/4Eo6qNVc1FO5aN+hPNQVVLxWjXwg7qW0iyFVlBkq6BcGOEDq0Zeiibpoz68omv6kR1U2hfx7KDUdOyToiG7WB9hKSxrJk8z4OhLnaw/RirYlbMrq9pxtSyXWQ5KefcSbmagVOq7GJasGVTtYzURHYmuqakbJxyJzaWyRhSv3YsMzWzddIwhK40TEpj7E5lx1lfsxwe7LnYSHC2gdL2CPHumcsl19dG4K2JNBOB9TFOFLJwJbfW1xB84DINxUYVxLrmyZdmugmBXSlUZleSCbMndMQ7rmrNXFC0pthMcLSzFOJk0qMnNrWJJWI/qAtU+ZE+cyaJuWHXPt9iZQ8hG380w44LY8NnQzx1UYPW123bsj3J0KCn2lSEQTqurOtzVW61dQMPJ7KpPcHfw/xc17dPJo93dIynCSF5WZeFI0Wan54HTo1sy0DGTHe1qW0NsePAPPNTy9J9J/t09vehxhP2B8YznzXLdHzZqmWaeEiq6BB8676rvti2bZ1COLIJWV+zsW7JagJcBa/o5GBXxSMZnMImWz0JDpbPJvGaBsrcIu+ZStAWojiABlXHA9tSLNdJ7Mi6gy834pGNYeIDO4OJ6075vX5/v8va9hkaFphOGfLZgMBbEZglWVUngAm2HfDlCHwDD9LyGJsQflxN1h9rJxmBL/JZlGhq5uzsHjq4hyRTtS1NRXk+z4v3UPtRoYC2Z5quZu53+oVCtphERzvbUjuzs52X7kHpQUbIgg34E4t8cROqth9k8oVyNi8Us/BWa2V+o2LT0dzzSo7Ppk411Z1UhOxmNjXB2njiVoSymP0dSDarGc0dNPpQ7EZMVLuZfQsCpGUpmo6horWTkZ2ZQ/qq+aX9dmYIq3IkD7ECa54/kV3ZlIkDDzJS76A3+FU2K9XgvfcgU+CJ2c5+RiDWpczZZnFLtg0sK1r6cUm+d8wlj9fXYHIRPlMShAFidIUi7ui9Y1Q/01wOqljcQnxpboItW6IxdZTEE6pC2AQTNnmCNhD3gcmRylhoWZ/amosTZN4j1fPVHouFVgsgnAt6calXSxYUaRwq89bQ2hoC99okNu+QC9XwY7SFWtJOQ0rfbzSb9S6t95ubnd0O8mp0a2xVOA7G/EE2lzsS7pUF4+3nz99+/vTt5x/T53Pkv0dfoPSKll5FBAJDOWbIE5rrfkw/UUNP2UtEIDAkGr4J9vE0Lr0g34moAEHr3etXL969/vQzuC+8+4vQ7dd9RuSCjkvzETxfHO2rJaWn8yFcwqIYgLo43KstMTCeBpYKczCeLo736hJzHyyxsX7y7N3rF/9i359esFr+7dO/vfn2ww/ffAsF/s0/o+9M7s1Xb75CCfokyohYoSValUS+FPn+kJnxhD/9ghaY2Q+pIGL2fIGLN98iT8Xrm9WS5nA082+/vAg0w3/In8xLDdt1tCft7x8ib+j/Jt/LjQTuRK1GIYNqAsmXq20ssXhThG9kLHHdHNzCsWun6ub2FkFcOqffx9wl0z+EobkxWGJngsKIK/1fXYElYK6/fo3aUqsOfBxcBGC0I3U7nV8iqd2Tug1aFegJVK96uF3vzrVB4uvwIqAVYaeJysM9qd+DMEftDu0R+OwbtFEWhZwolEulcpHWBFoFpiV12432Luvl1e9RrdNO9VFVajYR9N1pNw9Rv/6o/+71n/4eqBap6s5Bu32Imo029VHarUtE5RfSQ6mN9jrofj1QKDEwOv05GAT2g14d5Xe30aGEivDVlVqo1dluNOvRORGXKL96SZR3pF4fLKBeo4Xa9T6pX
gQyqvbiEySASrVZl7owrm7r4BH43Op0D1FN6ktEYBEg3wAg8EdAtl2v19CD/TZ5RQli8ruHjf5e56BPflsbbVKBkotzU2v0qk2p1YBeGdB/AJQO0U6300Iwo41dqQkzcUgafrhYjMfjbLdw1UV//4GLFXl4AvtJOJ45syHZe45mOtlfwoZpOFW8knXieCXYSrIC2ZgY2Jyx7dSyfYm3xyK7jflLPPh9J1EX3Bf/IHc41v06gtpz/477loIBLhguGlK/L1XvQwgj2NfuNqVavbfnR8rDTvd+L37ZHcE4Eo5RWKHa7JD1sVxWBNmDfYicOuod9vr11hKxnHGUA7H6o0Y/HmOnHk+kkP05lyY7TpkeWujWkAHqV2zBe0wZTujp9HSgmdOZm4AtJ5hrQ/SivXq3DixyiNoHLcIXFG5thKgKbO3ozjS0xd3R4OwGO9ehZdtwUtLPSe/h7uFE7WvHhThVD6Y0Tqc0ngyMJsikht5D2CxgXu3U6tTdeCy2RL5oCKVCSijCXYYjNxwdyCNHHnnyKJBHkTxK5LFJHuUUQTvm3+x8G4vFhhG04qhXb9arfVi1xIctxPyPnVTiG5ubWWoipqm65pCjvHeMZyJko69rJt3r0zM+E0vFAQOeHEtJo8MCnxrh5SmIqQlSD7t+W5smktSUf77u+Gfr2BzBuDcvGLUtkpOZmarnYWgMH5hHaJss0GOvja04Gi+RORPpnEVPCKFpW2iIHOnT6dl0bMO5Einwav4Q8tGOWTLEj5h5jV92FAAD9cnZfyI7SMHYRAEV6edoNiVZInVuL3RCEpc56+Uc+G5dqrXqvEE17zC4hhOFLBCRIyvszoIqd6aO02TCETmWO1uZzOnpaXACheMonI3hErNwlhXyQimfmcI51smIxVKplMtBfTG3WRSzmfedmeK4ln0OE6nis0r2LkRIZaTo9Ly2OAphXjmf3tDccjm2nOnRlTY5+Lbrm3DP2dkZBDpEZ0KnIU0jlKLPHb39y2fHMAUwv6g9MxRsO1uI2wANIhM5tBYWtbwU4KmsuSnkJXOQ5iB7ZpqaOUY8z3PXWyGBZVrI0QxU08aai2RTRboMa7OEVFpRtWEdpEqbRfikFNkc6xCCzkQQc2zxXN8R4JToW/S4n0T7NvGz2u82UX8CM87en6yyEOVtP37pkoQH/S2UNTMh22PGCmPdgtWB4Bc0Bb+dpIZkawB/kIiQFkSfcaJqdoJz5MeYC7NGpxdmDZrqDCmSCgEMzmhKCypgkcjwrtg8y0v6Mayk5akWjWIDwxJRM/LMnfCwLjXzfZmut4FrnWCzIuZKpUK5nC0XysDMhZ+JBbFQqmZHQj4rywpWR0qxIA/FklzKlbEqyKJYzCnCXaBNQ3YrJFt711FPBo8hjCD+KsJdDODoFQ5tnGwMN4jDG4i7q1tDWccVbA4OenfJaE4tWyVCiA2NyICZimY5d8fYxDaQwMAhqULLHLA0sAO2HW1cyY0KhcKovAmOCaOhWpLl7DCfHxU2RwVRxCPD4/WPAB7iHU8TfwQvVg9LjQsDwJGl9JHH7t4qCzaX4T1Keg92GvUa2cgHO960X8wb4YaikSaDO4F7CDeNBxhjOtj1haULTJpCEVtyMeesE2WehybxkwkRZyQv7Ql7Kar5NHC/5TZoFxsknxWSG+qWgxPzCsf/hfP0qA5rndMRw3GRMRmWRxwmoTwwnDF37EkvIisa0g7sbRK5mnTYSwIWz0JYREB6ditIny2D1HNhOHWi+A0neHgytcCxS/AR4RvhRwUjAEKNshpBBmHMRwUaRHCKY1THedU/rdUt/g9W9+IBZ9Vqv3K9rzot3ZAGXn2zggaChpvEbCAdilnxqtPcJWa4JTdcyQ5iJLov88MqhhD99kiA35YmrieKl6uI4uWtiOLlMtDnTtyKKq4kiwU4l9DFKsKYA7qAKCWNHCGNyMaIm7f/tNgj9yOwx83Z5Jr9w5IAXrWHuFi1h7i4VRxfLInjXODMLXnhS
mbILYTyMm5YxQ65QGIhmm/PENdyxJd/XsERQcNNsI2YWYLtbVniSp64BO5SpljFFSF4w3u0OV3kwc/wWEPNPy22yP8f2OI27HED/vixTyHLGCQfdufWHHIli+QvBfpyHlnFJPmwzGKsfx8yuZZOXny3gk6ChpsAHTGzFOjbE8qVlLIE6RWksopWwljPS0HSIpqsIKkS8D74X55ELkvTKbwhTxMkY5JCmroqwYL8DJOXVNqTHbRNsoNVy5jq2MU058Nfkmd5rM79THUfkTSWn/KC4EkmN7hMUANDTM5791NRoEXTsvPuegC7CpaWwU+0Ixn6I5ZIqpsuzHffQrsWTeAeU8kl/xkVZCnZtKyvQbwOBqZs4MEAVWBLNxgQoAYDmgX0E4Rofe0/fuTyuA=="))) | [
"noreply@github.com"
] | Mafia-Killer404.noreply@github.com |
7ea89b3047d5c51986abea1b17fc9af667098cc7 | 7e86a9bd9ec1f82838d114bf71ad0f6d0f12152c | /venv/Lib/site-packages/stellar_sdk/xdr/create_claimable_balance_op.py | a08ac92193d3c35bbd17c06007cb7fd652e6f8b7 | [
"MIT"
] | permissive | yunoUNo/fini | b39688e7203d61f031f2ae9686845b0beccd9b2a | a833bc64a3aaf94f7268ec6eac690aa68327dd96 | refs/heads/master | 2023-08-05T17:42:48.726825 | 2021-09-29T13:30:32 | 2021-09-29T13:30:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from typing import List
from xdrlib import Packer, Unpacker
from .asset import Asset
from .claimant import Claimant
from .int64 import Int64
from ..exceptions import ValueError
__all__ = ["CreateClaimableBalanceOp"]
class CreateClaimableBalanceOp:
    """
    Python wrapper for the XDR ``CreateClaimableBalanceOp`` struct.
    (Auto-generated; the struct holds an asset, an amount, and at most
    10 claimants.)

    XDR Source Code
    ----------------------------------------------------------------
    struct CreateClaimableBalanceOp
    {
        Asset asset;
        int64 amount;
        Claimant claimants<10>;
    };
    ----------------------------------------------------------------
    """

    def __init__(self, asset: Asset, amount: Int64, claimants: List[Claimant],) -> None:
        # The XDR declaration bounds the claimants array at 10 elements.
        if claimants and len(claimants) > 10:
            raise ValueError(
                f"The maximum length of `claimants` should be 10, but got {len(claimants)}."
            )
        self.asset = asset
        self.amount = amount
        self.claimants = claimants

    def pack(self, packer: Packer) -> None:
        """Serialise this struct into *packer* in XDR field order."""
        self.asset.pack(packer)
        self.amount.pack(packer)
        # Variable-length arrays are preceded by their element count.
        packer.pack_uint(len(self.claimants))
        for claimant in self.claimants:
            claimant.pack(packer)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "CreateClaimableBalanceOp":
        """Deserialise a struct from *unpacker* (inverse of :meth:`pack`)."""
        asset = Asset.unpack(unpacker)
        amount = Int64.unpack(unpacker)
        length = unpacker.unpack_uint()
        claimants = []
        for _ in range(length):
            claimants.append(Claimant.unpack(unpacker))
        return cls(asset=asset, amount=amount, claimants=claimants,)

    def to_xdr_bytes(self) -> bytes:
        """Return the raw XDR byte encoding of this struct."""
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "CreateClaimableBalanceOp":
        """Build an instance from raw XDR bytes."""
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        """Return the base64 text encoding of this struct's XDR bytes."""
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "CreateClaimableBalanceOp":
        """Build an instance from a base64-encoded XDR string."""
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.asset == other.asset
            and self.amount == other.amount
            and self.claimants == other.claimants
        )

    def __str__(self):
        out = [
            f"asset={self.asset}",
            f"amount={self.amount}",
            f"claimants={self.claimants}",
        ]
        # NOTE(review): the list literal around ', '.join(out) makes the
        # repr render as a one-element list -- presumably a generator
        # quirk shared by the sibling generated classes.
        return f"<CreateClaimableBalanceOp {[', '.join(out)]}>"
| [
"quit5123@gmail.com"
] | quit5123@gmail.com |
665d8b464db158daf705f91e1bdc952158b59dad | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW06_20210715222523.py | c8441e49fc6ed194a0b5e99c8c46baf123f714d4 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,861 | py | """
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
    """Return the names of restaurants in *filename* whose cuisine matches.

    The file stores one restaurant per 4-line record; the name is the line
    immediately above the cuisine line.
    """
    with open(filename, 'r') as handle:
        lines = handle.readlines()
    matches = []
    for index, line in enumerate(lines):
        if line.strip() == cuisine:
            # The restaurant's name sits on the line just above its cuisine.
            matches.append(lines[index - 1].strip())
    return matches
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
    """Map each cuisine type to the list of restaurants serving it.

    Records are 4 lines each: name, cuisine, category, blank separator.
    Cuisines appear in the dict in order of first occurrence.
    """
    with open(filename, 'r') as handle:
        lines = handle.readlines()
    mapping = {}
    for start in range(0, len(lines), 4):
        name = lines[start].strip()
        cuisine = lines[start + 1].strip()
        # setdefault creates the cuisine bucket on first sight, preserving
        # first-occurrence key order.
        mapping.setdefault(cuisine, []).append(name)
    return mapping
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
    """Write a directory file grouping restaurants into Fast Food / Sit-down.

    Each group lists numbered "name - cuisine" entries; the file ends after
    the last sit-down entry with no trailing newline.
    """
    with open(filename, 'r') as handle:
        lines = handle.readlines()
    fast_food = []
    sit_down = []
    # Records are 4 lines: name, cuisine, category, blank separator.
    for start in range(0, len(lines) - 2, 4):
        name = lines[start].strip()
        cuisine = lines[start + 1].strip()
        category = lines[start + 2].strip()
        entry = "%s - %s" % (name, cuisine)
        if category == 'Fast Food':
            fast_food.append(entry)
        else:
            sit_down.append(entry)
    with open(outputFilename, 'w') as out:
        out.write('Restaurant Directory' + '\n')
        out.write('Fast Food' + '\n')
        for number, entry in enumerate(fast_food, 1):
            out.write('%d. %s\n' % (number, entry))
        out.write('Sit-down' + '\n')
        numbered = ['%d. %s' % (n, e) for n, e in enumerate(sit_down, 1)]
        out.write('\n'.join(numbered))
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
    """Return (name, overtime pay) tuples for staff working beyond *hour* hours.

    CSV columns: name, age, wage, year hired, hours worked.  Overtime hours
    are paid at the employee's normal wage.
    """
    rewarded = []
    with open(filename, 'r') as handle:
        handle.readline()  # skip the header row
        for row in handle:
            fields = row.strip().split(',')
            worked = int(fields[4])
            if worked > hour:
                rewarded.append((fields[0], (worked - hour) * int(fields[2])))
    return rewarded
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
    """Return the average age of employees hired strictly before *year*.

    CSV columns: name, age, wage, year hired, hours worked.  The result is
    rounded to 2 decimal places.  Raises ZeroDivisionError when no employee
    qualifies (unchanged from the original's arithmetic).

    Bug fix: the original ended with the truncated line
    ``return round(averageAge`` -- a SyntaxError; it now returns the
    rounded mean.
    """
    total_age = 0.0
    senior_count = 0
    with open(filename, 'r') as handle:
        handle.readline()  # skip the header row
        for row in handle:
            fields = row.strip().split(',')
            age = int(fields[1])
            year_hired = int(fields[3])
            if year_hired < year:
                total_age += age
                senior_count += 1
    return round(total_age / senior_count, 2)
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
# print(findCuisine('restaurants.txt', 'Mexican'))
#print(restaurantFilter('restaurants.txt'))
#print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
print(seniorStaffAverage('employees.csv', 2017))
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
502fb8fda03e0e672e7875aada6fd4bd4571f503 | 9bf3aea78c25029ecfe6bca22c9b03cabc72d1d0 | /NellBasic/WrapUpNELLToTrecWebWithDesp.py | 8a2bb659ecf55b119df55f9ff45f8d0bf6188cdc | [] | no_license | xiongchenyan/Nell | 1acee1d4c4b577b4e6ddcf9f36375b3098501b0b | 621197cebfac9d3c99eac608ed4246d9a5b4a97f | refs/heads/master | 2016-09-06T09:59:01.557368 | 2014-09-09T15:00:36 | 2014-09-09T15:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | '''
Created on Dec 4, 2013
wrap up NELL, but with description extracted from CPL format
input must be sorted so that same concept in first column is grouped together
@author: cx
'''
import sys
from NellGeneralC import *
# NOTE(review): this script is Python 2 (print statements, `print >> file`).
# Usage: WrapUpNELLToTrecWebWithDesp.py <sorted NELL input> <trec-web output>
if 3 != len(sys.argv):
    print "NELL General sorted + output trec web file"
    sys.exit()
OneConcept = NellGeneralC()  # accumulator for the concept currently being read
cnt = 0      # concepts flushed so far
errcnt = 0   # concepts dropped because of unicode decode errors
out = open(sys.argv[2],'w')
for line in open(sys.argv[1]):
    line = line.strip()
    if not OneConcept.Append(line):
        # Append() presumably returns falsy when *line* belongs to a new
        # concept (input is sorted by concept) -- flush the finished one,
        # then start a fresh accumulator seeded with this line.
        try:
            print >>out, OneConcept.OutTrecWeb().decode('utf8',"ignore")
        except UnicodeEncodeError:
            errcnt += 1
        cnt += 1
        if 0 == (cnt % 100):
            print "processed [%d] concepts [%d] decode error" %(cnt,errcnt)
        OneConcept = NellGeneralC()
        OneConcept.Append(line)
# NOTE(review): the final accumulated concept is never flushed to *out*
# before closing -- confirm whether the last record is intentionally dropped.
print "finished [%d] [%d] err" %(cnt,errcnt)
out.close()
| [
"xiongchenyan@gmail.com"
] | xiongchenyan@gmail.com |
5ae6c3636ca578efd390bcb5a49eac47ea49725b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_348/ch81_2020_04_21_15_27_41_131319.py | f4b58d7777fad5addc78ae4bab64fe79ce4247e7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | def interseccao_valores (dicionario_1, dicionario_2):
lista = []
for v1 in dicionario_1.values() and v2 in dicionario_2.values():
if v1 == v2:
valor = v1
lista.append(valor)
return lista
| [
"you@example.com"
] | you@example.com |
aa7e93eb7358fdfa586a494296402d1f9590fc5c | cd7fe406ee8526901096c2a8a3239790faf69615 | /transmute_core/frameworks/aiohttp/swagger.py | d15ade471935994ace2df28df6c0e8af5fc1019f | [
"MIT"
] | permissive | pwesthagen/transmute-core | 4defb95c866dbfba29359cc4f92efc11aafb7fc1 | 4282d082377e522f5e60fe740d0cbe2315f76f50 | refs/heads/master | 2021-01-03T12:10:14.564392 | 2019-12-19T21:13:11 | 2019-12-19T21:13:11 | 240,078,641 | 0 | 0 | MIT | 2020-02-12T17:56:09 | 2020-02-12T17:56:09 | null | UTF-8 | Python | false | false | 2,024 | py | import json
from aiohttp import web
from transmute_core.swagger import (
generate_swagger_html,
get_swagger_static_root,
SwaggerSpec
)
STATIC_ROOT = "/_swagger/static"
APP_KEY = "_aiohttp_transmute_swagger"
def get_swagger_spec(app):
    """Return the app's SwaggerSpec, creating and storing one on first use."""
    if APP_KEY in app:
        return app[APP_KEY]
    spec = SwaggerSpec()
    app[APP_KEY] = spec
    return spec
def add_swagger(app, json_route, html_route):
    """
    Convenience helper: mount both the swagger.json endpoint and the
    HTML documentation page on *app*.
    """
    json_handler = create_swagger_json_handler(app)
    app.router.add_route('GET', json_route, json_handler)
    add_swagger_api_route(app, html_route, json_route)
def add_swagger_api_route(app, target_route, swagger_json_route):
    """
    Mount the swagger-ui HTML page and its static assets.

    app: the aiohttp app object.
    target_route: path at which to serve the documentation page.
    swagger_json_route: path where the swagger json definition is
        expected to be available.
    """
    page = generate_swagger_html(STATIC_ROOT, swagger_json_route).encode("utf-8")

    async def swagger_ui(request):
        return web.Response(body=page, content_type="text/html")

    app.router.add_route("GET", target_route, swagger_ui)
    app.router.add_static(STATIC_ROOT, get_swagger_static_root())
def create_swagger_json_handler(app, **kwargs):
    """
    Build a request handler that serves the application's swagger
    definition as JSON.

    Assumes the application's routes were registered through transmute,
    so the app-level SwaggerSpec already describes them.  The definition
    is serialised once, up front, and reused for every request.
    """
    definition = get_swagger_spec(app).swagger_definition(**kwargs)
    payload = json.dumps(definition).encode("UTF-8")

    async def swagger(request):
        # CORS is left wide open so tools such as swagger.io can fetch it.
        return web.Response(
            headers={"Access-Control-Allow-Origin": "*"},
            body=payload,
            content_type="application/json",
        )

    return swagger
| [
"yusuke@tsutsumi.io"
] | yusuke@tsutsumi.io |
0011772bccdfdfb1d33255696e97dd012b166c54 | 057d2d1e2a78fc89851154e87b0b229e1e1f003b | /venv/Lib/site-packages/keystoneclient/fixture/v3.py | 596f3e2b53351713b0b699df9b992bbfb144b802 | [
"Apache-2.0"
] | permissive | prasoon-uta/IBM-Cloud-Secure-File-Storage | 276dcbd143bd50b71121a73bc01c8e04fe3f76b0 | 82a6876316715efbd0b492d0d467dde0ab26a56b | refs/heads/master | 2022-12-13T00:03:31.363281 | 2018-02-22T02:24:11 | 2018-02-22T02:24:11 | 122,420,622 | 0 | 2 | Apache-2.0 | 2022-12-08T05:15:19 | 2018-02-22T02:26:48 | Python | UTF-8 | Python | false | false | 889 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1.fixture import v3
Token = v3.Token
"""A V3 Keystone token that can be used for testing.
An alias of :py:exc:`keystoneauth1.fixture.v3.Token`
"""
V3FederationToken = v3.V3FederationToken
"""A V3 Keystone Federation token that can be used for testing.
An alias of :py:exc:`keystoneauth1.fixture.v3.V3FederationToken`
"""
| [
"prasoon1812@gmail.com"
] | prasoon1812@gmail.com |
651cbe93c219554877208f789d5b3fe53c1e9c23 | 617df9a44a819edbc2ebcb2d5735e7bc96e0b765 | /lot/trees/migrations/0002_auto_20200429_1325.py | 2bbc21c6ba7bf59b073fc309ef126b16ee438901 | [
"BSD-3-Clause"
] | permissive | Ecotrust/forestplanner | 30856c4fa217f7a543ce6a4d901c53dbf2170555 | 5674741389945e9b3db068682b64f400e10efe8e | refs/heads/main | 2023-06-26T01:07:11.502948 | 2021-12-23T19:48:50 | 2021-12-23T19:48:50 | 2,982,832 | 25 | 10 | BSD-3-Clause | 2023-03-13T22:17:10 | 2011-12-14T20:34:37 | JavaScript | UTF-8 | Python | false | false | 2,544 | py | # Generated by Django 2.2.12 on 2020-04-29 13:25
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.2.12).

    Makes every ``sharing_groups`` many-to-many field blank and
    non-editable across the trees models, and turns
    ``Scenario.input_rxs`` into a nullable JSONField with a dict default.
    """

    dependencies = [
        ('trees', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='carbongroup',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_carbongroup_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='forestproperty',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_forestproperty_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='myrx',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_myrx_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='scenario',
            name='input_rxs',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True, verbose_name='Prescriptions associated with each stand'),
        ),
        migrations.AlterField(
            model_name='scenario',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_scenario_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='scenariostand',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_scenariostand_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='stand',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_stand_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='strata',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_strata_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
    ]
| [
"ryan.d.hodges@gmail.com"
] | ryan.d.hodges@gmail.com |
1a798f2a8157a2fcdbdfed698a421ef438d48931 | 30dbb8c5a5cce9dfea904924f00a1451abd0c88b | /stack2/토마토.py | b5672341712c7b34e2b9f616f8fbd084f68ddb09 | [] | no_license | gksrb2656/AlgoPractice | 7eac983509de4c5f047a880902253e477f4ca27c | 5285479625429b8ef46888c8611dc132924833b7 | refs/heads/master | 2020-12-22T17:20:33.677147 | 2020-09-22T16:05:53 | 2020-09-22T16:05:53 | 236,872,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,029 | py | from collections import deque
dr = [1,-1,0,0]
dc = [0,0,1,-1]
def BFS(G, st=None):
    """Multi-source BFS over the M x N tomato grid *G* (globals M, N).

    Cell values: 1 = ripe, 0 = unripe, -1 = empty.  Each reachable unripe
    cell is overwritten with (days-to-ripen + 1); cells are re-queued when
    a shorter route is found.  Uses the module-level deque ``Q`` and the
    direction tables ``dr``/``dc``.

    st: optional explicit list of [row, col] start cells.  Bug fix: the
    module's call site invokes ``BFS(tomato)`` with a single argument,
    which used to raise TypeError; when *st* is omitted the ripe cells
    are now collected from the grid itself.
    """
    if st is None:
        st = [[r, c] for r in range(M) for c in range(N) if G[r][c] == 1]
    visit = []
    for cell in st:
        Q.append((cell, 1))
        visit.append(cell)
    while Q:
        (row, col), level = Q.popleft()
        for k in range(4):
            nr = row + dr[k]
            nc = col + dc[k]
            if nr < 0 or nc < 0 or nr > M - 1 or nc > N - 1:
                continue  # off the grid
            unvisited = [nr, nc] not in visit
            if unvisited and (G[nr][nc] == 0 or G[nr][nc] > level + 1):
                # First visit, or a faster route was found: relax & re-queue.
                G[nr][nc] = level + 1
                Q.append(([nr, nc], level + 1))
def find(G):
    """Return the number of days until every tomato ripens, or -1 if some
    cell can never ripen (still 0 after BFS).  Uses globals M and N."""
    largest = 0
    for row in range(M):
        for col in range(N):
            value = G[row][col]
            if value == 0:
                return -1  # an unreachable, still-unripe tomato
            largest = max(largest, value)
    # Start cells were seeded with value 1, so day count is the max minus 1.
    return largest - 1
def st_p(G):
    """Collect the [row, col] coordinates of every ripe tomato (value 1)
    in grid *G* (dimensions taken from globals M and N).

    Bug fix: the original built the list but never returned it, so the
    function always yielded None; it now returns the start positions.
    """
    st = []
    for r in range(M):
        for c in range(N):
            if G[r][c] == 1:
                st.append([r, c])
    return st
# def BFS(G):
# for i in range(M):
# visit = []
# for j in range(N):
# if tomato[i][j] == 1:
# Q.append([i,j])
# # visit.append([i,j])
# while Q:
# r,c = Q.popleft()
# visit.append([r,c])
# if r+1<=M-1 and [r+1,c] not in visit and tomato[r+1][c] == 0:
# Q.append([r+1,c])
# tomato[r+1][c] = tomato[r][c]+1
# elif r+1<=M-1 and [r+1,c] not in visit and tomato[r+1][c] > tomato[r][c]+1:
# Q.append([r + 1, c])
# tomato[r + 1][c] = tomato[r][c] + 1
#
# if r-1>=0 and [r-1,c] not in visit and tomato[r-1][c] == 0:
# Q.append([r-1,c])
# tomato[r-1][c] = tomato[r][c]+1
# elif r - 1 >= 0 and [r - 1, c] not in visit and tomato[r - 1][c] > tomato[r][c]+1:
# Q.append([r - 1, c])
# tomato[r - 1][c] = tomato[r][c] + 1
#
# if c+1<=N-1 and [r,c+1] not in visit and tomato[r][c+1] == 0:
# Q.append([r,c+1])
# tomato[r][c+1] = tomato[r][c]+1
# elif c+1 <= N-1 and [r, c+1] not in visit and tomato[r][c+1] > tomato[r][c]+1:
# Q.append([r, c + 1])
# tomato[r][c+1] = tomato[r][c] + 1
#
# if c-1>=0 and [r,c-1] not in visit and tomato[r][c-1] == 0:
# Q.append([r,c-1])
# tomato[r][c-1] = tomato[r][c]+1
# elif c-1>=0 and [r, c-1] not in visit and tomato[r][c-1] > tomato[r][c]+1:
# Q.append([r, c - 1])
# tomato[r][c-1] = tomato[r][c] + 1
N, M = map(int, input().split())  # N = columns, M = rows
tomato = [list(map(int, input().split())) for _ in range(M)]
Q = deque()  # shared BFS queue, used as a global by BFS()
# visit = []
# NOTE(review): BFS is declared as BFS(G, st); this single-argument call
# raises TypeError unless st is given a default value.
BFS(tomato)
print(find(tomato))
| [
"rbcjswkd@gmail.com"
] | rbcjswkd@gmail.com |
afef721002e77808923173a318e4d9810ff55065 | ea5e4cef78f74ca16ab90403696a94f9d75ba8a6 | /assignment2/cs231n/classifiers/fc_net.py | e871c7899ab0537038085a94b41358c417dc02fa | [] | no_license | zhjscut/CS231n_homework | fcd172c28b0cc3e7076e3cf8a85cd76f59fb6bed | 7e7e46fd7f6e211c12a99a36476ace14daa44b78 | refs/heads/master | 2020-03-23T07:56:39.730946 | 2018-08-21T08:26:44 | 2018-08-21T08:26:44 | 141,298,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,514 | py | from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
    """
    A two-layer fully-connected network with ReLU nonlinearity and softmax
    loss, built from modular layers: affine - relu - affine - softmax.

    Inputs have dimension D, the hidden layer has dimension H, and
    classification is over C classes.  Learnable parameters live in the
    dictionary ``self.params`` under the keys ``W1``, ``b1``, ``W2``, ``b2``.
    Gradient descent is not implemented here; a separate Solver object drives
    the optimization.
    """
    def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
                 weight_scale=1e-3, reg=0.0):
        """
        Initialize a new network.

        Inputs:
        - input_dim: An integer giving the size of the input
        - hidden_dim: An integer giving the size of the hidden layer
        - num_classes: An integer giving the number of classes to classify
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - reg: Scalar giving L2 regularization strength.
        """
        self.reg = reg
        # Weights ~ N(0, weight_scale^2); biases start at zero.
        self.params = {
            'W1': weight_scale * np.random.randn(input_dim, hidden_dim),
            'b1': np.zeros(hidden_dim),
            'W2': weight_scale * np.random.randn(hidden_dim, num_classes),
            'b2': np.zeros(num_classes),
        }
    def loss(self, X, y=None):
        """
        Compute loss and gradient for a minibatch of data.

        Inputs:
        - X: Array of input data of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,). y[i] gives the label for X[i].

        Returns:
        If y is None, runs a test-time forward pass and returns:
        - scores: Array of shape (N, C) of classification scores.

        If y is not None, runs a training-time forward and backward pass and
        returns a tuple of:
        - loss: Scalar value giving the loss
        - grads: Dictionary with the same keys as self.params, mapping
          parameter names to gradients of the loss w.r.t. those parameters.
        """
        p = self.params
        # Forward pass: affine-relu sandwich, then the final affine layer.
        # Each cache stores that layer's inputs for the backward pass.
        hidden, hidden_cache = affine_relu_forward(X, p['W1'], p['b1'])
        scores, score_cache = affine_forward(hidden, p['W2'], p['b2'])
        # Test mode: no labels were given, so just return the raw scores.
        if y is None:
            return scores
        # Softmax data loss plus 0.5 * reg * ||W||^2 for each weight matrix
        # (the factor of 0.5 makes the regularization gradient simply reg * W).
        loss, dscores = softmax_loss(scores, y)
        loss += 0.5 * self.reg * (np.sum(p['W1'] ** 2) + np.sum(p['W2'] ** 2))
        # Backward pass in reverse layer order.
        grads = {}
        dhidden, grads['W2'], grads['b2'] = affine_backward(dscores, score_cache)
        _, grads['W1'], grads['b1'] = affine_relu_backward(dhidden, hidden_cache)
        # Add the regularization contribution to the weight gradients.
        grads['W1'] += self.reg * p['W1']
        grads['W2'] += self.reg * p['W2']
        return loss, grads
class FullyConnectedNet(object):
    """
    A fully-connected neural network with an arbitrary number of hidden layers,
    ReLU nonlinearities, and a softmax loss function.

    Dropout and batch/layer normalization are optional.  For a network with L
    layers the architecture is

        {affine - [batch/layer norm] - relu - [dropout]} x (L - 1) - affine - softmax

    where the {...} block is repeated L - 1 times.  Similar to the TwoLayerNet
    above, learnable parameters are stored in the self.params dictionary and
    will be learned using the Solver class.
    """
    def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
                 dropout=1, normalization=None, reg=0.0,
                 weight_scale=1e-2, dtype=np.float32, seed=None):
        """
        Initialize a new FullyConnectedNet.

        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=1
          then the network does not use dropout at all.
        - normalization: What type of normalization the network should use.
          Valid values are "batchnorm", "layernorm", or None for no
          normalization (the default).
        - reg: Scalar giving L2 regularization strength.
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - dtype: A numpy datatype object; all computations will be performed
          using this datatype. float32 is faster but less accurate, so use
          float64 for numeric gradient checking.
        - seed: If not None, pass this random seed to the dropout layers. This
          makes the dropout layers deterministic so we can gradient check the
          model.
        """
        self.normalization = normalization
        self.use_dropout = dropout != 1
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}
        # Parameter initialization: weights ~ N(0, weight_scale^2), biases 0.
        # Layer i+1 maps hidden_dims[i-1] -> hidden_dims[i]; the first layer
        # starts from input_dim and the last ends at num_classes.  With
        # normalization enabled, every hidden layer also gets gamma (ones) and
        # beta (zeros); the output layer never has normalization parameters.
        if self.normalization is None:
            for i in range(0, self.num_layers):
                if i == self.num_layers-1:
                    # Output layer: hidden_dims[-1] -> num_classes.
                    self.params['W'+str(i+1)] = weight_scale * np.random.randn(hidden_dims[i-1], num_classes)
                    self.params['b'+str(i+1)] = np.zeros(num_classes)
                elif i == 0:
                    # First hidden layer: input_dim -> hidden_dims[0].
                    self.params['W'+str(i+1)] = weight_scale * np.random.randn(input_dim, hidden_dims[i])
                    self.params['b'+str(i+1)] = np.zeros(hidden_dims[i])
                else:
                    self.params['W'+str(i+1)] = weight_scale * np.random.randn(hidden_dims[i-1], hidden_dims[i])
                    self.params['b'+str(i+1)] = np.zeros(hidden_dims[i])
        elif self.normalization == 'batchnorm' or self.normalization == 'layernorm':
            for i in range(0, self.num_layers):
                if i == self.num_layers-1:
                    self.params['W'+str(i+1)] = weight_scale * np.random.randn(hidden_dims[i-1], num_classes)
                    self.params['b'+str(i+1)] = np.zeros(num_classes)
                elif i == 0:
                    self.params['W'+str(i+1)] = weight_scale * np.random.randn(input_dim, hidden_dims[i])
                    self.params['b'+str(i+1)] = np.zeros(hidden_dims[i])
                    self.params['gamma'+str(i+1)] = np.ones(hidden_dims[i])
                    self.params['beta'+str(i+1)] = np.zeros(hidden_dims[i])
                else:
                    self.params['W'+str(i+1)] = weight_scale * np.random.randn(hidden_dims[i-1], hidden_dims[i])
                    self.params['b'+str(i+1)] = np.zeros(hidden_dims[i])
                    self.params['gamma'+str(i+1)] = np.ones(hidden_dims[i])
                    self.params['beta'+str(i+1)] = np.zeros(hidden_dims[i])
        # When using dropout we need to pass a dropout_param dictionary to each
        # dropout layer so that the layer knows the dropout probability and the
        # mode (train / test). The same dropout_param is shared by all layers.
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed
        # With batch normalization we need to keep track of running means and
        # variances, so we pass a dedicated bn_param object to each batch
        # normalization layer: self.bn_params[0] goes to the first layer's
        # forward pass, self.bn_params[1] to the second, etc.  Layer
        # normalization keeps no running state, hence the empty dicts.
        self.bn_params = []
        if self.normalization=='batchnorm':
            self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
        if self.normalization=='layernorm':
            self.bn_params = [{} for i in range(self.num_layers - 1)]
        # Cast all parameters to the correct datatype
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)
    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.

        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'
        # Set train/test mode for batchnorm params and dropout param since they
        # behave differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        if self.normalization=='batchnorm':
            for bn_param in self.bn_params:
                bn_param['mode'] = mode
        scores = None
        # Forward pass.  hs['h<i>'] holds the output of layer i and
        # caches['cache<i>'] whatever layer i's backward pass needs; the last
        # layer is a plain affine producing the class scores.
        caches, hs = {}, {}
        if self.normalization == None:
            for i in range(0, self.num_layers):
                if i == self.num_layers-1:
                    scores, caches['cache'+str(i+1)] = affine_forward(hs['h'+str(i)], self.params['W'+str(i+1)], self.params['b'+str(i+1)])
                elif i == 0:
                    hs['h'+str(i+1)], caches['cache'+str(i+1)] = affine_relu_forward_new(X, self.params['W'+str(i+1)], self.params['b'+str(i+1)], self.dropout_param)
                else:
                    hs['h'+str(i+1)], caches['cache'+str(i+1)] = affine_relu_forward_new(hs['h'+str(i)], self.params['W'+str(i+1)], self.params['b'+str(i+1)], self.dropout_param)
        elif self.normalization == 'batchnorm' or self.normalization == 'layernorm':
            for i in range(0, self.num_layers):
                if i == self.num_layers-1:
                    scores, caches['cache'+str(i+1)] = affine_forward(hs['h'+str(i)], self.params['W'+str(i+1)], self.params['b'+str(i+1)])
                elif i == 0:
                    hs['h'+str(i+1)], caches['cache'+str(i+1)] = affine_relu_norm_forward(X, self.params['W'+str(i+1)], self.params['b'+str(i+1)], self.params['gamma'+str(i+1)], self.params['beta'+str(i+1)], self.bn_params[i], self.dropout_param)
                else:
                    hs['h'+str(i+1)], caches['cache'+str(i+1)] = affine_relu_norm_forward(hs['h'+str(i)], self.params['W'+str(i+1)], self.params['b'+str(i+1)], self.params['gamma'+str(i+1)], self.params['beta'+str(i+1)], self.bn_params[i], self.dropout_param)
        # The statements below are kept only for comparison when debugging
        # (hard-coded three-layer version; variable depth is handled above).
        # W1, b1 = self.params['W1'], self.params['b1']
        # W2, b2 = self.params['W2'], self.params['b2']
        # W3, b3 = self.params['W3'], self.params['b3']
        # h1, cache1 = affine_relu_forward(X, W1, b1)  # cache holds (x, W1, b1, x): inputs of the matmul and the relu
        # h2, cache2 = affine_relu_forward(h1, W2, b2)
        # scores, cache3 = affine_forward(h2, W3, b3)  # cache holds (h2, W3, b3): input of the final layer
        # If test mode return early
        if mode == 'test':
            return scores
        loss, grads = 0.0, {}
        # Backward pass, iterating layers in reverse.  Each layer's weight
        # decay is accumulated into the loss as it is visited; the factor of
        # 0.5 in the L2 term makes its gradient simply reg * W.
        loss, dscores = softmax_loss(scores, y)  # scores feed the softmax, so dscores is the gradient w.r.t. the pre-softmax scores
        dWs, dbs, dhs, dgammas, dbetas = {}, {}, {}, {}, {}
        if self.normalization == None:
            for i in range(self.num_layers-1, -1, -1):
                loss += 0.5 * self.reg * np.sum(self.params['W'+str(i+1)] ** 2)
                if i == self.num_layers-1:
                    dhs['h'+str(i)], dWs['W'+str(i+1)], dbs['b'+str(i+1)] = affine_backward(dscores, caches['cache'+str(i+1)])
                elif i == 0:
                    dx , dWs['W'+str(i+1)], dbs['b'+str(i+1)] = affine_relu_backward_new(dhs['h'+str(i+1)], caches['cache'+str(i+1)])
                else:
                    dhs['h'+str(i)], dWs['W'+str(i+1)], dbs['b'+str(i+1)] = affine_relu_backward_new(dhs['h'+str(i+1)], caches['cache'+str(i+1)])
                grads['W'+str(i+1)] = dWs['W'+str(i+1)] + self.reg * self.params['W'+str(i+1)]
                grads['b'+str(i+1)] = dbs['b'+str(i+1)]
        elif self.normalization == 'batchnorm' or self.normalization == 'layernorm':
            for i in range(self.num_layers-1, -1, -1):
                loss += 0.5 * self.reg * np.sum(self.params['W'+str(i+1)] ** 2)
                if i == self.num_layers-1:
                    dhs['h'+str(i)], dWs['W'+str(i+1)], dbs['b'+str(i+1)] = affine_backward(dscores, caches['cache'+str(i+1)])
                elif i == 0:
                    dx , dWs['W'+str(i+1)], dbs['b'+str(i+1)], dgammas['gamma'+str(i+1)], dbetas['beta'+str(i+1)] = affine_relu_norm_backward(dhs['h'+str(i+1)], caches['cache'+str(i+1)], self.normalization)
                else:
                    dhs['h'+str(i)], dWs['W'+str(i+1)], dbs['b'+str(i+1)], dgammas['gamma'+str(i+1)], dbetas['beta'+str(i+1)] = affine_relu_norm_backward(dhs['h'+str(i+1)], caches['cache'+str(i+1)], self.normalization)
                grads['W'+str(i+1)] = dWs['W'+str(i+1)] + self.reg * self.params['W'+str(i+1)]
                grads['b'+str(i+1)] = dbs['b'+str(i+1)]
                # The output layer has no normalization parameters.
                if i != self.num_layers - 1:
                    grads['gamma'+str(i+1)] = dgammas['gamma'+str(i+1)]
                    grads['beta'+str(i+1)] = dbetas['beta'+str(i+1)]
        # The statements below are kept only for comparison when debugging.
        # loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))
        # dh2, dW3, db3 = affine_backward(dscores, cache3)
        # dh1, dW2, db2 = affine_relu_backward(dh2, cache2)
        # dx, dW1, db1 = affine_relu_backward(dh1, cache1)
        # dW1 += self.reg * W1
        # dW2 += self.reg * W2
        # dW3 += self.reg * W3
        # grads['W1'] = dW1
        # grads['b1'] = db1
        # grads['W2'] = dW2
        # grads['b2'] = db2
        # grads['W3'] = dW3
        # grads['b3'] = db3
        return loss, grads
def affine_relu_norm_forward(x, w, b, gamma, beta, bn_param, dropout_param):
    """
    Convenience layer: affine -> batch/layer norm -> ReLU -> (optional dropout).

    Inputs:
    - x: Input to the affine layer
    - w, b: Weights and biases for the affine layer
    - gamma, beta: Scale/shift parameters for the normalization layer
    - bn_param: Normalization parameters; an empty dict selects layer norm
      (no running statistics, eps falls back to its default inside
      layernorm_forward), otherwise batch norm is used.
    - dropout_param: Dropout parameters; an empty dict disables dropout.

    Returns a tuple of:
    - out: Output of the layer stack
    - cache: (fc_cache, bn_cache, relu_cache[, drop_cache]) for the backward
      pass; affine_relu_norm_backward unpacks it in this order.
    """
    a1, fc_cache = affine_forward(x, w, b)
    # An empty bn_param marks layer normalization (there is no bn_param at all
    # when normalization is disabled, so this function is never called then).
    if bn_param == {}:
        a2, bn_cache = layernorm_forward(a1, gamma, beta, bn_param)
    else:
        a2, bn_cache = batchnorm_forward(a1, gamma, beta, bn_param)
    out, relu_cache = relu_forward(a2)
    if dropout_param != {}:
        out, drop_cache = dropout_forward(out, dropout_param)
        # Bug fix: the second slot must hold bn_cache.  The original stored
        # relu_cache twice, which broke the normalization backward pass
        # whenever dropout was enabled.
        cache = (fc_cache, bn_cache, relu_cache, drop_cache)
    else:
        cache = (fc_cache, bn_cache, relu_cache)
    return out, cache
def affine_relu_norm_backward(dout, cache, mode='batchnorm'):
    """
    Backward pass for the affine-norm-relu(-dropout) convenience layer.

    Inputs:
    - dout: Upstream derivative
    - cache: (fc_cache, bn_cache, relu_cache) or, with dropout,
      (fc_cache, bn_cache, relu_cache, drop_cache) as produced by
      affine_relu_norm_forward
    - mode: 'batchnorm' or 'layernorm', selecting the normalization backward

    Returns a tuple of:
    - dx, dw, db: Gradients for the affine layer inputs
    - dgamma, dbeta: Gradients for the normalization scale/shift parameters
    """
    # The two cache layouts only differ by the trailing drop_cache; undo
    # dropout first (if present), then share the remaining backward path.
    if len(cache) == 4:
        fc_cache, bn_cache, relu_cache, drop_cache = cache
        dout = dropout_backward(dout, drop_cache)
    else:
        fc_cache, bn_cache, relu_cache = cache
    da2 = relu_backward(dout, relu_cache)
    if mode == 'batchnorm':
        da1, dgamma, dbeta = batchnorm_backward(da2, bn_cache)
    elif mode == 'layernorm':
        da1, dgamma, dbeta = layernorm_backward(da2, bn_cache)
    dx, dw, db = affine_backward(da1, fc_cache)
    return dx, dw, db, dgamma, dbeta
def affine_relu_forward_new(x, w, b, dropout_param):
    """
    Convenience layer performing an affine transform followed by a ReLU,
    optionally followed by dropout.

    Inputs:
    - x: Input to the affine layer
    - w, b: Weights and biases for the affine layer
    - dropout_param: Dropout parameters; an empty dict disables dropout

    Returns a tuple of:
    - out: Output from the ReLU (or dropout, when enabled)
    - cache: (fc_cache, relu_cache) or (fc_cache, relu_cache, drop_cache)
      for the backward pass
    """
    affine_out, fc_cache = affine_forward(x, w, b)
    out, relu_cache = relu_forward(affine_out)
    if dropout_param == {}:
        return out, (fc_cache, relu_cache)
    out, drop_cache = dropout_forward(out, dropout_param)
    return out, (fc_cache, relu_cache, drop_cache)
def affine_relu_backward_new(dout, cache):
    """
    Backward pass for the affine-relu(-dropout) convenience layer.

    Inputs:
    - dout: Upstream derivative
    - cache: (fc_cache, relu_cache) or, with dropout,
      (fc_cache, relu_cache, drop_cache) as produced by
      affine_relu_forward_new

    Returns a tuple of:
    - dx, dw, db: Gradients for the affine layer inputs
    """
    # The two cache layouts only differ by the trailing drop_cache; undo
    # dropout first (if present), then share the remaining backward path.
    if len(cache) == 3:
        fc_cache, relu_cache, drop_cache = cache
        dout = dropout_backward(dout, drop_cache)
    else:
        fc_cache, relu_cache = cache
    da = relu_backward(dout, relu_cache)
    dx, dw, db = affine_backward(da, fc_cache)
    return dx, dw, db
"806205254@qq.com"
] | 806205254@qq.com |
be301a8455f88a99119f45995e052482a7a716b8 | 6ecebb04354cc985d9b1ff3ef632137ba104c70e | /example/curvedsky_reconstruction/lens_reconstruction_EB-iter.py | d99e7c298291d8d57001a8856cbfcfcb56be342c | [] | no_license | msyriac/cmblensplus | 2299b38462c1425ab3d7297e0d063d456e4e6070 | 819bb3d50682a54bdf49eeba0628b527457c5616 | refs/heads/master | 2023-01-04T15:03:27.472447 | 2020-11-03T14:25:45 | 2020-11-03T14:25:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | #!/usr/bin/env python
# coding: utf-8
# # A reconstruction nosie dependence on CMB white noise level
# This code compute normalization and power spectrum of quadratic estiamtors with varying CMB noise level
# In[1]:
# external
import numpy as np
from matplotlib.pyplot import *
# from cmblensplus/wrap/
import basic
import curvedsky as cs
# from cmblensplus/utils/
import plottools as plc
# First define parameters
# In[3]:
Tcmb = 2.726e6 # CMB temperature
Lmin, Lmax = 2, 4096 # maximum multipole of output normalization
rlmin, rlmax = 100, 4096 # CMB multipole range for reconstruction
L = np.linspace(0,Lmax,Lmax+1)
Lfac = (L*(L+1.))**2/(2*np.pi)
# arcmin -> radian conversion factor (pi / (180*60)).
ac2rad = np.pi/10800.
# Load arrays of CMB unlensed and lensed Cls. Unlensed Cls are not used for now. The Cls should not be multiplied by any factors and should not have units.
# In[4]:
# ucl is an array of shape [0:5,0:rlmax+1] and ucl[0,:] = TT, ucl[1,:] = EE, ucl[2,:] = TE, lcl[3,:] = phiphi, lcl[4,:] = Tphi
ucl = basic.aps.read_cambcls('../data/unlensedcls.dat',2,rlmax,5)/Tcmb**2 # TT, EE, TE, pp, Tp
# lcl is an array of shape [0:4,0:rlmax+1] and lcl[0,:] = TT, lcl[1,:] = EE, lcl[2,:] = BB, and lcl[3,:] = TE
lcl = basic.aps.read_cambcls('../data/lensedcls.dat',2,rlmax,4,bb=True)/Tcmb**2 # TT, EE, BB, TE
# Loop over SNR calculation
# In[ ]:
# Noise levels to scan (presumably in muK-arcmin, given the ac2rad/Tcmb
# conversion below -- TODO confirm).
sigs = [3.,1.,.5,.3,.1,.05]
#sigs = [5.]
snr = np.zeros(len(sigs))
for i, sig in enumerate(sigs):
    # White-noise spectra: nl[0] = TT, nl[1] = EE, nl[2] = BB (polarization
    # noise is twice the temperature noise); nl[3] stays zero.
    nl = np.zeros((4,rlmax+1))
    nl[0,:] = (sig*ac2rad/Tcmb)**2
    nl[1,:] = 2*nl[0,:]
    nl[2,:] = 2*nl[0,:]
    # Observed spectra = lensed signal + noise.
    ocl = lcl + nl
    # Iteratively-estimated EB quadratic-estimator normalization for lensing.
    Ag, __ = cs.norm_lens.qeb_iter(Lmax,rlmax,rlmin,rlmax,rlmin,rlmax,lcl[1,:],ocl[1,:],ocl[2,:],ucl[3,:])
# In[ ]:
| [
"you@example.com"
] | you@example.com |
67368adaaf3dd7470553568473a2a0979294887e | 6a14512742f448efd2ae2bf86c15e7cb357dcf60 | /_unittests/ut_documentation/test_nb_artificiel_token.py | 6221fac1a3a5799619ef122cd635da633b22aeeb | [
"MIT"
] | permissive | sdpython/papierstat | c2dd47c10282deba528f321c323052baecf16b8c | 8c0772725a7dce2e88946dac82e44318173c1969 | refs/heads/master | 2023-03-16T03:03:48.594789 | 2023-03-04T11:22:19 | 2023-03-04T11:22:19 | 119,205,940 | 10 | 3 | MIT | 2022-05-17T22:52:12 | 2018-01-27T21:56:28 | Jupyter Notebook | UTF-8 | Python | false | false | 1,245 | py | # -*- coding: utf-8 -*-
"""
@brief test log(time=20s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import add_missing_development_version, skipif_travis, skipif_appveyor
from pyquickhelper.ipythonhelper import test_notebook_execution_coverage
import papierstat
class TestNotebookArtificielToken(unittest.TestCase):
    """Executes the 'artificiel_tokenize' notebook end to end and records its
    coverage via pyquickhelper's notebook test helper."""
    def setUp(self):
        # Make a development version of jyquickhelper importable before the
        # notebook is executed.
        add_missing_development_version(["jyquickhelper"], __file__, hide=True)
    @skipif_travis("ModuleNotFoundError: No module named 'google_compute_engine'")
    @skipif_appveyor("ValueError: 93066 exceeds max_map_len(32768)")
    def test_notebook_artificiel_token(self):
        """Downloads the required NLTK data, then runs the notebook."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        # Fetch the NLTK resources ('punkt' tokenizer models and stopword
        # lists) before the notebook runs.
        import nltk
        nltk.download('punkt')
        nltk.download('stopwords')
        self.assertTrue(papierstat is not None)
        folder = os.path.join(os.path.dirname(__file__),
                              "..", "..", "_doc", "notebooks", "lectures")
        test_notebook_execution_coverage(
            __file__, "artificiel_tokenize", folder, 'papierstat', copy_files=[], fLOG=fLOG)
if __name__ == "__main__":
    # Allow running this test module directly with `python`.
    unittest.main()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
b82a8ed7ea79dab35116808e2145b91935451c3e | 798a81fb5ebf8afa28a6ab06f8d3bf85f753e1de | /tests/test_util.py | f34a01f6963bd4f00c70f39f20106fb980f0d042 | [
"Apache-2.0"
] | permissive | erinmacf/sentence-transformers | 5d5c592126747e95b29eb5c966db6cc0d8b7ef91 | e59a07600b73d3a856778278d212dea9e8598272 | refs/heads/master | 2023-02-22T07:30:46.692044 | 2021-01-26T12:38:59 | 2021-01-26T12:38:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,274 | py | from sentence_transformers import util, SentenceTransformer
import unittest
import numpy as np
import sklearn
import torch
class UtilTest(unittest.TestCase):
    """Unit tests for helper functions in sentence_transformers.util."""
    def test_pytorch_cos_sim(self):
        """util.pytorch_cos_sim must match sklearn's cosine_similarity."""
        a = np.random.randn(50, 100)
        b = np.random.randn(50, 100)
        expected = sklearn.metrics.pairwise.cosine_similarity(a, b)
        computed = util.pytorch_cos_sim(a, b).numpy()
        # Every entry must agree with sklearn to within 1e-3.
        assert np.max(np.abs(expected - computed)) < 0.001
    def test_semantic_search(self):
        """util.semantic_search must return the true top-k cosine neighbors."""
        num_queries, num_k = 20, 10
        doc_emb = torch.tensor(np.random.randn(1000, 100))
        q_emb = torch.tensor(np.random.randn(num_queries, 100))
        # Chunk sizes deliberately do not divide the corpus size evenly.
        hits = util.semantic_search(q_emb, doc_emb, top_k=num_k,
                                    query_chunk_size=5, corpus_chunk_size=17)
        assert len(hits) == num_queries
        assert len(hits[0]) == num_k
        # Sanity check against a brute-force top-k over the full score matrix.
        scores = util.pytorch_cos_sim(q_emb, doc_emb)
        top_values, top_idx = scores.topk(num_k)
        top_values = top_values.cpu().tolist()
        top_idx = top_idx.cpu().tolist()
        for qid in range(num_queries):
            for hit, idx, value in zip(hits[qid], top_idx[qid], top_values[qid]):
                assert hit['corpus_id'] == idx
                assert abs(hit['score'] - value) < 0.001
    def test_paraphrase_mining(self):
        """util.paraphrase_mining must only pair up obvious paraphrases."""
        model = SentenceTransformer('paraphrase-distilroberta-base-v1')
        sentences = [
            "This is a test", "This is a test!",
            "The cat sits on mat", "The cat sits on the mat", "On the mat a cat sits",
            "A man eats pasta", "A woman eats pasta", "A man eats spaghetti"
        ]
        expected_pairs = {(0, 1), (2, 3), (2, 4), (3, 4), (5, 6), (5, 7), (6, 7)}
        duplicates = util.paraphrase_mining(model, sentences)
        for score, a, b in duplicates:
            if score > 0.5:
                assert (a, b) in expected_pairs
if "__main__" == __name__:
    # Allow running this test module directly with `python`.
    unittest.main()
"rnils@web.de"
] | rnils@web.de |
cd91e4611fa1336a44108f41ee9bc4ac7ffc46f4 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_long.py | f5933642bd19934a6188398881b7bbafec50e23b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py |
#calss header
class _LONG():
def __init__(self,):
self.name = "LONG"
self.definitions = [u'used to mean "(for) a long time", especially in questions and negative sentences: ', u'a long period of time before or after something: ', u'used with the past participle or the -ing form of the verb to mean that a state or activity has continued for a long time: ', u'used to say that something must happen before something else can happen: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5c50c25438be470d806d2fcc116d5292be6448ee | 05148c0ea223cfc7ed9d16234ab3e6bb40885e9d | /Packages/matplotlib-2.2.2/lib/matplotlib/tests/test_skew.py | 628506f4db481a2f0e2ce90dd76fec452a33eb7f | [
"MIT"
] | permissive | NightKirie/NCKU_NLP_2018_industry3 | 9ee226e194287fd9088429f87c58c874e050a8b3 | 23ac13644b140587e23cfeffb114c7c6f46f17a2 | refs/heads/master | 2021-06-05T05:33:09.510647 | 2018-07-05T10:19:47 | 2018-07-05T10:19:47 | 133,680,341 | 1 | 4 | MIT | 2020-05-20T16:29:54 | 2018-05-16T14:43:38 | Python | UTF-8 | Python | false | false | 7,139 | py | """
Testing that skewed axes properly work
"""
from __future__ import absolute_import, division, print_function
import itertools
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
from matplotlib.axes import Axes
import matplotlib.transforms as transforms
import matplotlib.axis as maxis
import matplotlib.spines as mspines
import matplotlib.patches as mpatch
from matplotlib.projections import register_projection
# The sole purpose of this class is to look at the upper, lower, or total
# interval as appropriate and see what parts of the tick to draw, if any.
class SkewXTick(maxis.XTick):
    """X tick that draws each half of the tick (and the gridline) only when
    its location lies inside the corresponding skewed axis interval."""
    def update_position(self, loc):
        # This ensures that the new value of the location is set before
        # any other updates take place
        self._loc = loc
        super(SkewXTick, self).update_position(loc)
    def _has_default_loc(self):
        # True while no explicit location has been assigned yet.
        return self.get_loc() is None
    def _need_lower(self):
        # Lower half is drawn only when the tick lies in the lower x-range.
        return (self._has_default_loc() or
                transforms.interval_contains(self.axes.lower_xlim,
                                             self.get_loc()))
    def _need_upper(self):
        # Upper half is drawn only when the tick lies in the upper x-range.
        return (self._has_default_loc() or
                transforms.interval_contains(self.axes.upper_xlim,
                                             self.get_loc()))
    @property
    def gridOn(self):
        # Gridline is shown only when the location is inside the view interval.
        return (self._gridOn and (self._has_default_loc() or
                transforms.interval_contains(self.get_view_interval(),
                                             self.get_loc())))
    @gridOn.setter
    def gridOn(self, value):
        self._gridOn = value
    @property
    def tick1On(self):
        return self._tick1On and self._need_lower()
    @tick1On.setter
    def tick1On(self, value):
        self._tick1On = value
    @property
    def label1On(self):
        return self._label1On and self._need_lower()
    @label1On.setter
    def label1On(self, value):
        self._label1On = value
    @property
    def tick2On(self):
        return self._tick2On and self._need_upper()
    @tick2On.setter
    def tick2On(self, value):
        self._tick2On = value
    @property
    def label2On(self):
        return self._label2On and self._need_upper()
    @label2On.setter
    def label2On(self, value):
        self._label2On = value
    def get_view_interval(self):
        # Delegate to the owning axis so the skewed interval logic applies.
        return self.axes.xaxis.get_view_interval()
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
class SkewXAxis(maxis.XAxis):
    """X axis that creates SkewXTick instances and reports a view interval
    spanning from the upper axis's left edge to the lower axis's right edge."""
    def _get_tick(self, major):
        # Hand out our custom tick so the upper/lower visibility logic applies.
        return SkewXTick(self.axes, None, '', major=major)
    def get_view_interval(self):
        lower = self.axes.lower_xlim
        upper = self.axes.upper_xlim
        return upper[0], lower[1]
# This class exists to calculate the separate data range of the
# upper X-axis and draw the spine there. It also provides this range
# to the X-axis artist for ticking and gridlines
class SkewSpine(mspines.Spine):
    """Spine whose x-extent follows the skewed upper (or lower) data range."""
    def _adjust_location(self):
        # Pick the data range matching this spine, then rewrite the x
        # coordinates of the spine path in place.
        if self.spine_type == 'top':
            xlim = self.axes.upper_xlim
        else:
            xlim = self.axes.lower_xlim
        self._path.vertices[:, 0] = xlim
# This class handles registration of the skew-xaxes as a projection as well
# as setting up the appropriate transformations. It also overrides standard
# spines and axes instances as appropriate.
class SkewXAxes(Axes):
    """Axes subclass whose x-axis is skewed by 30 degrees, so lines of
    constant x are slanted.  Registered as projection name 'skewx'."""
    # The projection must specify a name. This will be used by the
    # user to select the projection, i.e. ``subplot(111,
    # projection='skewx')``.
    name = 'skewx'
    def _init_axis(self):
        # Taken from Axes and modified to use our modified X-axis
        self.xaxis = SkewXAxis(self)
        self.spines['top'].register_axis(self.xaxis)
        self.spines['bottom'].register_axis(self.xaxis)
        self.yaxis = maxis.YAxis(self)
        self.spines['left'].register_axis(self.yaxis)
        self.spines['right'].register_axis(self.yaxis)
    def _gen_axes_spines(self):
        # Only the top spine needs the skew-aware SkewSpine (it tracks the
        # shifted upper x-range); the other three are standard.
        spines = {'top': SkewSpine.linear_spine(self, 'top'),
                  'bottom': mspines.Spine.linear_spine(self, 'bottom'),
                  'left': mspines.Spine.linear_spine(self, 'left'),
                  'right': mspines.Spine.linear_spine(self, 'right')}
        return spines
    def _set_lim_and_transforms(self):
        """
        This is called once when the plot is created to set up all the
        transforms for the data, text and grids.
        """
        rot = 30  # skew angle in degrees
        # Get the standard transform setup from the Axes base class
        Axes._set_lim_and_transforms(self)
        # Need to put the skew in the middle, after the scale and limits,
        # but before the transAxes. This way, the skew is done in Axes
        # coordinates thus performing the transform around the proper origin
        # We keep the pre-transAxes transform around for other users, like the
        # spines for finding bounds
        self.transDataToAxes = (self.transScale +
                                (self.transLimits +
                                 transforms.Affine2D().skew_deg(rot, 0)))
        # Create the full transform from Data to Pixels
        self.transData = self.transDataToAxes + self.transAxes
        # Blended transforms like this need to have the skewing applied using
        # both axes, in axes coords like before.
        self._xaxis_transform = (transforms.blended_transform_factory(
            self.transScale + self.transLimits,
            transforms.IdentityTransform()) +
            transforms.Affine2D().skew_deg(rot, 0)) + self.transAxes
    @property
    def lower_xlim(self):
        # Data-space x-limits along the bottom edge of the axes.
        return self.axes.viewLim.intervalx
    @property
    def upper_xlim(self):
        # Data-space x-values of the top edge corners, found by inverting the
        # skewed data->axes transform at axes coordinates (0, 1) and (1, 1).
        pts = [[0., 1.], [1., 1.]]
        return self.transDataToAxes.inverted().transform(pts)[:, 0]
# Now register the projection with matplotlib so the user can select
# it via ``projection='skewx'``.
register_projection(SkewXAxes)
@image_comparison(baseline_images=['skew_axes'], remove_text=True)
def test_set_line_coll_dash_image():
    # NOTE(review): the function name looks copy-pasted from another test;
    # the 'skew_axes' baseline is what identifies this as the skewed-axes
    # smoke test. Renaming would change the test's identity, so only noted.
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='skewx')
    ax.set_xlim(-50, 50)
    ax.set_ylim(50, -50)  # limits given high-to-low: the y-axis is inverted
    ax.grid(True)

    # An example of a slanted line at constant X
    ax.axvline(0, color='b')
@image_comparison(baseline_images=['skew_rects'], remove_text=True)
def test_skew_rectangle():
    """Draw a unit rectangle under every combination of x/y skews
    from -135 to +135 degrees in a 5x5 grid."""
    # Renamed the misleading local 'fix' to the conventional 'fig'
    # (it holds the Figure returned by plt.subplots).
    fig, axes = plt.subplots(5, 5, sharex=True, sharey=True, figsize=(8, 8))
    axes = axes.flat

    # All (x, y) skew multipliers; each is scaled by 45 degrees below.
    rotations = list(itertools.product([-3, -1, 0, 1, 3], repeat=2))

    # Shared limits/aspect: setting them on one axes propagates via sharex/sharey.
    axes[0].set_xlim([-3, 3])
    axes[0].set_ylim([-3, 3])
    axes[0].set_aspect('equal', share=True)

    for ax, (xrots, yrots) in zip(axes, rotations):
        xdeg, ydeg = 45 * xrots, 45 * yrots
        t = transforms.Affine2D().skew_deg(xdeg, ydeg)

        ax.set_title('Skew of {0} in X and {1} in Y'.format(xdeg, ydeg))
        ax.add_patch(mpatch.Rectangle([-1, -1], 2, 2,
                                      transform=t + ax.transData,
                                      alpha=0.5, facecolor='coral'))

    plt.subplots_adjust(wspace=0, left=0.01, right=0.99, bottom=0.01, top=0.99)
| [
"qwer55113322@gmail.com"
] | qwer55113322@gmail.com |
4cc64b788664f79f7bb9a60d514e3e84bda1b654 | fb72aef4db762749f3ac4bc08da36d6accee0697 | /modules/photons_tile_paint/balls.py | 5707fa63c995721340e29f0b98272b832ade14b2 | [
"MIT"
] | permissive | xbliss/photons-core | 47698cc44ea80354e0dcabe42d8d370ab0623f4b | 3aca907ff29adffcab4fc22551511c5d25b8c2b7 | refs/heads/master | 2022-11-07T12:33:09.951104 | 2020-05-07T09:10:35 | 2020-05-07T09:45:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,453 | py | from photons_tile_paint.options import (
AnimationOptions,
split_by_comma,
hue_range_spec,
HueRange,
normalise_speed_options,
)
from photons_tile_paint.animation import Animation, Finish
from photons_themes.theme import ThemeColor as Color
from photons_themes.canvas import Canvas
from delfick_project.norms import dictobj, sb
from collections import defaultdict
import random
import math
class TileBallsOptions(AnimationOptions):
    """Options for the bouncing-balls tile animation."""

    num_iterations = dictobj.Field(sb.integer_spec, default=-1)
    random_orientations = dictobj.Field(sb.boolean, default=False)
    ball_hues = dictobj.NullableField(split_by_comma(hue_range_spec()), default=[])
    num_balls = dictobj.Field(sb.integer_spec, default=5)
    fade_amount = dictobj.Field(sb.float_spec, default=0.02)
    min_speed = dictobj.Field(sb.float_spec, default=0.6)
    max_speed = dictobj.Field(sb.float_spec, default=0.8)

    def final_iteration(self, iteration):
        """True once ``iteration`` reaches num_iterations; -1 means run forever."""
        return self.num_iterations != -1 and self.num_iterations <= iteration
class Boundary:
    """The set of integer grid cells covered by the tile coordinates.

    ``coords`` is an iterable of ((left, top), (width, height)) tile specs;
    every cell inside any tile is a valid position for a ball.
    """

    def __init__(self, coords):
        self.points = {
            (col, row): True
            for (left, top), (width, height) in coords
            for col in range(left, left + width)
            for row in range(top - height, top)
        }
        self.position_points = list(self.points)

    def random_coord(self):
        """Pick a uniformly random cell inside the boundary."""
        return random.choice(self.position_points)

    def is_going_outside(self, now, nxt, dx, dy):
        """Return (outside_x, outside_y) booleans.

        Each is True when at least two cells in the bounding box of
        ``now + nxt`` fall outside both the boundary and the ball's current
        footprint, on the side the ball is heading (sign of dx/dy).
        """
        combined = now + nxt
        xs = [x for x, _ in combined]
        ys = [y for _, y in combined]
        box_left, box_right = min(xs), max(xs)
        box_bottom, box_top = min(ys), max(ys)

        # Leading edge of the current footprint along each axis of travel.
        edge_x = min(x for x, _ in now) if dx < 0 else max(x for x, _ in now)
        edge_y = min(y for _, y in now) if dy < 0 else max(y for _, y in now)

        outside_x = 0
        outside_y = 0
        for col in range(box_left, box_right + 1):
            for row in range(box_bottom, box_top + 1):
                cell = (col, row)
                if cell in self.points or cell in now:
                    continue
                past_x = cell[0] < edge_x if dx < 0 else cell[0] > edge_x
                past_y = cell[1] < edge_y if dy < 0 else cell[1] > edge_y
                if past_x:
                    outside_x += 1
                if past_y:
                    outside_y += 1

        return outside_x >= 2, outside_y >= 2
class Ball:
    """A 2x2-pixel ball bouncing around inside a Boundary.

    hue: the hue the ball is painted with.
    rate_x / rate_y: base per-tick velocity along each axis.
    """

    def __init__(self, boundary, hue, rate_x, rate_y):
        self.hue = hue
        self.boundary = boundary
        self.x, self.y = self.boundary.random_coord()
        self.dx = rate_x
        self.dy = rate_y
        # Extra fractional velocity picked up on each bounce so the ball
        # doesn't keep retracing exactly the same path.
        self.extrax = 0
        self.extray = 0
        self.maybe_alter_course()

    def maybe_alter_course(self):
        """Bounce (invert velocity, add a random nudge) if the next step
        would take the ball outside the boundary."""
        points_now = [(math.floor(x), math.floor(y)) for x, y in self.points]
        points_next = [(math.floor(x), math.floor(y)) for x, y in self.next_points]
        outside_x, outside_y = self.boundary.is_going_outside(
            points_now, points_next, self.dx, self.dy
        )

        if not outside_x and not outside_y:
            return

        if outside_x:
            self.dx *= -1
        if outside_y:
            self.dy *= -1

        # BUG FIX: these were assigned to ``self.extra_x``/``self.extra_y``
        # while ``next_point`` reads ``self.extrax``/``self.extray``, so the
        # post-bounce nudge was computed but never applied.
        self.extrax = random.randrange(0, 5) / 10
        self.extray = random.randrange(0, 5) / 10

        # Give each nudge the same sign as the current velocity component.
        if (self.dy < 0) ^ (self.extray < 0):
            self.extray *= -1
        if (self.dx < 0) ^ (self.extrax < 0):
            self.extrax *= -1

    @property
    def top(self):
        return self.y

    @property
    def bottom(self):
        return self.y - 1

    @property
    def right(self):
        return self.x + 1

    @property
    def left(self):
        return self.x

    @property
    def points(self):
        """The four (possibly fractional) corner coordinates of the ball."""
        return [
            (self.x, self.y),
            (self.x, self.y - 1),
            (self.x + 1, self.y),
            (self.x + 1, self.y - 1),
        ]

    @property
    def next_points(self):
        """The four corner coordinates after one more step."""
        x, y = self.next_point()
        return [(x, y), (x, y - 1), (x + 1, y), (x + 1, y - 1)]

    def next_point(self):
        """Anchor position after one tick: base velocity plus bounce nudge."""
        x = self.x + self.dx + self.extrax
        y = self.y + self.dy + self.extray
        return x, y

    def progress(self):
        """Advance one tick, then bounce if the following step would escape."""
        self.x, self.y = self.next_point()
        self.maybe_alter_course()

    def pixels(self):
        """Yield ((col, row), Color) for the four pixels the ball covers."""
        for x, y in self.points:
            yield (math.floor(x), math.floor(y)), Color(self.hue, 1, 1, 3500)
class TileBallsState:
    """Animation state: the boundary, the live balls and the fading canvas."""

    def __init__(self, coords, options):
        self.options = options
        self.boundary = Boundary(coords)
        self.balls = []
        self.ensure_enough_balls()
        self.canvas = Canvas()

    def ensure_enough_balls(self):
        """Top the ball list back up to ``options.num_balls``."""
        need = self.options.num_balls - len(self.balls)
        if need > 0:
            self.balls.extend([self.make_ball() for _ in range(need)])

    def make_ball(self):
        """Create one ball with a random hue, speed and direction."""
        if self.options.min_speed == self.options.max_speed:
            rate_x = self.options.min_speed
            rate_y = self.options.max_speed
        else:
            # Draw speeds with two-decimal granularity within [min, max].
            mn = int(self.options.min_speed * 100)
            mx = int(self.options.max_speed * 100)
            rate_x = random.randint(mn, mx) / 100
            rate_y = random.randint(mn, mx) / 100
        # Flip each axis with 50% probability so balls start in any direction.
        if random.randrange(0, 100) < 50:
            rate_x *= -1
        if random.randrange(0, 100) < 50:
            rate_y *= -1
        ball_hue = random.choice(self.options.ball_hues)
        return Ball(self.boundary, ball_hue.make_hue(), rate_x, rate_y)

    def tick(self):
        """Advance every ball one step; returns self for chaining."""
        for ball in self.balls:
            ball.progress()
        return self

    def make_canvas(self):
        """Fade the canvas, paint the balls, and flash/remove any that collide."""
        # Fade previously painted pixels; drop them once fully dark.
        for point, pixel in list(self.canvas):
            pixel.brightness -= self.options.fade_amount
            if pixel.brightness < 0:
                del self.canvas[point]
        # Paint each ball, recording which balls claim each pixel.
        pixels = defaultdict(list)
        for ball in self.balls:
            for point, pixel in ball.pixels():
                pixels[point].append(ball)
                self.canvas[point] = pixel
        # A pixel claimed by more than one ball marks a collision: paint
        # the colliding balls white and remove them from play.
        collided_balls = []
        for balls in pixels.values():
            if len(balls) > 1:
                collided_balls.extend(balls)
                for ball in balls:
                    for point, _ in ball.pixels():
                        self.canvas[point] = Color(0, 0, 1, 3500)
        self.balls = [b for b in self.balls if b not in collided_balls]
        self.ensure_enough_balls()
        return self.canvas
class TileBallsAnimation(Animation):
    """Animation driver for the bouncing-balls effect."""

    def setup(self):
        self.iteration = 0
        # NOTE(review): only set when the option is enabled; presumably the
        # Animation base class supplies a False default - confirm.
        if self.options.random_orientations:
            self.random_orientations = True
        normalise_speed_options(self.options)
        if not self.options.ball_hues:
            # No hues configured: allow the full hue wheel.
            self.options.ball_hues = [HueRange(0, 360)]

    def next_state(self, prev_state, coords):
        """Create the state on first call; afterwards advance it one tick."""
        if prev_state is None:
            return TileBallsState(coords, self.options)
        self.iteration += 1
        if self.options.final_iteration(self.iteration):
            raise Finish("Reached max iterations")
        return prev_state.tick()

    def make_canvas(self, state, coords):
        # Delegate drawing entirely to the state object.
        return state.make_canvas()
| [
"stephen@delfick.com"
] | stephen@delfick.com |
26044862575027f745ee34c0dc830acb94aa5f07 | e483b0515cca39f4ddac19645f03fc1695d1939f | /google/ads/google_ads/v1/proto/errors/authorization_error_pb2.py | cfbe691627f0786cfa52ad4d49c1f4417f034197 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | BrunoWMello/google-ads-python | 0af63d2ca273eee96efd8a33252d27112c049442 | 9b074a037d10f0c1208a00d5d41a8e5e25405f28 | refs/heads/master | 2020-05-27T04:37:47.669144 | 2019-05-24T17:07:31 | 2019-05-24T17:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 5,564 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/errors/authorization_error.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/errors/authorization_error.proto',
package='google.ads.googleads.v1.errors',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v1.errorsB\027AuthorizationErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V1.Errors\312\002\036Google\\Ads\\GoogleAds\\V1\\Errors\352\002\"Google::Ads::GoogleAds::V1::Errors'),
serialized_pb=_b('\n>google/ads/googleads_v1/proto/errors/authorization_error.proto\x12\x1egoogle.ads.googleads.v1.errors\x1a\x1cgoogle/api/annotations.proto\"\xdb\x02\n\x16\x41uthorizationErrorEnum\"\xc0\x02\n\x12\x41uthorizationError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x1a\n\x16USER_PERMISSION_DENIED\x10\x02\x12#\n\x1f\x44\x45VELOPER_TOKEN_NOT_WHITELISTED\x10\x03\x12\x1e\n\x1a\x44\x45VELOPER_TOKEN_PROHIBITED\x10\x04\x12\x14\n\x10PROJECT_DISABLED\x10\x05\x12\x17\n\x13\x41UTHORIZATION_ERROR\x10\x06\x12\x18\n\x14\x41\x43TION_NOT_PERMITTED\x10\x07\x12\x15\n\x11INCOMPLETE_SIGNUP\x10\x08\x12\x18\n\x14\x43USTOMER_NOT_ENABLED\x10\x18\x12\x0f\n\x0bMISSING_TOS\x10\t\x12 \n\x1c\x44\x45VELOPER_TOKEN_NOT_APPROVED\x10\nB\xf2\x01\n\"com.google.ads.googleads.v1.errorsB\x17\x41uthorizationErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V1.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V1\\Errors\xea\x02\"Google::Ads::GoogleAds::V1::Errorsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_AUTHORIZATIONERRORENUM_AUTHORIZATIONERROR = _descriptor.EnumDescriptor(
name='AuthorizationError',
full_name='google.ads.googleads.v1.errors.AuthorizationErrorEnum.AuthorizationError',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USER_PERMISSION_DENIED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEVELOPER_TOKEN_NOT_WHITELISTED', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEVELOPER_TOKEN_PROHIBITED', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROJECT_DISABLED', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUTHORIZATION_ERROR', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTION_NOT_PERMITTED', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INCOMPLETE_SIGNUP', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_NOT_ENABLED', index=9, number=24,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MISSING_TOS', index=10, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEVELOPER_TOKEN_NOT_APPROVED', index=11, number=10,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=156,
serialized_end=476,
)
_sym_db.RegisterEnumDescriptor(_AUTHORIZATIONERRORENUM_AUTHORIZATIONERROR)
_AUTHORIZATIONERRORENUM = _descriptor.Descriptor(
name='AuthorizationErrorEnum',
full_name='google.ads.googleads.v1.errors.AuthorizationErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_AUTHORIZATIONERRORENUM_AUTHORIZATIONERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=129,
serialized_end=476,
)
_AUTHORIZATIONERRORENUM_AUTHORIZATIONERROR.containing_type = _AUTHORIZATIONERRORENUM
DESCRIPTOR.message_types_by_name['AuthorizationErrorEnum'] = _AUTHORIZATIONERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AuthorizationErrorEnum = _reflection.GeneratedProtocolMessageType('AuthorizationErrorEnum', (_message.Message,), dict(
DESCRIPTOR = _AUTHORIZATIONERRORENUM,
__module__ = 'google.ads.googleads_v1.proto.errors.authorization_error_pb2'
,
__doc__ = """Container for enum describing possible authorization errors.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.errors.AuthorizationErrorEnum)
))
_sym_db.RegisterMessage(AuthorizationErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | BrunoWMello.noreply@github.com |
c22d3c63ae8fccd52a786d3b12defe989314afbb | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_5583.py | a4e7f6afacb41491a152867d5a596bc3b1b3b338 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | # To/From Paging in Python
for number in range(1, 301, 100):
low = number
high = low + 100
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
1c4d18d97746bdc86efb298a2ae3adb8481ac0ef | 9b19c22ce89e2895fc16420fae7114879a3ed1dc | /models/network.py | 3b70a6aac438c1889f17906e40c69b17e99f74aa | [
"MIT"
] | permissive | TangLisan/pytorch-office_finetune | 3b5ad6a1ac25633e8b64ab0a8e316ddcb67add78 | bd953404660b5098103f583852395a1c98cc4ea5 | refs/heads/master | 2020-03-22T21:24:17.253843 | 2018-06-29T17:41:18 | 2018-06-29T17:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py | import torch.nn as nn
from torchvision import models
from .alexnet import alexnet
class AlexModel(nn.Module):
    """ AlexNet pretrained on imagenet for Office dataset"""

    def __init__(self):
        super(AlexModel, self).__init__()
        self.restored = False  # whether weights were restored from a checkpoint
        model_alexnet = models.alexnet(pretrained=True)

        # Reuse the pretrained convolutional feature extractor unchanged.
        self.features = model_alexnet.features

        # Copy the first six classifier layers (through fc7) ...
        self.fc = nn.Sequential()
        for i in range(6):
            self.fc.add_module("classifier" + str(i),
                               model_alexnet.classifier[i])
        self.__in_features = model_alexnet.classifier[6].in_features  # 4096
        # ... then append a fresh 31-way head for the Office classes.
        self.fc.add_module("final", nn.Linear(4096, 31))

    def forward(self, input_data):
        # Broadcast the input to 3 channels at 224x224
        # (assumes input is already 224x224 spatially - TODO confirm).
        input_data = input_data.expand(input_data.data.shape[0], 3, 224, 224)
        feature = self.features(input_data)
        feature = feature.view(-1, 256*6*6)  # flatten conv output (256 x 6 x 6)
        class_output = self.fc(feature)
        return class_output
class AlexModel_LRN(nn.Module):
    """ AlexNet pretrained on imagenet for Office dataset"""

    def __init__(self):
        super(AlexModel_LRN, self).__init__()
        self.restored = False  # whether weights were restored from a checkpoint
        # Local variant of alexnet (with LRN layers) imported from .alexnet.
        model_alexnet = alexnet(pretrained=True)

        self.features = model_alexnet.features

        # Copy the first six classifier layers (through fc7); the 31-way
        # Office head lives in self.classifier, unlike AlexModel above.
        self.fc = nn.Sequential()
        for i in range(6):
            self.fc.add_module("classifier" + str(i),
                               model_alexnet.classifier[i])
        self.__in_features = model_alexnet.classifier[6].in_features  # 4096

        self.classifier = nn.Sequential(
            nn.Linear(4096, 31),
        )

    def forward(self, input_data):
        # Broadcast the input to 3 channels at 227x227 (the LRN variant's
        # expected size - note this differs from AlexModel's 224).
        input_data = input_data.expand(input_data.data.shape[0], 3, 227, 227)
        feature = self.features(input_data)
        feature = feature.view(-1, 256*6*6)  # flatten conv output
        fc = self.fc(feature)
        class_output = self.classifier(fc)
        return class_output
class ResModel(nn.Module):
def __init__(self):
super(ResModel, self).__init__()
self.restored = False
model_resnet50 = models.resnet50(pretrained=True)
self.features = nn.Sequential(
model_resnet50.conv1,
model_resnet50.bn1,
model_resnet50.relu,
model_resnet50.maxpool,
model_resnet50.layer1,
model_resnet50.layer2,
model_resnet50.layer3,
model_resnet50.layer4,
model_resnet50.avgpool,
)
self.__in_features = model_resnet50.fc.in_features
self.fc = nn.Linear(self.__in_features, 31)
def forward(self, input):
x = self.features(input)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x | [
"wogong38@gmail.com"
] | wogong38@gmail.com |
1827ab09efa83cecd6af6f1223a86a09e3b46a54 | 594fd699d9f8070c867b83b11881ca1f624b417b | /EstruturaDeDecisao/decrescente.py | b7a6621da3be011d88c1fb924021f09189db1f3b | [] | no_license | felipmarqs/exerciciospythonbrasil | f140df2c59b933cc0460d5986afc8c6ddd493556 | 6d02e85ae5986d3b20cfd8781174998d871eeb90 | refs/heads/master | 2020-04-04T05:25:23.751175 | 2018-12-12T18:44:38 | 2018-12-12T18:44:38 | 155,745,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | #Faça um Programa que leia três números e mostre-os em ordem decrescente
n1 = int(input("Digite um número: "))
n2 = int(input("Digite um número: "))
n3 = int(input("Digite um número: "))
if n1 > n2 > n3:
print(n1,n2,n3)
elif n1 > n3 > n2:
print(n1,n3,n2)
elif n2 > n1 > n3:
print(n2,n1,n3)
elif n2 > n3 > n1:
print(n2,n3,n1)
elif n3 > n1 > n2:
print(n3,n1,n2)
elif n3 > n2 > n1:
print(n3,n2,n1)
else:
print('erro') | [
"noreply@github.com"
] | felipmarqs.noreply@github.com |
7bf13d83b6a5e244be420a98f6b63913b90ac084 | ef90992dc00640f42ec615075a9b030b771f81e4 | /Algorithm/linked_list.py | e544fd8b14fb1fc7c5a90bb51f3a36afef290f27 | [] | no_license | korea7030/pythonwork | 88f5e67b33e9143eb40f6c10311a29e08317b77e | 70741acb0477c9348ad3f1ea07a183dda82a5402 | refs/heads/master | 2023-01-08T01:47:15.141471 | 2020-09-09T13:28:20 | 2020-09-09T13:28:20 | 54,378,053 | 0 | 0 | null | 2022-12-26T20:25:43 | 2016-03-21T10:00:07 | Jupyter Notebook | UTF-8 | Python | false | false | 2,540 | py | class Node:
def __init__(self, item):
self.item = item # item
self.next = None # next
class LinkedList:
def __init__(self):
self.head = None # 첫번째 노드를 가리킴
def push(self, item):
new_node = Node(item)
new_node.next = self.head
self.head = new_node
def next(self):
if self.head == None:
print("no next item")
else:
return self.head.item
def printList(self):
current = self.head
if (current is None):
print("Not information")
return
while(current is not None):
print(current.item, end=" ")
current = current.next
def reverse(self):
prev = None
current = self.head
while(current is not None):
next = current.next
current.next = prev
prev = current
current = next
self.head = prev
def search(self, item):
temp = self.head
flag = False
while temp is not None and flag is False:
if(temp.item == item):
flag = True
else:
temp = temp.next
if flag:
print("find", temp.item)
else:
print("Not find")
def remove(self, item):
prev = None
current = self.head
flag = False
while current is not None and flag is False:
if (current.item == item):
flag = True
else:
prev = current
current = current.next
if current is None:
print("not find")
elif prev == None: # 노드가 한개 일때 삭제한 경우
self.head = current.next
else: # None 값 대입
prev.next = current.next
def get_last_n_node(self, n):
temp1 = self.head
temp2 = self.head
if n != 0:
for i in range(n):
temp2 = temp2.next
if temp2 is None:
return None
while temp2.next is not None:
temp2 = temp2.next
temp1 = temp1.next
return temp1.item
if __name__ == "__main__":
    # Ad-hoc demo: build 4 -> 3 -> 2 -> 1, print it, reverse, print again.
    linked_list = LinkedList()
    linked_list.push(1)
    linked_list.push(2)
    linked_list.push(3)
    linked_list.push(4)
    linked_list.printList()
    linked_list.reverse()
    linked_list.printList()
    # Item 3 positions from the tail (0-indexed from the end).
    a = linked_list.get_last_n_node(3)
    # print(a)
    # print(linked_list.next())
| [
"korea7030@naver.com"
] | korea7030@naver.com |
07314e8b937bc00e5930881452ba7376f3dd6ff5 | 4dd6a8d8024a72a3e2d8e71e86fd34888a149902 | /dacon/dacon01/try_03-2.py | 6883e167ab7fdf28a59e3e0c854b426224425b60 | [] | no_license | KOOKDONGHUN/study | d483b125d349956b325bc5f4d99a4a95dd80ccbc | 517effbb19ddc820d53f0a6194463e7687467af6 | refs/heads/master | 2023-01-14T09:13:48.346502 | 2020-11-20T09:03:25 | 2020-11-20T09:03:25 | 259,818,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,280 | py | from xgboost import XGBClassifier, plot_importance, XGBRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split, RandomizedSearchCV, KFold, cross_val_score,GridSearchCV
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score, mean_absolute_error
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectFromModel
train = np.load('./data/dacon/comp1/train.npy')
test = np.load('./data/dacon/comp1/pred.npy')
x = train[:, :-4]
y = train[:, -4:]
print(x)
print(y)
# 회기 모델
x_train, x_test, y_train, y_test = train_test_split(x,y,train_size=0.8,
random_state=0)
# n_estimators = 450
# learning_rate = 0.1
# colsample_bytree = 0.85
# colsample_bylevel = 0.9
# max_depth = 6
# n_jobs = 6
parameters = [{"n_estimators": [2000],
"learning_rate": [0.01],
"max_depth": [5],
"colsample_bytree": [0.79],
"colsample_bylevel": [0.79]}]
parameters2 = [{"n_estimators": [2000],
"learning_rate": [0.01],
"max_depth": [6],
"colsample_bytree": [0.79],
"colsample_bylevel": [0.79]}]
kfold = KFold(n_splits=4, shuffle=True, random_state=66)
model = XGBRegressor(n_jobs=6)
model2 = XGBRegressor(n_jobs=6)
name_ls = ['hhb','hbo2','ca','na']
tmp_dic = dict()
###
model.fit(x_train,y_train[:, 2])
thresholds_2 = np.sort(model.feature_importances_)
model.fit(x_train,y_train[:, 3])
thresholds_3 = np.sort(model.feature_importances_)
###
selection_2=SelectFromModel(model,threshold=thresholds_2[100],prefit=True)
selection_3=SelectFromModel(model,threshold=thresholds_3[100],prefit=True)
selection_x_train_2 = selection_2.transform(x_train)
selection_x_train_3 = selection_3.transform(x_train)
###
selection_x_test_2 = selection_2.transform(x_test)
selection_x_test_3 = selection_3.transform(x_test)
###
test_2 = selection_2.transform(test)
test_3 = selection_3.transform(test)
###
model = GridSearchCV(model, parameters, cv = kfold)
model2 = GridSearchCV(model2, parameters2, cv = kfold)
## hbb, hbo2
for i in range(2):
model.fit(x_train,y_train[:, i])
y_test_pred = model.predict(x_test)
r2 = r2_score(y_test[:, i],y_test_pred)
print(f"r2 : {r2}")
mae = mean_absolute_error(y_test[:, i],y_test_pred)
print(f"mae : {mae}")
y_pred = model.predict(test)
tmp_dic[name_ls[i]] = y_pred
## ca
model2.fit(selection_x_train_2,y_train[:, 2])
y_test_pred = model2.predict(selection_x_test_2)
r2 = r2_score(y_test[:, 2],y_test_pred)
print(f"r2 : {r2}")
mae = mean_absolute_error(y_test[:, 2],y_test_pred)
print(f"mae : {mae}")
y_pred = model2.predict(test_2)
tmp_dic[name_ls[i]] = y_pred
## na
model2.fit(selection_x_train_3,y_train[:, 3])
y_test_pred = model2.predict(selection_x_test_3)
r2 = r2_score(y_test[:, 3],y_test_pred)
print(f"r2 : {r2}")
mae = mean_absolute_error(y_test[:, 3],y_test_pred)
print(f"mae : {mae}")
y_pred = model2.predict(test_3)
tmp_dic[name_ls[i]] = y_pred
df = pd.DataFrame(tmp_dic,range(10000,20000),columns=['hhb','hbo2','ca','na'])
# print(df)
df.to_csv('./submission.csv',index_label='id') | [
"dh3978@naver.com"
] | dh3978@naver.com |
bab04255c775ca3e378bc7a8498fa5ff3a634212 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_jutting.py | 0acbaa3641341ca83124c6516a98b3157035b9b7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _JUTTING():
	"""Auto-generated vocabulary entry for the word "JUTTING"."""

	def __init__(self,):
		self.name = "JUTTING"
		# NOTE(review): ``jut`` is not defined anywhere visible, so
		# instantiating this class raises NameError. Presumably this was
		# meant to be the list ['jut'] (cf. ``self.basic`` below) - confirm
		# against the generator that produced this file.
		self.definitions = jut

		self.parents = []
		self.childen  = []
		self.properties = []
		self.jsondata = {}


		self.basic = ['jut']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
16f97ac15b2d4fecf77b09d2857c71d479c0ad5f | 41cf1a4da983ba4673af58353813f77f229ce9c1 | /nbx/nbmanager/bundle/bundle.py | a28e5d2f0b4562f169f350332da24502007e8b39 | [
"MIT"
] | permissive | dalejung/nbx | d940d38084bc99e41e963d22ea43c1b43d7dffae | e5a9571bc5c91aeb279ff337383405a846bb6128 | refs/heads/master | 2023-07-08T03:22:39.939433 | 2023-06-27T00:32:35 | 2023-06-27T00:32:35 | 17,010,961 | 2 | 1 | null | 2017-04-03T03:39:36 | 2014-02-20T06:03:06 | Python | UTF-8 | Python | false | false | 2,457 | py | import os
import io
import nbformat
from IPython.utils import tz
class Bundle(object):
    """A directory-backed bundle; ``name`` is the final path component."""

    def __init__(self, path):
        self.name = path.rsplit('/', 1)[-1]
        self.path = path

    def __repr__(self):
        return "{cname}(name={name}, path={path})".format(
            cname=type(self).__name__, name=self.name, path=self.path)

    @property
    def files(self):
        """Names of files directly inside the bundle, minus compiled .pyc files."""
        try:
            _root, _dirs, names = next(os.walk(self.path))
        except StopIteration:
            # Path missing or unreadable: report no files.
            return []
        return [fn for fn in names if not fn.endswith('.pyc')]
class NotebookBundle(Bundle):
    """A Bundle whose directory contains a notebook file named after the bundle."""

    @property
    def notebook_content(self):
        """Parse the bundle's notebook as nbformat v4; None on any parse error."""
        filepath = os.path.join(self.path, self.name)
        with io.open(filepath, 'r', encoding='utf-8') as f:
            try:
                nb = nbformat.read(f, as_version=4)
            except Exception as e:
                # NOTE(review): deliberately broad - any unreadable notebook
                # maps to None, so callers must handle None content.
                nb = None
        return nb

    @property
    def files(self):
        """Support files in the bundle, excluding the notebook itself."""
        files = super(NotebookBundle, self).files
        # The notebook file must exist alongside the support files.
        # (These asserts are stripped under ``python -O``.)
        assert self.name in files
        files.remove(self.name)
        assert self.name not in files
        return files

    def get_model(self, content=True, file_content=True):
        """Build a contents-API style model dict for this bundle.

        content: when True, include the parsed notebook and support files.
        file_content: when True, include support-file text (binary files
        that fail UTF-8 decoding are skipped).
        """
        os_path = os.path.join(self.path, self.name)
        info = os.stat(os_path)
        last_modified = tz.utcfromtimestamp(info.st_mtime)
        created = tz.utcfromtimestamp(info.st_ctime)
        # Create the notebook model.
        model = {}
        model['name'] = self.name
        model['path'] = self.path
        model['last_modified'] = last_modified
        model['created'] = created
        model['type'] = 'notebook'
        model['is_bundle'] = True
        model['content'] = None
        if content:
            model['content'] = self.notebook_content
            files = {}
            for fn in self.files:
                with open(os.path.join(self.path, fn), 'rb') as f:
                    data = None
                    if file_content:
                        try:
                            data = f.read().decode('utf-8')
                        except UnicodeDecodeError:
                            # TODO how to deal with binary data?
                            # right now we skip
                            continue
                    files[fn] = data
            model['__files'] = files
        return model
| [
"dale@dalejung.com"
] | dale@dalejung.com |
9c58d676834976a2ca45ac492bc666c494703314 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/cloud/subscription.py | 9a62f2d115c1f99c497f55c6ad79d419a387606b | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 1,520 | py | """Subscription information."""
from __future__ import annotations
import asyncio
import logging
from typing import Any
from aiohttp.client_exceptions import ClientError
from hass_nabucasa import Cloud, cloud_api
from .client import CloudClient
from .const import REQUEST_TIMEOUT
_LOGGER = logging.getLogger(__name__)
async def async_subscription_info(cloud: Cloud[CloudClient]) -> dict[str, Any] | None:
    """Fetch the subscription info.

    Returns the raw subscription dict from the cloud API, or None when the
    request times out or fails (errors are logged, never raised).
    """
    try:
        async with asyncio.timeout(REQUEST_TIMEOUT):
            return await cloud_api.async_subscription_info(cloud)
    except asyncio.TimeoutError:
        _LOGGER.error(
            (
                "A timeout of %s was reached while trying to fetch subscription"
                " information"
            ),
            REQUEST_TIMEOUT,
        )
    except ClientError:
        _LOGGER.error("Failed to fetch subscription information")

    # Callers treat None as "subscription info unavailable".
    return None
async def async_migrate_paypal_agreement(
    cloud: Cloud[CloudClient],
) -> dict[str, Any] | None:
    """Migrate a paypal agreement from legacy.

    Returns the cloud API response dict, or None when the request times out
    or fails (errors are logged, never raised).
    """
    try:
        async with asyncio.timeout(REQUEST_TIMEOUT):
            return await cloud_api.async_migrate_paypal_agreement(cloud)
    except asyncio.TimeoutError:
        _LOGGER.error(
            "A timeout of %s was reached while trying to start agreement migration",
            REQUEST_TIMEOUT,
        )
    except ClientError as exception:
        _LOGGER.error("Failed to start agreement migration - %s", exception)

    # Callers treat None as "migration could not be started".
    return None
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
c9945ea2257dffedc8c00d4d6dec9d3d44ac2bc3 | bb4a4504a7051484173c8e9933b06fdf1384c2f7 | /src/simulator/controllers/keyboard_controller.py | c27da2bf63efeafabf08709717b1f577bf21efb9 | [] | no_license | uncobruce/PathBench2D | dd4794a5b19ee95ad7555d512d0e36063e2c6330 | 57c6397fe990de3088aa99da1602f9872b90d0b8 | refs/heads/master | 2023-01-09T17:16:35.200009 | 2020-08-05T20:45:28 | 2020-08-05T20:45:28 | 274,780,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | import pygame
from simulator.controllers.controller import Controller
from simulator.models.model import Model
from simulator.services.debug import DebugLevel
from simulator.services.event_manager.events.keyboard_event import KeyboardEvent
from simulator.services.event_manager.events.mouse_event import MouseEvent
from simulator.services.event_manager.events.quit_event import QuitEvent
from simulator.services.services import Services
class KeyboardController(Controller):
    """
    Handles keyboard input
    """

    def __init__(self, services: Services, model: Model) -> None:
        super().__init__(services, model)
        # Poll pygame's event queue once per simulator tick.
        self._services.ev_manager.register_tick_listener(self)

    def tick(self) -> None:
        """Drain pygame events and republish them on the internal event bus."""
        if self._services.render_engine.is_display_init() == 0:
            # Display not ready yet: pygame events are unavailable.
            return

        # Called for each game tick. We check our keyboard presses here.
        for event in self._services.render_engine.get_events():
            # handle window manager closing our window
            if event.type == pygame.QUIT:
                self._services.ev_manager.post(QuitEvent())
            # handle key down events
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    self._services.ev_manager.post(QuitEvent())
                else:
                    # post any other keys to the message queue for everyone else to see
                    ev = KeyboardEvent(event.key)
                    self._services.ev_manager.post(ev)
                    self._services.debug.write(ev, DebugLevel.MEDIUM)
            # forward all mouse activity unchanged
            if event.type == pygame.MOUSEMOTION or \
                    event.type == pygame.MOUSEBUTTONDOWN or \
                    event.type == pygame.MOUSEBUTTONUP:
                self._services.ev_manager.post(MouseEvent(event))
| [
"daniel.lenton11@imperial.ac.uk"
] | daniel.lenton11@imperial.ac.uk |
b87d30330bf460eabc1f9520646d672f29282091 | ff88a620c7437af9af946643cd65f06c99fe3601 | /IntermediateCodeAndTesting/OldTaskVersions/ShapeLearningTaskFeedback6_lastrun.py | a055bea8af96b5c63a771ff2c4a635284bb6c795 | [] | no_license | bissettp/TrainedInhibitionTask | c2f20dadbb0e440c4fcf2bd3c4d670a7416df93c | 82727bd3ffa101209a61f2ff4f057f8896522d5d | refs/heads/master | 2020-06-06T20:02:16.470092 | 2015-06-23T17:56:15 | 2015-06-23T17:56:15 | 34,129,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,393 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.82.01), Wed Mar 18 16:36:40 2015
If you publish work using this script please cite the relevant PsychoPy publications
Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session.
# NOTE(review): 'session':03 is an octal integer literal — legal only in
# Python 2 (this script targets python2 per its shebang); under Python 3
# it would be a SyntaxError.
expName = 'LearningTask' # from the Builder filename that created this script
expInfo = {'participant':'', 'gender (m/f)':'', 'age':'', 'session':03}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
# (participant ID + timestamp keeps runs from overwriting each other)
filename = _thisDir + os.sep + u'data' + os.sep + '%s_%s' %(expInfo['participant'], expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/ShapeLearningTaskFeedback6.psyexp',
    savePickle=True, saveWideText=True,
    dataFileName=filename)
#save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.WARNING)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window (fullscreen, black background)
win = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,
    monitor='testMonitor', color='black', colorSpace='rgb',
    blendMode='avg', useFBO=True,
    )
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
    frameDur = 1.0/round(expInfo['frameRate'])
else:
    frameDur = 1.0/60.0 # couldn't get a reliable measure so guess
# Initialize components for Routine "StimSetup"
StimSetupClock = core.Clock()
# Build the eight stimulus conditions for this session.
# Each condition pairs one shape with one colour (both pairings randomised),
# a reward magnitude, and a go/stop assignment; the assembled list is then
# shuffled again so the order of rewards/conditions across stimuli is random.
# NOTE: this 'import random' deliberately rebinds the name 'random' (previously
# the numpy.random.random function, via the star-ish imports above) to the
# stdlib module, which later code uses as random.choice / random.randrange.
import random
colors = ['yellow', 'white', 'orange', 'magenta', 'green', 'gray', 'cyan', 'blue']
shapes = ['triangle', 'square', 'line', 'invertedtriangle', 'hexagon', 'diamond', 'cross', 'circle']
rewards = [0.5, 1, 2, 4] * 2
conditions = ['go', 'go', 'go', 'go', 'stop', 'stop', 'stop', 'stop']
# Randomise which shape is paired with which colour.
shuffle(colors)
shuffle(shapes)
# One dict per stimulus: image file on disk, reward size, go/stop condition.
trialDetailsList = []
for shape, color, reward, condition in zip(shapes, colors, rewards, conditions):
    trialDetailsList.append({'fileName': shape + color + '.gif',
                             'reward': reward,
                             'condition': condition})
# Shuffle the assembled list so presentation order of rewards and
# go/stop conditions is randomised as well.
shuffle(trialDetailsList)
# Later routines refer to the eight conditions by these fixed names.
(ConditionOne, ConditionTwo, ConditionThree, ConditionFour,
 ConditionFive, ConditionSix, ConditionSeven, ConditionEight) = trialDetailsList
# Initialize components for Routine "instrPractice"
instrPracticeClock = core.Clock()
instruct1 = visual.TextStim(win=win, ori=0, name='instruct1',
    text='A shape stimulus will appear on every trial. \n\nIf it appears in the upper right quadrant, PRESS Y\n\nIf it appears in the lower right quadrant, PRESS H\n\nIf it appears in the lower left quadrant, PRESS G\n\nIf it appears in the upper left quadrant, PRESS T\n\nPress any key when you are ready to proceed. ', font='Arial',
    pos=[0, 0], height=0.07, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)
# Initialize components for Routine "NewTry"
NewTryClock = core.Clock()
# Initialize components for Routine "trial"
trialClock = core.Clock()
# Central fixation cross shown for the whole 1.5 s trial.
text = visual.TextStim(win=win, ori=0, name='text',
    text='+', font='Arial',
    pos=[0, 0], height=1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=-2.0)
# Shape stimulus; its image file and quadrant position are set per trial.
image_2 = visual.ImageStim(win=win, name='image_2',units='pix',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[101, 101],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
    texRes=128, interpolate=True, depth=-3.0)
# Initialize components for Routine "feedback"
feedbackClock = core.Clock()
#message variable just needs some value at start
message=0
feedback_2 = visual.TextStim(win=win, ori=0, name='feedback_2',
    text='default text', font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color=[1,1,1], colorSpace='rgb', opacity=1,
    depth=-1.0)
# Initialize components for Routine "instrMain"
instrMainClock = core.Clock()
instr2 = visual.TextStim(win=win, ori=0, name='instr2',
    text='A shape stimulus will appear on every trial. \n\nIf it appears in the upper right quadrant, PRESS Y\n\nIf it appears in the lower right quadrant, PRESS H\n\nIf it appears in the lower left quadrant, PRESS G\n\nIf it appears in the upper left quadrant, PRESS T\n\nPress any key when you are ready to proceed. ', font='Arial',
    pos=[0, 0], height=0.075, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)
# Initialize components for Routine "ResetAtBlock"
ResetAtBlockClock = core.Clock()
# Initialize components for Routine "trial"
# NOTE(review): the Builder export re-initializes the shared "trial" and
# "feedback" components here because both the practice and main phases use
# the same routines; these assignments simply overwrite the objects created
# above with identical parameters.
trialClock = core.Clock()
text = visual.TextStim(win=win, ori=0, name='text',
    text='+', font='Arial',
    pos=[0, 0], height=1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=-2.0)
image_2 = visual.ImageStim(win=win, name='image_2',units='pix',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[101, 101],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
    texRes=128, interpolate=True, depth=-3.0)
# Initialize components for Routine "feedback"
feedbackClock = core.Clock()
#message variable just needs some value at start
message=0
feedback_2 = visual.TextStim(win=win, ori=0, name='feedback_2',
    text='default text', font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color=[1,1,1], colorSpace='rgb', opacity=1,
    depth=-1.0)
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
#------Prepare to start Routine "StimSetup"-------
# This routine exists only to host the condition-building code above; it has
# no visual components, so the while loop below exits on its first pass
# (continueRoutine is forced False and the empty component list never
# reverts it) without ever flipping the window.
t = 0
StimSetupClock.reset() # clock
frameN = -1
# update component parameters for each repeat
# keep track of which components have finished
StimSetupComponents = []
for thisComponent in StimSetupComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
#-------Start Routine "StimSetup"-------
continueRoutine = True
while continueRoutine:
    # get current time
    t = StimSetupClock.getTime()
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in StimSetupComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
#-------Ending Routine "StimSetup"-------
for thisComponent in StimSetupComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "StimSetup" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#------Prepare to start Routine "instrPractice"-------
# Show the practice instructions until any key is pressed; the key and its
# reaction time are saved to the experiment data file.
t = 0
instrPracticeClock.reset() # clock
frameN = -1
# update component parameters for each repeat
ok1 = event.BuilderKeyResponse() # create an object of type KeyResponse
ok1.status = NOT_STARTED
# keep track of which components have finished
instrPracticeComponents = []
instrPracticeComponents.append(instruct1)
instrPracticeComponents.append(ok1)
for thisComponent in instrPracticeComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
#-------Start Routine "instrPractice"-------
continueRoutine = True
while continueRoutine:
    # get current time
    t = instrPracticeClock.getTime()
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *instruct1* updates
    if t >= 0.0 and instruct1.status == NOT_STARTED:
        # keep track of start time/frame for later
        instruct1.tStart = t # underestimates by a little under one frame
        instruct1.frameNStart = frameN # exact frame index
        instruct1.setAutoDraw(True)
    # *ok1* updates
    if t >= 0.0 and ok1.status == NOT_STARTED:
        # keep track of start time/frame for later
        ok1.tStart = t # underestimates by a little under one frame
        ok1.frameNStart = frameN # exact frame index
        ok1.status = STARTED
        # keyboard checking is just starting
        ok1.clock.reset() # now t=0
        event.clearEvents(eventType='keyboard')
    if ok1.status == STARTED:
        theseKeys = event.getKeys()
        # check for quit:
        if "escape" in theseKeys:
            endExpNow = True
        if len(theseKeys) > 0: # at least one key was pressed
            ok1.keys = theseKeys[-1] # just the last key pressed
            ok1.rt = ok1.clock.getTime()
            # a response ends the routine
            continueRoutine = False
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in instrPracticeComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
#-------Ending Routine "instrPractice"-------
for thisComponent in instrPracticeComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# check responses
if ok1.keys in ['', [], None]: # No response was made
   ok1.keys=None
# store data for thisExp (ExperimentHandler)
thisExp.addData('ok1.keys',ok1.keys)
if ok1.keys != None: # we had a response
    thisExp.addData('ok1.rt', ok1.rt)
thisExp.nextEntry()
# the Routine "instrPractice" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
# Practice phase: 2 fully-randomised repeats of the rows in Trialtypes5.xlsx
# (presumably columns include xPos, yPos and corrAns, used below — verify
# against the spreadsheet).
practiceTrials = data.TrialHandler(nReps=2.0, method='fullRandom',
    extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/ShapeLearningTaskFeedback6.psyexp',
    trialList=data.importConditions('Trialtypes5.xlsx'),
    seed=None, name='practiceTrials')
thisExp.addLoop(practiceTrials) # add the loop to the experiment
thisPracticeTrial = practiceTrials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisPracticeTrial.rgb)
# NOTE(review): the exec() below injects each spreadsheet column as a
# module-level variable (e.g. xPos, yPos, corrAns) — this is how the trial
# routine further down receives its parameters.
if thisPracticeTrial != None:
    for paramName in thisPracticeTrial.keys():
        exec(paramName + '= thisPracticeTrial.' + paramName)
for thisPracticeTrial in practiceTrials:
    currentLoop = practiceTrials
    # abbreviate parameter names if possible (e.g. rgb = thisPracticeTrial.rgb)
    if thisPracticeTrial != None:
        for paramName in thisPracticeTrial.keys():
            exec(paramName + '= thisPracticeTrial.' + paramName)
    #------Prepare to start Routine "NewTry"-------
    # NewTry has no components; its only job is to set the Redo flag to 2,
    # which tells the trial routine below to pick a fresh random stimulus.
    t = 0
    NewTryClock.reset() # clock
    frameN = -1
    # update component parameters for each repeat
    Redo = 2
    # keep track of which components have finished
    NewTryComponents = []
    for thisComponent in NewTryComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED
    #-------Start Routine "NewTry"-------
    continueRoutine = True
    while continueRoutine:
        # get current time
        t = NewTryClock.getTime()
        frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame
        # check if all components have finished
        if not continueRoutine: # a component has requested a forced-end of Routine
            break
        continueRoutine = False # will revert to True if at least one component still running
        for thisComponent in NewTryComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break # at least one component has not yet finished
        # check for quit (the Esc key)
        if endExpNow or event.getKeys(keyList=["escape"]):
            core.quit()
        # refresh the screen
        if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
            win.flip()
    #-------Ending Routine "NewTry"-------
    for thisComponent in NewTryComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    # the Routine "NewTry" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()
    # set up handler to look after randomisation of conditions etc
    # NOTE(review): nReps is evaluated ONCE at construction (Redo == 2 here),
    # so this inner loop runs at most 2 iterations; the later assignments
    # Redo = 0 / Redo = 999 in the feedback routine do NOT shorten or extend
    # an already-constructed TrialHandler — confirm this matches the intended
    # "repeat until correct" design.
    ReDoLoop = data.TrialHandler(nReps=Redo, method='random',
        extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/ShapeLearningTaskFeedback6.psyexp',
        trialList=[None],
        seed=None, name='ReDoLoop')
    thisExp.addLoop(ReDoLoop) # add the loop to the experiment
    thisReDoLoop = ReDoLoop.trialList[0] # so we can initialise stimuli with some values
    # abbreviate parameter names if possible (e.g. rgb=thisReDoLoop.rgb)
    if thisReDoLoop != None:
        for paramName in thisReDoLoop.keys():
            exec(paramName + '= thisReDoLoop.' + paramName)
    for thisReDoLoop in ReDoLoop:
        currentLoop = ReDoLoop
        # abbreviate parameter names if possible (e.g. rgb = thisReDoLoop.rgb)
        if thisReDoLoop != None:
            for paramName in thisReDoLoop.keys():
                exec(paramName + '= thisReDoLoop.' + paramName)
        #------Prepare to start Routine "trial"-------
        # One 1.5 s trial: fixation cross for the whole trial, shape stimulus
        # from 0.5 s onward in the quadrant given by xPos/yPos (from the
        # conditions spreadsheet), and a single-key response window of 1 s.
        t = 0
        trialClock.reset() # clock
        frameN = -1
        routineTimer.add(1.500000)
        # update component parameters for each repeat
        #Redo = 1
        import random
        Integers = [1, 2, 3, 4, 5, 6, 7, 8]
        # A new stimulus is drawn only when Redo == 2 (i.e. a fresh trial set
        # up by NewTry); on a redo iteration of ReDoLoop, Redo is 0 or 999,
        # so ConditionChoice/CurrentStimulus keep their previous values and
        # the same stimulus is shown again.
        if Redo == 2:
            ConditionChoice = random.choice(Integers)
        if ConditionChoice == 1:
            CurrentStimulus = ConditionOne['fileName']
        elif ConditionChoice == 2:
            CurrentStimulus = ConditionTwo['fileName']
        elif ConditionChoice == 3:
            CurrentStimulus = ConditionThree['fileName']
        elif ConditionChoice == 4:
            CurrentStimulus = ConditionFour['fileName']
        elif ConditionChoice == 5:
            CurrentStimulus = ConditionFive['fileName']
        elif ConditionChoice == 6:
            CurrentStimulus = ConditionSix['fileName']
        elif ConditionChoice == 7:
            CurrentStimulus = ConditionSeven['fileName']
        elif ConditionChoice == 8:
            CurrentStimulus = ConditionEight['fileName']
        resp = event.BuilderKeyResponse() # create an object of type KeyResponse
        resp.status = NOT_STARTED
        image_2.setPos([xPos, yPos])
        image_2.setImage(CurrentStimulus)
        # keep track of which components have finished
        trialComponents = []
        trialComponents.append(resp)
        trialComponents.append(text)
        trialComponents.append(image_2)
        for thisComponent in trialComponents:
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        #-------Start Routine "trial"-------
        continueRoutine = True
        while continueRoutine and routineTimer.getTime() > 0:
            # get current time
            t = trialClock.getTime()
            frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
            # update/draw components on each frame
            # *resp* updates
            if t >= .5 and resp.status == NOT_STARTED:
                # keep track of start time/frame for later
                resp.tStart = t # underestimates by a little under one frame
                resp.frameNStart = frameN # exact frame index
                resp.status = STARTED
                # keyboard checking is just starting
                resp.clock.reset() # now t=0
                event.clearEvents(eventType='keyboard')
            if resp.status == STARTED and t >= (.5 + (1-win.monitorFramePeriod*0.75)): #most of one frame period left
                resp.status = STOPPED
            if resp.status == STARTED:
                theseKeys = event.getKeys(keyList=['t', 'y', 'g', 'h'])
                # check for quit:
                if "escape" in theseKeys:
                    endExpNow = True
                if len(theseKeys) > 0: # at least one key was pressed
                    resp.keys = theseKeys[-1] # just the last key pressed
                    resp.rt = resp.clock.getTime()
                    # was this 'correct'?
                    if (resp.keys == str(corrAns)) or (resp.keys == corrAns):
                        resp.corr = 1
                    else:
                        resp.corr = 0
                    # a response ends the routine
                    continueRoutine = False
            # *text* updates
            if t >= 0.0 and text.status == NOT_STARTED:
                # keep track of start time/frame for later
                text.tStart = t # underestimates by a little under one frame
                text.frameNStart = frameN # exact frame index
                text.setAutoDraw(True)
            if text.status == STARTED and t >= (0.0 + (1.5-win.monitorFramePeriod*0.75)): #most of one frame period left
                text.setAutoDraw(False)
            # *image_2* updates
            if t >= .5 and image_2.status == NOT_STARTED:
                # keep track of start time/frame for later
                image_2.tStart = t # underestimates by a little under one frame
                image_2.frameNStart = frameN # exact frame index
                image_2.setAutoDraw(True)
            if image_2.status == STARTED and t >= (.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
                image_2.setAutoDraw(False)
            # check if all components have finished
            if not continueRoutine: # a component has requested a forced-end of Routine
                break
            continueRoutine = False # will revert to True if at least one component still running
            for thisComponent in trialComponents:
                if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                    continueRoutine = True
                    break # at least one component has not yet finished
            # check for quit (the Esc key)
            if endExpNow or event.getKeys(keyList=["escape"]):
                core.quit()
            # refresh the screen
            if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                win.flip()
        #-------Ending Routine "trial"-------
        for thisComponent in trialComponents:
            if hasattr(thisComponent, "setAutoDraw"):
                thisComponent.setAutoDraw(False)
        # check responses
        if resp.keys in ['', [], None]: # No response was made
           resp.keys=None
           # was no response the correct answer?!
           if str(corrAns).lower() == 'none': resp.corr = 1 # correct non-response
           else: resp.corr = 0 # failed to respond (incorrectly)
        # store data for ReDoLoop (TrialHandler)
        ReDoLoop.addData('resp.keys',resp.keys)
        ReDoLoop.addData('resp.corr', resp.corr)
        if resp.keys != None: # we had a response
            ReDoLoop.addData('resp.rt', resp.rt)
        #------Prepare to start Routine "feedback"-------
        # 1 s feedback: on a correct response, show the condition's reward
        # jittered by +/-0.25 in 0.01 steps — except with probability 1/5
        # (DisplayReward == 1 out of randrange(1, 6)) the shown reward is 0.
        # A miss shows "Too Slow"; a wrong key shows "Wrong".
        t = 0
        feedbackClock.reset() # clock
        frameN = -1
        routineTimer.add(1.000000)
        # update component parameters for each repeat
        DisplayReward = random.randrange(1, 6)
        if resp.corr:#stored on last run routine
            if DisplayReward == 1:
                message = "You won " + str(0);
            else:
                if ConditionChoice == 1:
                    CurrentReward = ConditionOne['reward']
                    message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
                elif ConditionChoice == 2:
                    CurrentReward = ConditionTwo['reward']
                    message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
                elif ConditionChoice == 3:
                    CurrentReward = ConditionThree['reward']
                    message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
                elif ConditionChoice == 4:
                    CurrentReward = ConditionFour['reward']
                    message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
                elif ConditionChoice == 5:
                    CurrentReward = ConditionFive['reward']
                    message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
                elif ConditionChoice == 6:
                    CurrentReward = ConditionSix['reward']
                    message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
                elif ConditionChoice == 7:
                    CurrentReward = ConditionSeven['reward']
                    message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
                elif ConditionChoice == 8:
                    CurrentReward = ConditionEight['reward']
                    message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
        elif resp.keys is None: #or len(key_resp.keys)<1:
        #elif resp.rt == 0:
            message ="Too Slow"
        else:
            message="Wrong"
        #if DisplayReward == 1:
        #    message = 0;
        #else:
        #    message = CurrentReward + (random.randrange(-25, 26)*.01)
        #    message="Correct! RT=%.3f" %(resp.rt)
        feedback_2.setText(message)
        # keep track of which components have finished
        feedbackComponents = []
        feedbackComponents.append(feedback_2)
        for thisComponent in feedbackComponents:
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        #-------Start Routine "feedback"-------
        continueRoutine = True
        while continueRoutine and routineTimer.getTime() > 0:
            # get current time
            t = feedbackClock.getTime()
            frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
            # update/draw components on each frame
            # *feedback_2* updates
            if t >= 0.0 and feedback_2.status == NOT_STARTED:
                # keep track of start time/frame for later
                feedback_2.tStart = t # underestimates by a little under one frame
                feedback_2.frameNStart = frameN # exact frame index
                feedback_2.setAutoDraw(True)
            if feedback_2.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
                feedback_2.setAutoDraw(False)
            # check if all components have finished
            if not continueRoutine: # a component has requested a forced-end of Routine
                break
            continueRoutine = False # will revert to True if at least one component still running
            for thisComponent in feedbackComponents:
                if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                    continueRoutine = True
                    break # at least one component has not yet finished
            # check for quit (the Esc key)
            if endExpNow or event.getKeys(keyList=["escape"]):
                core.quit()
            # refresh the screen
            if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                win.flip()
        #-------Ending Routine "feedback"-------
        for thisComponent in feedbackComponents:
            if hasattr(thisComponent, "setAutoDraw"):
                thisComponent.setAutoDraw(False)
        # NOTE(review): Redo is used here as a flag for the NEXT trial
        # routine (only Redo == 2 triggers a fresh stimulus draw); setting it
        # to 999 does not add iterations to ReDoLoop, whose nReps was fixed
        # at 2 when it was constructed — confirm intended behaviour.
        if resp.corr:
            Redo = 0
        else:
            Redo = 999
        thisExp.nextEntry()
# completed Redo repeats of 'ReDoLoop'
# get names of stimulus parameters
if ReDoLoop.trialList in ([], [None], None): params = []
else: params = ReDoLoop.trialList[0].keys()
# save data for this loop
ReDoLoop.saveAsExcel(filename + '.xlsx', sheetName='ReDoLoop',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
thisExp.nextEntry()
# completed 2.0 repeats of 'practiceTrials'
# get names of stimulus parameters
if practiceTrials.trialList in ([], [None], None): params = []
else: params = practiceTrials.trialList[0].keys()
# save data for this loop
practiceTrials.saveAsExcel(filename + '.xlsx', sheetName='practiceTrials',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
#------Prepare to start Routine "instrMain"-------
# Show the main-phase instructions until any key is pressed.
# NOTE(review): unlike ok1 in the practice instructions, ok2's clock is
# never reset and no key/RT is recorded to the data file — the keypress
# only dismisses the screen; confirm that is intentional.
t = 0
instrMainClock.reset() # clock
frameN = -1
# update component parameters for each repeat
ok2 = event.BuilderKeyResponse() # create an object of type KeyResponse
ok2.status = NOT_STARTED
# keep track of which components have finished
instrMainComponents = []
instrMainComponents.append(instr2)
instrMainComponents.append(ok2)
for thisComponent in instrMainComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
#-------Start Routine "instrMain"-------
continueRoutine = True
while continueRoutine:
    # get current time
    t = instrMainClock.getTime()
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *instr2* updates
    if t >= 0.0 and instr2.status == NOT_STARTED:
        # keep track of start time/frame for later
        instr2.tStart = t # underestimates by a little under one frame
        instr2.frameNStart = frameN # exact frame index
        instr2.setAutoDraw(True)
    # *ok2* updates
    if t >= 0.0 and ok2.status == NOT_STARTED:
        # keep track of start time/frame for later
        ok2.tStart = t # underestimates by a little under one frame
        ok2.frameNStart = frameN # exact frame index
        ok2.status = STARTED
        # keyboard checking is just starting
        event.clearEvents(eventType='keyboard')
    if ok2.status == STARTED:
        theseKeys = event.getKeys()
        # check for quit:
        if "escape" in theseKeys:
            endExpNow = True
        if len(theseKeys) > 0: # at least one key was pressed
            # a response ends the routine
            continueRoutine = False
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in instrMainComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
#-------Ending Routine "instrMain"-------
for thisComponent in instrMainComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "instrMain" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
# Main phase: 4 sequential blocks; trialList=[None] means this outer loop
# carries no condition parameters of its own (the exec below is a no-op for
# a None trial) — it only repeats the block contents four times.
Blocks = data.TrialHandler(nReps=4, method='sequential',
    extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/ShapeLearningTaskFeedback6.psyexp',
    trialList=[None],
    seed=None, name='Blocks')
thisExp.addLoop(Blocks) # add the loop to the experiment
thisBlock = Blocks.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisBlock.rgb)
if thisBlock != None:
    for paramName in thisBlock.keys():
        exec(paramName + '= thisBlock.' + paramName)
for thisBlock in Blocks:
currentLoop = Blocks
# abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)
if thisBlock != None:
for paramName in thisBlock.keys():
exec(paramName + '= thisBlock.' + paramName)
#------Prepare to start Routine "ResetAtBlock"-------
t = 0
ResetAtBlockClock.reset() # clock
frameN = -1
# update component parameters for each repeat
Redo = 1
# keep track of which components have finished
ResetAtBlockComponents = []
for thisComponent in ResetAtBlockComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "ResetAtBlock"-------
continueRoutine = True
while continueRoutine:
# get current time
t = ResetAtBlockClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ResetAtBlockComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "ResetAtBlock"-------
for thisComponent in ResetAtBlockComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "ResetAtBlock" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=25.0, method='fullRandom',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/ShapeLearningTaskFeedback6.psyexp',
trialList=data.importConditions('Trialtypes5.xlsx'),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
# set up handler to look after randomisation of conditions etc
ReDoLoopMain = data.TrialHandler(nReps=Redo, method='random',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/OneDrive/Poldrack/TrainedInhibition/PsychoPy/ShapeLearningTaskFeedback6.psyexp',
trialList=[None],
seed=None, name='ReDoLoopMain')
thisExp.addLoop(ReDoLoopMain) # add the loop to the experiment
thisReDoLoopMain = ReDoLoopMain.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisReDoLoopMain.rgb)
if thisReDoLoopMain != None:
for paramName in thisReDoLoopMain.keys():
exec(paramName + '= thisReDoLoopMain.' + paramName)
for thisReDoLoopMain in ReDoLoopMain:
currentLoop = ReDoLoopMain
# abbreviate parameter names if possible (e.g. rgb = thisReDoLoopMain.rgb)
if thisReDoLoopMain != None:
for paramName in thisReDoLoopMain.keys():
exec(paramName + '= thisReDoLoopMain.' + paramName)
#------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
routineTimer.add(1.500000)
# update component parameters for each repeat
#Redo = 1
import random
Integers = [1, 2, 3, 4, 5, 6, 7, 8]
if Redo == 2:
ConditionChoice = random.choice(Integers)
if ConditionChoice == 1:
CurrentStimulus = ConditionOne['fileName']
elif ConditionChoice == 2:
CurrentStimulus = ConditionTwo['fileName']
elif ConditionChoice == 3:
CurrentStimulus = ConditionThree['fileName']
elif ConditionChoice == 4:
CurrentStimulus = ConditionFour['fileName']
elif ConditionChoice == 5:
CurrentStimulus = ConditionFive['fileName']
elif ConditionChoice == 6:
CurrentStimulus = ConditionSix['fileName']
elif ConditionChoice == 7:
CurrentStimulus = ConditionSeven['fileName']
elif ConditionChoice == 8:
CurrentStimulus = ConditionEight['fileName']
resp = event.BuilderKeyResponse() # create an object of type KeyResponse
resp.status = NOT_STARTED
image_2.setPos([xPos, yPos])
image_2.setImage(CurrentStimulus)
# keep track of which components have finished
trialComponents = []
trialComponents.append(resp)
trialComponents.append(text)
trialComponents.append(image_2)
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "trial"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *resp* updates
if t >= .5 and resp.status == NOT_STARTED:
# keep track of start time/frame for later
resp.tStart = t # underestimates by a little under one frame
resp.frameNStart = frameN # exact frame index
resp.status = STARTED
# keyboard checking is just starting
resp.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if resp.status == STARTED and t >= (.5 + (1-win.monitorFramePeriod*0.75)): #most of one frame period left
resp.status = STOPPED
if resp.status == STARTED:
theseKeys = event.getKeys(keyList=['t', 'y', 'g', 'h'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
resp.keys = theseKeys[-1] # just the last key pressed
resp.rt = resp.clock.getTime()
# was this 'correct'?
if (resp.keys == str(corrAns)) or (resp.keys == corrAns):
resp.corr = 1
else:
resp.corr = 0
# a response ends the routine
continueRoutine = False
# *text* updates
if t >= 0.0 and text.status == NOT_STARTED:
# keep track of start time/frame for later
text.tStart = t # underestimates by a little under one frame
text.frameNStart = frameN # exact frame index
text.setAutoDraw(True)
if text.status == STARTED and t >= (0.0 + (1.5-win.monitorFramePeriod*0.75)): #most of one frame period left
text.setAutoDraw(False)
# *image_2* updates
if t >= .5 and image_2.status == NOT_STARTED:
# keep track of start time/frame for later
image_2.tStart = t # underestimates by a little under one frame
image_2.frameNStart = frameN # exact frame index
image_2.setAutoDraw(True)
if image_2.status == STARTED and t >= (.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
image_2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if resp.keys in ['', [], None]: # No response was made
resp.keys=None
# was no response the correct answer?!
if str(corrAns).lower() == 'none': resp.corr = 1 # correct non-response
else: resp.corr = 0 # failed to respond (incorrectly)
# store data for ReDoLoopMain (TrialHandler)
ReDoLoopMain.addData('resp.keys',resp.keys)
ReDoLoopMain.addData('resp.corr', resp.corr)
if resp.keys != None: # we had a response
ReDoLoopMain.addData('resp.rt', resp.rt)
#------Prepare to start Routine "feedback"-------
t = 0
feedbackClock.reset() # clock
frameN = -1
routineTimer.add(1.000000)
# update component parameters for each repeat
DisplayReward = random.randrange(1, 6)
if resp.corr:#stored on last run routine
if DisplayReward == 1:
message = "You won " + str(0);
else:
if ConditionChoice == 1:
CurrentReward = ConditionOne['reward']
message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
elif ConditionChoice == 2:
CurrentReward = ConditionTwo['reward']
message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
elif ConditionChoice == 3:
CurrentReward = ConditionThree['reward']
message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
elif ConditionChoice == 4:
CurrentReward = ConditionFour['reward']
message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
elif ConditionChoice == 5:
CurrentReward = ConditionFive['reward']
message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
elif ConditionChoice == 6:
CurrentReward = ConditionSix['reward']
message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
elif ConditionChoice == 7:
CurrentReward = ConditionSeven['reward']
message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
elif ConditionChoice == 8:
CurrentReward = ConditionEight['reward']
message = "You won " + str(CurrentReward + (random.randrange(-25, 26)*.01))
elif resp.keys is None: #or len(key_resp.keys)<1:
#elif resp.rt == 0:
message ="Too Slow"
else:
message="Wrong"
#if DisplayReward == 1:
# message = 0;
#else:
# message = CurrentReward + (random.randrange(-25, 26)*.01)
# message="Correct! RT=%.3f" %(resp.rt)
feedback_2.setText(message)
# keep track of which components have finished
feedbackComponents = []
feedbackComponents.append(feedback_2)
for thisComponent in feedbackComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "feedback"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = feedbackClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *feedback_2* updates
if t >= 0.0 and feedback_2.status == NOT_STARTED:
# keep track of start time/frame for later
feedback_2.tStart = t # underestimates by a little under one frame
feedback_2.frameNStart = frameN # exact frame index
feedback_2.setAutoDraw(True)
if feedback_2.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
feedback_2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in feedbackComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "feedback"-------
for thisComponent in feedbackComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
if resp.corr:
Redo = 0
else:
Redo = 999
thisExp.nextEntry()
# completed Redo repeats of 'ReDoLoopMain'
# get names of stimulus parameters
if ReDoLoopMain.trialList in ([], [None], None): params = []
else: params = ReDoLoopMain.trialList[0].keys()
# save data for this loop
ReDoLoopMain.saveAsExcel(filename + '.xlsx', sheetName='ReDoLoopMain',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
thisExp.nextEntry()
# completed 25.0 repeats of 'trials'
# get names of stimulus parameters
if trials.trialList in ([], [None], None): params = []
else: params = trials.trialList[0].keys()
# save data for this loop
trials.saveAsExcel(filename + '.xlsx', sheetName='trials',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
thisExp.nextEntry()
# completed 4 repeats of 'Blocks'
# get names of stimulus parameters
if Blocks.trialList in ([], [None], None): params = []
else: params = Blocks.trialList[0].keys()
# save data for this loop
Blocks.saveAsExcel(filename + '.xlsx', sheetName='Blocks',
stimOut=params,
dataOut=['n','all_mean','all_std', 'all_raw'])
win.close()
core.quit()
| [
"pbissett@stanford.edu"
] | pbissett@stanford.edu |
72cc761d18a77df7d2cf09c6fa7002298b049768 | 534570bbb873293bd2646a1567b63d162fbba13c | /Python/236.lowest-common-ancestor-of-a-binary-tree.py | 250e65cbb6bfc472fa756c8203cf2e14635ad403 | [] | no_license | XinheLIU/Coding-Interview | fa3df0f7167fb1bc6c8831748249ebaa6f164552 | d6034c567cef252cfafca697aa316c7ad4e7d128 | refs/heads/master | 2022-09-17T14:30:54.371370 | 2022-08-19T15:53:35 | 2022-08-19T15:53:35 | 146,382,499 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #
# @lc app=leetcode id=236 lang=python3
#
# [236] Lowest Common Ancestor of a Binary Tree
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Recursive lowest-common-ancestor search.

    A node is the LCA exactly when p and q are found in different
    subtrees of it, or when the node is itself p or q with the other
    somewhere below it.
    """

    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        # Reaching the bottom, or reaching p or q, terminates the search.
        if not root or root is p or root is q:
            return root
        # Look for p and q in both halves of the tree.
        left = self.lowestCommonAncestor(root.left, p, q)
        right = self.lowestCommonAncestor(root.right, p, q)
        # Hits on both sides mean the current node is the split point.
        if left and right:
            return root
        # Otherwise pass up whichever side found something (may be None).
        return left or right
# @lc code=end
| [
"LIUXinhe@outlook.com"
] | LIUXinhe@outlook.com |
01d21fb423c5586a3bd3e9bcb8073f54c29bc389 | 59c55725576bbf0e2f6617507ba2f1db639abb3f | /project_time_sequence/project_time_sequence.py | c5f8461cb428c8e5e03210f485c31d8270f2ee7f | [] | no_license | bmya/eficent-odoo-addons | e3426ebaf1f59e52726253fc1dd36a09d9363059 | 5d8ddfa384ab4417f42bda103b71d926848035f6 | refs/heads/7.0 | 2021-01-21T16:48:55.312452 | 2015-11-04T14:11:19 | 2015-11-04T14:11:19 | 45,649,141 | 1 | 3 | null | 2015-11-06T00:35:17 | 2015-11-06T00:35:17 | null | UTF-8 | Python | false | false | 4,604 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import time
from datetime import datetime, date, timedelta
import decimal_precision as dp
from openerp.tools.translate import _
from openerp.osv import fields, osv
import netsvc
import tools
class task(osv.osv):
    """Extension of project.task adding predecessor/successor time-sequence
    links between tasks, plus function fields rendering the predecessors
    as comma-separated strings."""
    _inherit = 'project.task'

    def get_related_tasks(self, cr, uid, ids, context=None):
        """Return {task_id: True} covering the given tasks and their
        direct successors.

        :param ids: ids of the starting tasks
        :return: dict usable as a set of related task ids
        """
        result = {}
        # Fix: the original called browse(..., context=None), silently
        # discarding the caller's context (language, company, ...).
        for t in self.browse(cr, uid, ids, context=context):
            result[t.id] = True
            for child in t.successor_ids:
                result[child.id] = True
        return result

    def _predecessors_as_string(self, cr, uid, ids, render):
        # Shared body of the two function fields below: render every
        # predecessor of each task with `render`, sort the rendered
        # strings alphabetically and join them with ", ".
        # Function-field callbacks receive no context, so browse() is
        # called without one (same as before).
        if not ids:
            return []
        res = []
        for t in self.browse(cr, uid, ids, context=None):
            if t.predecessor_ids:
                data = [render(parent) for parent in t.predecessor_ids]
            else:
                # Historical behaviour: no predecessors -> empty string.
                data = ['']
            # Plain sort() replaces the Python-2-only
            # sort(cmp=None, key=None, reverse=False) spelling.
            data.sort()
            res.append((t.id, ', '.join(data)))
        return dict(res)

    def _predecessor_ids_calc(self, cr, uid, ids, prop, unknow_none, unknow_dict):
        """Function field: predecessor database ids as e.g. "12, 34"."""
        return self._predecessors_as_string(cr, uid, ids, lambda parent: str(parent.id))

    def _predecessor_names_calc(self, cr, uid, ids, prop, unknow_none, unknow_dict):
        """Function field: predecessor task names as e.g. "a, b, c"."""
        return self._predecessors_as_string(cr, uid, ids, lambda parent: tools.ustr(parent.name))

    _columns = {
        # Both directions of the same many2many relation table.
        'predecessor_ids': fields.many2many('project.task', 'project_task_predecessor_rel', 'task_id', 'parent_id', 'Predecessor Tasks'),
        'successor_ids': fields.many2many('project.task', 'project_task_predecessor_rel', 'parent_id', 'task_id', 'Successor Tasks'),
        'predecessor_ids_str': fields.function(_predecessor_ids_calc, method=True, type='char', string='Predecessor tasks', size=20, help='Predecessor tasks ids',
                                ),
        'predecessor_names_str': fields.function(_predecessor_names_calc, method=True, type='char', string='Predecessor tasks', size=512, help='Predecessor tasks ids',
                                ),
    }

    def do_link_predecessors(self, cr, uid, task_id, link_predecessors_data, context=None):
        """Replace the predecessor list of `task_id` with the ids found in
        link_predecessors_data['predecessor_ids'] (many2many command 6)."""
        task_br = self.browse(cr, uid, task_id, context=context)
        self.write(cr, uid, [task_br.id], {
            'predecessor_ids': [(6, 0, link_predecessors_data['predecessor_ids'])],
        })
        return True

task()
| [
"jordi.ballester@eficent.com"
] | jordi.ballester@eficent.com |
a7dc4845696972e5fcd8894971051999693616db | e93690e8ac06fd6aa2f7fe7d3ea56978e787e496 | /optimizeDLM/subset/optimizeDependencyLength_POS_NoSplit.py | 729d42770061870f241340a3f4596929354044c2 | [] | no_license | m-hahn/optimization-landscapes | 8446fbb0ae783f7aa76278e8a5f4cf5e6f4b2cd8 | b16f640dd855a912f52844882b3de701e5b9eca6 | refs/heads/master | 2023-08-12T01:44:18.434912 | 2021-10-03T14:37:11 | 2021-10-03T14:37:11 | 273,661,277 | 0 | 0 | null | 2021-04-15T04:39:52 | 2020-06-20T07:36:12 | TeX | UTF-8 | Python | false | false | 16,372 | py | # Optimizing a grammar for dependency length minimization
import random
import sys
objectiveName = "DepL"
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--language', type=str)
parser.add_argument('--size', type=int)
parser.add_argument('--entropy_weight', type=float, default=0.001)
parser.add_argument('--lr_policy', type=float, default=0.1)
parser.add_argument('--momentum', type=float, default=0.9)
args = parser.parse_args()
myID = random.randint(0,10000000)
posUni = set()
posFine = set()
deps = ["acl", "acl:relcl", "advcl", "advmod", "amod", "appos", "aux", "auxpass", "case", "cc", "ccomp", "compound", "compound:prt", "conj", "conj:preconj", "cop", "csubj", "csubjpass", "dep", "det", "det:predet", "discourse", "dobj", "expl", "foreign", "goeswith", "iobj", "list", "mark", "mwe", "neg", "nmod", "nmod:npmod", "nmod:poss", "nmod:tmod", "nsubj", "nsubjpass", "nummod", "parataxis", "punct", "remnant", "reparandum", "root", "vocative", "xcomp"]
from math import log, exp
from random import random, shuffle
from corpusIterator_V import CorpusIterator_V as CorpusIterator
originalDistanceWeights = {}
def makeCoarse(x):
    """Collapse a fine-grained UD relation to its coarse label,
    e.g. "nmod:poss" -> "nmod"; labels without a ":" pass through."""
    return x.split(":", 1)[0]
def initializeOrderTable():
    """Single pass over the corpus collecting the statistics the
    optimizer starts from.

    Side effects: fills the module-level `posUni` / `posFine` sets.

    Returns (dhLogits, vocab, keys, depsVocab) where
      dhLogits : {(headPOS, dep, depPOS): smoothed log-odds that the
                  dependent precedes its head (DH vs HD)}
      vocab    : {word: count}
      keys     : set of (headPOS, dep, depPOS) triples observed
      depsVocab: set of dependency labels observed
    """
    orderTable = {}
    keys = set()
    vocab = {}
    depsVocab = set()
    # (The distance accumulators of the original were never read and
    # have been dropped.)
    for partition in ["together"]:
        for sentence in CorpusIterator(args.language, partition, shuffleDataSeed=myID, size=args.size).iterator():
            for line in sentence:
                vocab[line["word"]] = vocab.get(line["word"], 0) + 1
                line["fine_dep"] = line["dep"]
                depsVocab.add(line["fine_dep"])
                posFine.add(line["posFine"])
                posUni.add(line["posUni"])
                if line["fine_dep"] == "root":
                    continue
                posHead = sentence[line["head"] - 1]["posUni"]
                dep = line["fine_dep"]
                direction = "HD" if line["head"] < line["index"] else "DH"
                key = (posHead, dep, line["posUni"])
                keys.add(key)
                # Bug fix: counts used to be stored under (dep, direction)
                # but read back below under (key, direction), so every
                # logit came out as log(1)-log(1)=0.  Store them under the
                # full key so the lookup actually finds them.
                orderTable[(key, direction)] = orderTable.get((key, direction), 0) + 1
    dhLogits = {}
    for key in keys:
        # Add-one smoothing on both direction counts.
        hd = orderTable.get((key, "HD"), 0) + 1.0
        dh = orderTable.get((key, "DH"), 0) + 1.0
        dhLogits[key] = log(dh) - log(hd)
    return dhLogits, vocab, keys, depsVocab
import torch.nn as nn
import torch
from torch.autograd import Variable
# "linearization_logprobability"
def recursivelyLinearize(sentence, position, result, gradients_from_the_left_sum):
    """Flatten the ordered dependency tree rooted at `position` into
    `result` (DH children, then the node itself, then HD children).

    `gradients_from_the_left_sum` is the running sum of the sampling
    log-probabilities relevant to everything left of this subtree; each
    visited node gets that running value stored under
    "relevant_logprob_sum".  Returns the running sum after the subtree.
    """
    node = sentence[position - 1]
    # Decisions about this node's own children affect its whole domain.
    running = gradients_from_the_left_sum + sum(node.get("children_decisions_logprobs", []))
    try:
        # The node's placement among its siblings, if it was sampled.
        running = running + node["linearization_logprobability"]
    except KeyError:
        # Only the root is never placed relative to siblings.
        assert node["fine_dep"] == "root"
    for left_child in node.get("children_DH", []):
        running = recursivelyLinearize(sentence, left_child, result, running)
    result.append(node)
    node["relevant_logprob_sum"] = running
    for right_child in node.get("children_HD", []):
        running = recursivelyLinearize(sentence, right_child, result, running)
    return running
import numpy.random
softmax_layer = torch.nn.Softmax()
logsoftmax = torch.nn.LogSoftmax()
def orderChildrenRelative(sentence, remainingChildren, reverseSoftmax):
    """Sample a linear order for the children on one side of a head.

    Repeatedly draws the next child from a softmax over the children's
    learned distance weights, storing each draw's log-probability on the
    chosen token (consumed later by the REINFORCE gradient).
    `remainingChildren` (1-based token indices) is consumed destructively.
    With reverseSoftmax=True the sampled sequence is returned reversed
    (the caller uses this for the HD side).
    Returns the child indices in surface order.
    """
    childrenLinearized = []
    while len(remainingChildren) > 0:
        # Softmax over the distance weights of the remaining candidates.
        logits = torch.cat([distanceWeights[stoi_deps[sentence[x-1]["dependency_key"]]].view(1) for x in remainingChildren])
        softmax = softmax_layer(logits.view(1,-1)).view(-1)
        # Sample which child is placed next; remember the draw's log-prob.
        selected = numpy.random.choice(range(0, len(remainingChildren)), p=softmax.data.numpy())
        log_probability = torch.log(softmax[selected])
        # Every token may be placed at most once.
        assert "linearization_logprobability" not in sentence[remainingChildren[selected]-1]
        sentence[remainingChildren[selected]-1]["linearization_logprobability"] = log_probability
        childrenLinearized.append(remainingChildren[selected])
        del remainingChildren[selected]
    if reverseSoftmax:
        childrenLinearized = childrenLinearized[::-1]
    return childrenLinearized
def orderSentence(sentence, dhLogits, printThings):
    """Sample one linearization of `sentence` under the current grammar.

    For every non-root, non-punctuation token: sample whether it precedes
    (DH) or follows (HD) its head from a sigmoid on the dependency's
    dhWeight, then sample the sibling order on each side via
    orderChildrenRelative.  Stochastic: reads the global policy weights.
    Returns (linearized_tokens, total_log_probability_of_the_sample);
    every linearized token also receives "reordered_head" pointing into
    the new order (0 for the root).
    """
    root = None
    # NOTE(review): `logits` and `logProbabilityGradient` are never used below.
    logits = [None]*len(sentence)
    logProbabilityGradient = 0
    for line in sentence:
        line["fine_dep"] = line["dep"]
        if line["fine_dep"] == "root":
            root = line["index"]
            continue
        # Punctuation is excluded from the reordering entirely.
        if line["fine_dep"].startswith("punct"):
            continue
        key = (sentence[line["head"]-1]["posUni"], line["fine_dep"], line["posUni"]) if line["fine_dep"] != "root" else stoi_deps["root"]
        line["dependency_key"] = key
        dhLogit = dhWeights[stoi_deps[key]]
        # Bernoulli sample of the head direction for this dependency.
        probability = 1/(1 + torch.exp(-dhLogit))
        dhSampled = (random() < probability.data.numpy())
        # Log-probability of the decision that was actually taken.
        line["ordering_decision_log_probability"] = torch.log(1/(1 + torch.exp(- (1 if dhSampled else -1) * dhLogit)))
        direction = "DH" if dhSampled else "HD"
        if printThings:
            print "\t".join(map(str,["ORD", line["index"], (line["word"]+" ")[:10], (".".join(list(key)) + " ")[:22], line["head"], dhSampled, direction, (str(float(probability))+" ")[:8], str(1/(1+exp(-dhLogits[key])))[:8], (str(distanceWeights[stoi_deps[key]].data.numpy())+" ")[:8] , str(originalDistanceWeights[key])[:8] ] ))
        headIndex = line["head"]-1
        # Attach the token to its head's left (DH) or right (HD) child list.
        sentence[headIndex]["children_"+direction] = (sentence[headIndex].get("children_"+direction, []) + [line["index"]])
        sentence[headIndex]["children_decisions_logprobs"] = (sentence[headIndex].get("children_decisions_logprobs", []) + [line["ordering_decision_log_probability"]])
    # Sample the relative order inside each side's child list.
    for line in sentence:
        if "children_DH" in line:
            childrenLinearized = orderChildrenRelative(sentence, line["children_DH"][:], False)
            line["children_DH"] = childrenLinearized
        if "children_HD" in line:
            childrenLinearized = orderChildrenRelative(sentence, line["children_HD"][:], True)
            line["children_HD"] = childrenLinearized
    # Flatten the tree and accumulate the sample's total log-probability.
    linearized = []
    overallLogprobSum = recursivelyLinearize(sentence, root, linearized, Variable(torch.FloatTensor([0.0])))
    if printThings or len(linearized) == 0:
        print " ".join(map(lambda x:x["word"], sentence))
        print " ".join(map(lambda x:x["word"], linearized))
    # store new dependency links under the sampled order
    moved = [None] * len(sentence)
    for i, x in enumerate(linearized):
        moved[x["index"]-1] = i
    for i,x in enumerate(linearized):
        if x["head"] == 0: # root
            x["reordered_head"] = 0
        else:
            x["reordered_head"] = 1+moved[x["head"]-1]
    return linearized, overallLogprobSum
dhLogits, vocab, vocab_deps, depsVocab = initializeOrderTable()
posUni = list(posUni)
itos_pos_uni = posUni
stoi_pos_uni = dict(zip(posUni, range(len(posUni))))
posFine = list(posFine)
itos_pos_ptb = posFine
stoi_pos_ptb = dict(zip(posFine, range(len(posFine))))
itos_pure_deps = sorted(list(depsVocab))
stoi_pure_deps = dict(zip(itos_pure_deps, range(len(itos_pure_deps))))
itos_deps = sorted(vocab_deps, key=lambda x:x[1])
stoi_deps = dict(zip(itos_deps, range(len(itos_deps))))
print itos_deps
relevantPath = "/u/scr/mhahn/deps/DLM_MEMORY_OPTIMIZED/locality_optimized_dlm/manual_output_funchead_fine_depl/"
import os
files = [x for x in os.listdir(relevantPath) if x.startswith(args.language+"_") and __file__ in x]
posCount = 0
negCount = 0
for name in files:
with open(relevantPath+name, "r") as inFile:
for line in inFile:
line = line.split("\t")
if line[1] == "obj":
dhWeight = float(line[0])
if dhWeight < 0:
negCount += 1
elif dhWeight > 0:
posCount += 1
break
print(["Neg count", negCount, "Pos count", posCount])
#if posCount >= 8 and negCount >= 8:
# print("Enough models!")
# quit()
dhWeights = Variable(torch.FloatTensor([0.0] * len(itos_deps)), requires_grad=True)
distanceWeights = Variable(torch.FloatTensor([0.0] * len(itos_deps)), requires_grad=True)
for i, key in enumerate(itos_deps):
dhLogits[key] = 0.0
if key == "obj":
dhLogits[key] = (10.0 if posCount < negCount else -10.0)
dhWeights.data[i] = dhLogits[key]
originalDistanceWeights[key] = 0.0 #random()
distanceWeights.data[i] = originalDistanceWeights[key]
words = list(vocab.iteritems())
words = sorted(words, key = lambda x:x[1], reverse=True)
itos = map(lambda x:x[0], words)
stoi = dict(zip(itos, range(len(itos))))
if len(itos) > 6:
assert stoi[itos[5]] == 5
vocab_size = 50000
word_embeddings = torch.nn.Embedding(num_embeddings = vocab_size+3, embedding_dim = 1) #.cuda()
pos_u_embeddings = torch.nn.Embedding(num_embeddings = len(posUni)+3, embedding_dim = 1) #.cuda()
pos_p_embeddings = torch.nn.Embedding(num_embeddings = len(posFine)+3, embedding_dim=1) #.cuda()
baseline = nn.Linear(3, 1) #.cuda()
dropout = nn.Dropout(0.5) #.cuda()
components = [word_embeddings, pos_u_embeddings, pos_p_embeddings, baseline] # rnn
def parameters():
    """Yield every trainable parameter: first those of the embedding and
    baseline modules, then the two grammar weight vectors."""
    for module in components:
        for weight in module.parameters():
            yield weight
    yield dhWeights
    yield distanceWeights
#for pa in parameters():
# print pa
initrange = 0.1
word_embeddings.weight.data.uniform_(-initrange, initrange)
pos_u_embeddings.weight.data.uniform_(-initrange, initrange)
pos_p_embeddings.weight.data.uniform_(-initrange, initrange)
baseline.bias.data.fill_(0)
baseline.weight.data.uniform_(-initrange, initrange)
batchSize = 1
lr_lm = 0.1
crossEntropy = 10.0
def encodeWord(w):
    """Map a word to its embedding row: frequent words get rank+3
    (indices 0-2 are reserved codes), anything beyond the vocabulary
    cap collapses to index 1."""
    rank = stoi[w]
    if rank < vocab_size:
        return rank + 3
    return 1
import torch.nn.functional
counter = 0
# Main REINFORCE loop: repeatedly sample sentence orderings, score their
# total dependency length, and update the ordering policy plus the
# per-token length baseline until the step cap is reached.
while True:
    corpus = CorpusIterator(args.language, shuffleDataSeed=myID, size=args.size).iterator(rejectShortSentences = True)
    while True:
        # Pull the next mini-batch of sentences; stop at corpus end.
        try:
            batch = map(lambda x:next(corpus), 10*range(1))
        except StopIteration:
            break
        batch = sorted(batch, key=len)
        partitions = range(10)
        shuffle(partitions)
        for partition in partitions:
            if counter > 200000:
                print "Quitting at counter "+str(counter)
                quit()
            counter += 1
            printHere = (counter % 50 == 0)
            current = batch[partition*1:(partition+1)*1]
            assert len(current)==1
            # Sample one ordering of the sentence under the current policy.
            batchOrdered, overallLogprobSum = orderSentence(current[0], dhLogits, printHere)
            batchOrdered = [batchOrdered]
            lengths = map(len, current)
            maxLength = lengths[-1]
            if maxLength <= 2:
                print "Skipping extremely short sentence"
                continue
            # Build padded index sequences (0 = padding, 2 = start code).
            input_words = []
            input_pos_u = []
            input_pos_p = []
            for i in range(maxLength+2):
                input_words.append(map(lambda x: 2 if i == 0 else (encodeWord(x[i-1]["word"]) if i <= len(x) else 0), batchOrdered))
                input_pos_u.append(map(lambda x: 2 if i == 0 else (stoi_pos_uni[x[i-1]["posUni"]]+3 if i <= len(x) else 0), batchOrdered))
                input_pos_p.append(map(lambda x: 2 if i == 0 else (stoi_pos_ptb[x[i-1]["posFine"]]+3 if i <= len(x) else 0), batchOrdered))
            loss = 0
            wordNum = 0
            lossWords = 0
            policyGradientLoss = 0
            baselineLoss = 0
            for c in components:
                c.zero_grad()
            # Manual momentum on the policy weights: decay the previous
            # gradients instead of zeroing them.
            for p in [dhWeights, distanceWeights]:
                if p.grad is not None:
                    p.grad.data = p.grad.data.mul(args.momentum)
            if True:
                words_layer = word_embeddings(Variable(torch.LongTensor(input_words))) #.cuda())
                pos_u_layer = pos_u_embeddings(Variable(torch.LongTensor(input_pos_u))) #.cuda())
                pos_p_layer = pos_p_embeddings(Variable(torch.LongTensor(input_pos_p))) #.cuda())
                inputEmbeddings = dropout(torch.cat([words_layer, pos_u_layer, pos_p_layer], dim=2))
                # Per-token baseline predictions for variance reduction.
                baseline_predictions = baseline(inputEmbeddings)
                lossesHead = [[Variable(torch.FloatTensor([0.0]))]*1 for i in range(maxLength+1)]
                cudaZero = Variable(torch.FloatTensor([0.0]), requires_grad=False)
                # First pass: accumulate each dependency's length (minus the
                # two endpoint baselines) at the dependency's right edge.
                for i in range(1,len(input_words)):
                    for j in range(1):
                        if input_words[i][j] != 0:
                            if batchOrdered[j][i-1]["head"] == 0:
                                realHead = 0
                            else:
                                realHead = batchOrdered[j][i-1]["reordered_head"]
                            if batchOrdered[j][i-1]["fine_dep"] == "root":
                                continue
                            # to make sure reward attribution considers this correctly
                            registerAt = max(i, realHead)
                            depLength = abs(i - realHead)
                            assert depLength >= 0
                            baselineLoss += torch.nn.functional.mse_loss(baseline_predictions[i][j] + baseline_predictions[realHead][j], depLength + cudaZero )
                            depLengthMinusBaselines = depLength - baseline_predictions[i][j] - baseline_predictions[realHead][j]
                            lossesHead[registerAt][j] += depLengthMinusBaselines
                            lossWords += depLength
                # Second pass: REINFORCE term = sample log-prob times the
                # detached, baselined dependency-length cost.
                for i in range(1,len(input_words)):
                    for j in range(1):
                        if input_words[i][j] != 0:
                            policyGradientLoss += overallLogprobSum * ((lossesHead[i][j]).detach().cpu())
                            # NOTE(review): input_words[i] is a list, so this
                            # comparison with the int 2 relies on Python-2
                            # mixed-type ordering (always true) — confirm intent.
                            if input_words[i] > 2 and j == 0 and printHere:
                                print [itos[input_words[i][j]-3], itos_pos_ptb[input_pos_p[i][j]-3], "Cumul_DepL_Minus_Baselines", lossesHead[i][j].data.cpu().numpy()[0], "Baseline Here", baseline_predictions[i][j].data.cpu().numpy()[0]]
                            wordNum += 1
            if wordNum == 0:
                print input_words
                print batchOrdered
                continue
            if printHere:
                print loss/wordNum
                print lossWords/wordNum
                print ["CROSS ENTROPY", crossEntropy, exp(crossEntropy)]
            # Exponential running average of dependency length per word.
            crossEntropy = 0.99 * crossEntropy + 0.01 * (lossWords/wordNum)
            # Entropy regularization keeps the direction policy stochastic.
            probabilities = torch.sigmoid(dhWeights)
            neg_entropy = torch.sum( probabilities * torch.log(probabilities) + (1-probabilities) * torch.log(1-probabilities))
            policy_related_loss = args.lr_policy * (args.entropy_weight * neg_entropy + policyGradientLoss) # lives on CPU
            if printHere:
                print "BACKWARD 1"
            policy_related_loss.backward()
            if printHere:
                print "BACKWARD 2"
            loss += baselineLoss # lives on GPU
            # NOTE(review): identity test against the int 0 only works because
            # CPython interns small ints — `loss == 0` would be clearer.
            if loss is 0:
                print "Absolutely Zero Loss"
                print current
                continue
            loss.backward()
            if printHere:
                print "BACKWARD 3 "+__file__+" "+args.language+" "+str(args.size)+" "+str(myID)+" "+str(counter)
            torch.nn.utils.clip_grad_norm(parameters(), 5.0, norm_type='inf')
            # Plain SGD step on every parameter.
            for param in parameters():
                if param.grad is None:
                    print "WARNING: None gradient"
                    continue
                param.data.sub_(lr_lm * param.grad.data)
            # Periodically dump the learned grammar weights as TSV.
            if counter % 10000 == 0:
                TARGET_DIR = "/u/scr/mhahn/deps/DLM_MEMORY_OPTIMIZED/locality_optimized_dlm/"
                print "Saving"
                with open(TARGET_DIR+"/manual_output_funchead_fine_depl_size/"+args.language+"-"+str(args.size)+"_"+__file__+"_model_"+str(myID)+".tsv", "w") as outFile:
                    print >> outFile, "\t".join(map(str,["DH_Weight","CoarseDependency","HeadPOS", "DependentPOS", "DistanceWeight", "Language", "FileName"]))
                    for i in range(len(itos_deps)):
                        head, rel, dependent = itos_deps[i]
                        dhWeight = dhWeights[i].data.numpy()
                        distanceWeight = distanceWeights[i].data.numpy()
                        print >> outFile, "\t".join(map(str,[round(dhWeight, 5), rel, head, dependent, round(distanceWeight, 5), args.language, myID]))
| [
"mhahn29@gmail.com"
] | mhahn29@gmail.com |
127a2f8601a9feda61e6ceb4404d4bf7bc2cd78d | 05c95c0fad58a65a8a73595d2f2fd13c5f78e2fe | /gym/gym/settings.py | 59ba6daedd854fc36183ce6344fca5f935207798 | [] | no_license | Kanchan528/gym-website | 572469445a23eda626aaea5c0629112468ee80d0 | 9e3a470115c6c44a8318af451f4ee0bc24c24330 | refs/heads/master | 2022-05-27T07:59:03.002294 | 2020-04-10T08:55:00 | 2020-04-10T08:55:00 | 254,588,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,232 | py | """
Django settings for gym project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment before any deployment.
SECRET_KEY = '-r)%7-q63j^)ylo9vktenq8qz1a-j=*@pi(zs0c9q9jjm-(s)f'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty while DEBUG is on; must list served hostnames in production.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'website'  # project app containing the gym site
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'gym.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to app templates.
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gym.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development default: file-based SQLite next to manage.py.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# Extra project-level static directory served in development.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)

# User-uploaded media configuration.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"kanchankharel528@gmail.com"
] | kanchankharel528@gmail.com |
e1fc9196ca7dcab3aa96d0b7c34cb560ea74883c | dc6a37efdacf41babc0049d8ed8fb724fde4ca4b | /webcrawl/naverkospi.py | 850837dbd9dffc89d48278b8871bc5ec0d0552dd | [] | no_license | imjwpak/tensor_Test | 345ea238daa520acd62a7bc1af561c0d5ea286fa | a359ba4700251cfab3b031b3ade36cc5fc643207 | refs/heads/master | 2020-07-02T14:36:55.514042 | 2019-08-10T09:12:53 | 2019-08-10T09:12:53 | 201,559,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from bs4 import BeautifulSoup
import urllib.request as url
class NaverKospiCrawler:
    """Scrapes the current KOSPI index value from the Naver finance page
    whose URL is given at construction time."""

    def __init__(self, param):
        # URL of the page containing the element with id "KOSPI_now".
        self.param = param

    def scrap(self):
        """Fetch the page, print the KOSPI value and return it.

        Returns the text of the #KOSPI_now element.  Backward compatible:
        the previous version returned None implicitly, so callers that
        ignore the return value are unaffected.
        """
        # Close the HTTP response deterministically (the original never
        # closed it).
        with url.urlopen(self.param) as resp:
            html = resp.read()
        soup = BeautifulSoup(html, 'html.parser')
        txt = soup.find(id='KOSPI_now').text
        print('코스피 : ' + txt)
        return txt
| [
"imjwpak83@naver.com"
] | imjwpak83@naver.com |
c2a0142532c21e7d40a2b1033968ff79402e01eb | b266de2df8a6050173b2f97db8d7167e92258837 | /Blender/src/babylon_js/materials/nodes/principled.py | 7965b3bfefbb6fcd4200b27ad9e482e044d5b573 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | cloud173/Exporters | 6298dbdd0a0de25b261d3db43981d960e0078721 | ced3cbdb3e94f0e1aee3af65349264f5252e0cea | refs/heads/master | 2020-06-02T10:23:15.061684 | 2019-06-07T17:23:58 | 2019-06-07T17:23:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | from .abstract import *
from mathutils import Color
#===============================================================================
class PrincipledBJSNode(AbstractBJSNode):
    """Translates Blender's Principled BSDF shader node into Babylon.js
    material channels (diffuse, metallic, specular, roughness,
    refraction, bump), taking either a linked texture or the socket's
    default value for each input."""
    # Blender node type this wrapper handles.
    bpyType = 'ShaderNodeBsdfPrincipled'

    def __init__(self, bpyNode, socketName):
        super().__init__(bpyNode, socketName)

        # NOTE: `input` shadows the builtin throughout; findInput()
        # presumably returns an upstream AbstractBJSNode when the socket
        # is linked, or the socket's default value otherwise — and
        # findTexture() returns None when a texture was registered.
        input = self.findInput('Base Color')
        defaultDiffuse = self.findTexture(input, DIFFUSE_TEX)
        if defaultDiffuse is not None:
            # 4-component default: first three are the color, [3] the alpha.
            self.diffuseColor = Color((defaultDiffuse[0], defaultDiffuse[1], defaultDiffuse[2]))
            self.diffuseAlpha = defaultDiffuse[3]
        self.mustBakeDiffuse = input.mustBake if isinstance(input, AbstractBJSNode) else False
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        input = self.findInput('Metallic')
        defaultMetallic = self.findTexture(input, METAL_TEX)
        if defaultMetallic is not None:
            self.metallic = defaultMetallic
        self.mustBakeMetal = input.mustBake if isinstance(input, AbstractBJSNode) else False
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        input = self.findInput('Specular')
        defaultSpecular = self.findTexture(input, SPECULAR_TEX)
        if defaultSpecular is not None:
            # Scalar default expanded to a grey specular color.
            self.specularColor = Color((defaultSpecular, defaultSpecular, defaultSpecular))
        self.mustBakeSpecular = input.mustBake if isinstance(input, AbstractBJSNode) else False
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        input = self.findInput('Roughness')
        defaultRoughness = self.findTexture(input, ROUGHNESS_TEX)
        if defaultRoughness is not None:
            self.roughness = defaultRoughness
        # NOTE(review): unlike the other channels, no mustBakeRoughness
        # flag is recorded here — confirm whether that is intentional.
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        input = self.findInput('IOR')
        defaultIOR = self.findTexture(input, REFRACTION_TEX)
        if defaultIOR is not None:
            self.indexOfRefraction = defaultIOR
        self.mustBakeRefraction = input.mustBake if isinstance(input, AbstractBJSNode) else False
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        input = self.findInput('Normal')
        self.findTexture(input, BUMP_TEX)
        self.mustBakeNormal = input.mustBake if isinstance(input, AbstractBJSNode) else False
"jeffrey.c.palmer@gmail.com"
] | jeffrey.c.palmer@gmail.com |
d8702593c2313af5f17e349975da77bfc2807259 | 25817954931bbd431681bca1c62e48c98c84f12a | /tests/test_lammps_proc.py | 1fb6f0c85c711fee350b10b3742502399b73ad68 | [
"BSD-3-Clause"
] | permissive | JingyuLeo/md_utils | 6ddf35af533a4dd2a94f827389b41668bb97aeec | c74cab20f46b0adb98bdc9d2a19b3d21ff9d7516 | refs/heads/master | 2021-05-25T08:58:41.955067 | 2018-10-14T21:34:38 | 2018-10-14T21:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,263 | py | # coding=utf-8
"""
Tests for lammps_proc.py.
"""
import os
import unittest
from md_utils.lammps_proc import main, WAT_H_TYPE, WAT_O_TYPE, PROT_O_IDS, H3O_O_TYPE, H3O_H_TYPE
from md_utils.md_common import capture_stdout, capture_stderr, diff_lines, silent_remove
import logging
# Uncomment one of the following to run the tests with verbose logging
# (DISABLE_REMOVE below also keys off the DEBUG level):
# logging.basicConfig(level=logging.DEBUG)
# logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# When debugging, keep generated output files around for inspection
DISABLE_REMOVE = logger.isEnabledFor(logging.DEBUG)
__author__ = 'hmayes'
# Locations of the test fixtures (configs, dumps, and known-good outputs)
DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
SUB_DATA_DIR = os.path.join(DATA_DIR, 'lammps_proc')
# Configs used to check error catching
NO_ACTION_INI_PATH = os.path.join(SUB_DATA_DIR, 'hstar_o_gofr_no_action.ini')
INCOMP_INI_PATH = os.path.join(SUB_DATA_DIR, 'lammps_proc_data_incomp.ini')
INVALID_INI = os.path.join(SUB_DATA_DIR, 'lammps_proc_data_invalid.ini')
WRONG_CARB_O_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_wrong_carb_o.ini')
WRONG_HYD_O_TYPE_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_wrong_hyd_o_type.ini')
WRONG_HYD_H_TYPE_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_wrong_hyd_h_type.ini')
WRONG_WAT_O_TYPE_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_wrong_wat_o_type.ini')
WRONG_WAT_H_TYPE_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_wrong_wat_h_type.ini')
EXTRA_HYD_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_extra_hyd_atoms.ini')
HYD_AND_H_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_hyd_and_h.ini')
# O-H distance calculation: config, produced output, known-good output
OH_DIST_INI = os.path.join(SUB_DATA_DIR, 'hydroxyl_oh_dist.ini')
# noinspection PyUnresolvedReferences
DEF_OUT = os.path.join(SUB_DATA_DIR, 'glue_sum.csv')
GOOD_OH_DIST_OUT = os.path.join(SUB_DATA_DIR, 'glue_oh_dist_good.csv')
EMPTY_LIST_INI = os.path.join(SUB_DATA_DIR, 'lammps_proc_empty_list.ini')
MISS_DUMP_INI = os.path.join(SUB_DATA_DIR, 'lammps_proc_data_missing_dump.ini')
BAD_DUMP_INI = os.path.join(SUB_DATA_DIR, 'lammps_proc_data_bad_dump.ini')
INCOMP_GOFR_INI_PATH = os.path.join(SUB_DATA_DIR, 'hstar_o_gofr_missing_delta_r.ini')
INCOMP_DUMP_INI_PATH = os.path.join(SUB_DATA_DIR, 'hstar_o_gofr_incomp_dump.ini')
INCOMP_PROT_O_INI = os.path.join(SUB_DATA_DIR, 'lammps_proc_miss_prot_oxys.ini')
# g(r) calculations for the various atom-pair combinations
HO_GOFR_INI_PATH = os.path.join(SUB_DATA_DIR, 'hstar_o_gofr.ini')
GOOD_HO_GOFR_OUT_PATH = os.path.join(SUB_DATA_DIR, 'glue_gofr_ho_good.csv')
OO_GOFR_INI_PATH = os.path.join(SUB_DATA_DIR, 'ostar_o_gofr.ini')
GOOD_OO_GOFR_OUT_PATH = os.path.join(SUB_DATA_DIR, 'glue_gofr_oo_good.csv')
HH_GOFR_INI_PATH = os.path.join(SUB_DATA_DIR, 'hstar_h_gofr.ini')
GOOD_HH_GOFR_OUT_PATH = os.path.join(SUB_DATA_DIR, 'glue_gofr_hh_good.csv')
OH_GOFR_INI_PATH = os.path.join(SUB_DATA_DIR, 'ostar_h_gofr.ini')
GOOD_OH_GOFR_OUT_PATH = os.path.join(SUB_DATA_DIR, 'glue_gofr_oh_good.csv')
HO_OO_HH_OH_GOFR_INI = os.path.join(SUB_DATA_DIR, 'ho_oo_hh_oh_gofr.ini')
GOOD_HO_OO_HH_OH_GOFR_OUT = os.path.join(SUB_DATA_DIR, 'glue_gofr_ho_oo_hh_oh_good.csv')
HO_OO_HH_OH_GOFR_INI_MAX_STEPS = os.path.join(SUB_DATA_DIR, 'ho_oo_hh_oh_gofr_max_steps.ini')
GOOD_HO_OO_HH_OH_GOFR_OUT_MAX_STEPS = os.path.join(SUB_DATA_DIR, 'glue_dump_long_gofrs_good.csv')
# noinspection PyUnresolvedReferences
DEF_GOFR_OUT = os.path.join(SUB_DATA_DIR, 'glue_dump_gofrs.csv')
# noinspection PyUnresolvedReferences
DEF_GOFR_INCOMP_OUT = os.path.join(SUB_DATA_DIR, 'glue_dump_incomp_gofrs.csv')
# noinspection PyUnresolvedReferences
DEF_MAX_STEPS_OUT = os.path.join(SUB_DATA_DIR, 'glue_dump_long_gofrs.csv')
# h_ij calculations (several functional forms)
HIJ_INI = os.path.join(SUB_DATA_DIR, 'calc_hij.ini')
# noinspection PyUnresolvedReferences
HIJ_OUT = os.path.join(SUB_DATA_DIR, 'glu_prot_deprot_sum.csv')
GOOD_HIJ_OUT = os.path.join(SUB_DATA_DIR, 'glu_prot_deprot_proc_data_good.csv')
WAT_HYD_INI = os.path.join(SUB_DATA_DIR, 'calc_wat_hyd.ini')
GOOD_WAT_HYD_OUT = os.path.join(SUB_DATA_DIR, 'glu_prot_deprot_wat_hyd_good.csv')
HIJ_ARQ_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_arq.ini')
# noinspection PyUnresolvedReferences
HIJ_ARQ_OUT = os.path.join(SUB_DATA_DIR, 'glue_revised_sum.csv')
GOOD_HIJ_ARQ_OUT = os.path.join(SUB_DATA_DIR, 'glue_revised_arq_good.csv')
HIJ_NEW_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_arq_new.ini')
GOOD_HIJ_NEW_OUT = os.path.join(SUB_DATA_DIR, 'glue_revised_new_hij_good.csv')
HIJ_NEW_GLU2_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_glu_arq_new.ini')
HIJ_NEW_GLU2_OUT = os.path.join(SUB_DATA_DIR, 'gluprot10_10no_evb_sum.csv')
GOOD_HIJ_NEW_GLU2_OUT = os.path.join(SUB_DATA_DIR, 'gluprot10_10no_evb_sum_good.csv')
HIJ_NEW_MISS_PARAM_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_arq_new_missing_param.ini')
HIJ_NEW_NONFLOAT_PARAM_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_arq_new_non_float_param.ini')
CALC_GLU_PROPS_INI = os.path.join(SUB_DATA_DIR, 'calc_glu_props.ini')
GOOD_GLU_PROPS_OUT = os.path.join(SUB_DATA_DIR, 'gluprot10_10no_evb_oco_good.csv')
# CEC distance calculations
COMBINE_CEC_INI = os.path.join(SUB_DATA_DIR, 'calc_cec_dist.ini')
COMBINE_CEC_OUT = os.path.join(SUB_DATA_DIR, '2.400_320_short_sum.csv')
GOOD_COMBINE_CEC_OUT = os.path.join(SUB_DATA_DIR, '2.400_320_short_sum_good.csv')
COMBINE_CEC_MULTI_FILE_INI = os.path.join(SUB_DATA_DIR, 'calc_cec_dist_multifile.ini')
COMBINE_CEC_MULTI_FILE_OUT = os.path.join(SUB_DATA_DIR, 'gluprot1min_dump_sum.csv')
GOOD_COMBINE_CEC_MULTI_FILE_OUT = os.path.join(SUB_DATA_DIR, 'gluprot1min_dump_sum_good.csv')
COMBINE_CEC_ONLY_STEPS_INI = os.path.join(SUB_DATA_DIR, 'calc_cec_dist_restrict_timesteps.ini')
COMBINE_CEC_ONLY_STEPS_OUT = os.path.join(SUB_DATA_DIR, '2.400_320_short_sum.csv')
GOOD_COMBINE_CEC_ONLY_STEPS_OUT = os.path.join(SUB_DATA_DIR, '2.400_320_restrict_timestep_good.csv')
HIJ_ARQ6_GLU2_INI = os.path.join(SUB_DATA_DIR, 'calc_hij_arq6.ini')
# Expected stdout when a dump file exceeds the per-file timestep limit
good_long_out_msg = 'md_utils/tests/test_data/lammps_proc/glue_dump_long_gofrs.csv\nReached the maximum timesteps ' \
                    'per dumpfile (20). To increase this number, set a larger value for max_timesteps_per_dumpfile. ' \
                    'Continuing program.\nCompleted reading'
class TestLammpsProcDataNoOutput(unittest.TestCase):
    """Error-path tests for the lammps_proc command line.

    Each run is expected to abort before producing an output file; the tests
    only check that a (hopefully) helpful message is printed to stderr and/or
    that usage help is printed to stdout.

    The recurring ``if logger.isEnabledFor(logging.DEBUG): main(test_input)``
    idiom runs the program once *uncaptured* when debug logging is on, so a
    developer can see the full console output while debugging a failure.
    """
    def testHelp(self):
        """-h prints usage to stdout and nothing to stderr."""
        test_input = ['-h']
        if logger.isEnabledFor(logging.DEBUG):
            main(test_input)
        with capture_stderr(main, test_input) as output:
            self.assertFalse(output)
        with capture_stdout(main, test_input) as output:
            self.assertTrue("optional arguments" in output)
    def testNoInputFile(self):
        """Running with no config file reports a read problem plus usage."""
        with capture_stderr(main) as output:
            self.assertTrue("Problems reading file: Could not read file" in output)
        with capture_stdout(main) as output:
            self.assertTrue("optional arguments" in output)
    def testInvalidData(self):
        """A non-integer atom-type value in the config is reported."""
        test_input = ["-c", INVALID_INI]
        with capture_stderr(main, test_input) as output:
            self.assertTrue("Problem with config vals on key h3o_o_type: invalid literal for int" in output)
        with capture_stdout(main, test_input) as output:
            self.assertTrue("optional arguments" in output)
    def testNoAction(self):
        """A config that requests no calculations is flagged."""
        test_input = ["-c", NO_ACTION_INI_PATH]
        with capture_stderr(main, test_input) as output:
            self.assertTrue("No calculations have been requested" in output)
        with capture_stdout(main, test_input) as output:
            self.assertTrue("optional arguments" in output)
    def testMissDump(self):
        """A config pointing to a nonexistent dump file is reported."""
        with capture_stderr(main, ["-c", MISS_DUMP_INI]) as output:
            self.assertTrue("No such file or directory" in output)
    def testBadDump(self):
        """A malformed dump file (non-integer field) is reported."""
        with capture_stderr(main, ["-c", BAD_DUMP_INI]) as output:
            self.assertTrue("invalid literal for int()" in output)
    def testMissingConfig(self):
        """A config missing a required key is reported."""
        test_input = ["-c", INCOMP_INI_PATH]
        if logger.isEnabledFor(logging.DEBUG):
            main(test_input)
        with capture_stderr(main, test_input) as output:
            self.assertTrue("Missing config val" in output)
        with capture_stdout(main, test_input) as output:
            self.assertTrue("optional arguments" in output)
    def testIncompProtType(self):
        """Warn when the expected number of protonatable oxygens is not found."""
        with capture_stderr(main, ["-c", INCOMP_PROT_O_INI]) as output:
            self.assertTrue("WARNING" in output)
            self.assertTrue("Expected to find exactly" in output)
    def testNegGofR(self):
        """A non-positive g(r) bin width (delta r) is rejected."""
        with capture_stderr(main, ["-c", INCOMP_GOFR_INI_PATH]) as output:
            self.assertTrue("a positive value" in output)
    def testHydAndH(self):
        """Warn when both an excess proton and extra hydronium atoms appear."""
        with capture_stderr(main, ["-c", HYD_AND_H_INI]) as output:
            for expected in [' 3 ', 'Excess proton', H3O_O_TYPE, H3O_H_TYPE]:
                self.assertTrue(expected in output)
    def testExtraHydAtoms(self):
        """Warn when more hydronium-type atoms are found than expected."""
        with capture_stderr(main, ["-c", EXTRA_HYD_INI]) as output:
            for expected in [' 7 ', 'No excess proton', H3O_O_TYPE, H3O_H_TYPE]:
                self.assertTrue(expected in output)
    def testFindNoCarbO(self):
        """Missing carboxyl oxygens are reported (message names PROT_O_IDS)."""
        with capture_stderr(main, ["-c", WRONG_CARB_O_INI]) as output:
            self.assertTrue(PROT_O_IDS in output)
    def testWrongHydH(self):
        """Warn when the configured hydronium H type matches no atoms."""
        with capture_stderr(main, ["-c", WRONG_HYD_H_TYPE_INI]) as output:
            for expected in [' 1 ', 'No excess proton', H3O_O_TYPE, H3O_H_TYPE]:
                self.assertTrue(expected in output)
    def testWrongHydO(self):
        """Warn when the configured hydronium O type matches no atoms."""
        with capture_stderr(main, ["-c", WRONG_HYD_O_TYPE_INI]) as output:
            for expected in [' 3 ', 'No excess proton', H3O_O_TYPE, H3O_H_TYPE]:
                self.assertTrue(expected in output)
    def testFindNoWatO(self):
        """Warn when no atoms of the configured water O type exist."""
        with capture_stderr(main, ["-c", WRONG_WAT_O_TYPE_INI]) as output:
            self.assertTrue(WAT_O_TYPE in output)
            self.assertTrue("no such atoms were found" in output)
    def testFindNoWatH(self):
        """Warn when no atoms of the configured water H type exist."""
        test_input = ["-c", WRONG_WAT_H_TYPE_INI]
        if logger.isEnabledFor(logging.DEBUG):
            main(test_input)
        with capture_stderr(main, test_input) as output:
            self.assertTrue(WAT_H_TYPE in output)
            self.assertTrue("no such atoms were found" in output)
    def testEmptyList(self):
        """An empty dump-file list in the config is reported."""
        test_input = ["-c", EMPTY_LIST_INI]
        if logger.isEnabledFor(logging.DEBUG):
            main(test_input)
        with capture_stderr(main, test_input) as output:
            self.assertTrue("Found no dump files to process" in output)
    def testMissNewHIJMissingParam(self):
        """A new-form h_ij config missing a parameter is reported."""
        test_input = ["-c", HIJ_NEW_MISS_PARAM_INI]
        if logger.isEnabledFor(logging.DEBUG):
            main(test_input)
        with capture_stderr(main, test_input) as output:
            self.assertTrue("Missing input value for key" in output)
    def testMissNewHIJNonfloatParam(self):
        """A new-form h_ij config with a non-float parameter is reported."""
        test_input = ["-c", HIJ_NEW_NONFLOAT_PARAM_INI]
        if logger.isEnabledFor(logging.DEBUG):
            main(test_input)
        with capture_stderr(main, test_input) as output:
            self.assertTrue("Require float inputs for keys" in output)
class TestLammpsProcData(unittest.TestCase):
    """Happy-path tests: run lammps_proc with a fixture config, diff the
    generated output file against a saved known-good file, and remove the
    generated file afterward (kept when DISABLE_REMOVE is set for debugging).

    As in the error-path class, ``if logger.isEnabledFor(logging.DEBUG):
    main(test_input)`` runs the program once uncaptured so a developer can
    see full console output while debugging.
    """
    def testOHDist(self):
        """Hydroxyl O-H distance output matches the saved good file."""
        # (removed leftover debug statements that printed to the test log)
        try:
            main(["-c", OH_DIST_INI])
            self.assertFalse(diff_lines(DEF_OUT, GOOD_OH_DIST_OUT))
        finally:
            silent_remove(DEF_OUT, disable=DISABLE_REMOVE)
    def testMaxTimestepsCalcHIJ(self):
        """h_ij run notes hitting the timestep cap; output still matches."""
        try:
            with capture_stdout(main, ["-c", HIJ_INI]) as output:
                self.assertTrue("Reached the maximum timesteps" in output)
            self.assertFalse(diff_lines(HIJ_OUT, GOOD_HIJ_OUT))
        finally:
            silent_remove(HIJ_OUT, disable=DISABLE_REMOVE)
    def testMaxTimestepsCalcWatHyd(self):
        """Water/hydronium calculation output matches the saved good file."""
        try:
            main(["-c", WAT_HYD_INI])
            self.assertFalse(diff_lines(HIJ_OUT, GOOD_WAT_HYD_OUT))
        finally:
            silent_remove(HIJ_OUT, disable=DISABLE_REMOVE)
    def testHIJArq(self):
        """h_ij in the Maupin (arq) form; also warns about an incomplete
        atom-number list in the input."""
        try:
            test_input = ["-c", HIJ_ARQ_INI]
            if logger.isEnabledFor(logging.DEBUG):
                main(test_input)
            with capture_stderr(main, test_input) as output:
                self.assertTrue("did not have the full list of atom numbers" in output)
            self.assertFalse(diff_lines(HIJ_ARQ_OUT, GOOD_HIJ_ARQ_OUT))
        finally:
            silent_remove(HIJ_ARQ_OUT, disable=DISABLE_REMOVE)
    def testIncompDump(self):
        """An incomplete dump still yields good g(r) output, with a warning."""
        try:
            with capture_stderr(main, ["-c", INCOMP_DUMP_INI_PATH]) as output:
                self.assertTrue("WARNING" in output)
            self.assertFalse(diff_lines(DEF_GOFR_INCOMP_OUT, GOOD_HO_GOFR_OUT_PATH))
        finally:
            silent_remove(DEF_GOFR_INCOMP_OUT, disable=DISABLE_REMOVE)
    def testHOGofR(self):
        """g(r) for H*-O pairs matches the saved good file."""
        try:
            main(["-c", HO_GOFR_INI_PATH])
            self.assertFalse(diff_lines(DEF_GOFR_OUT, GOOD_HO_GOFR_OUT_PATH))
        finally:
            silent_remove(DEF_GOFR_OUT, disable=DISABLE_REMOVE)
    def testOOGofR(self):
        """g(r) for O*-O pairs matches the saved good file."""
        try:
            main(["-c", OO_GOFR_INI_PATH])
            self.assertFalse(diff_lines(DEF_GOFR_OUT, GOOD_OO_GOFR_OUT_PATH))
        finally:
            silent_remove(DEF_GOFR_OUT, disable=DISABLE_REMOVE)
    def testHHGofR(self):
        """g(r) for H*-H pairs matches the saved good file."""
        try:
            main(["-c", HH_GOFR_INI_PATH])
            self.assertFalse(diff_lines(DEF_GOFR_OUT, GOOD_HH_GOFR_OUT_PATH))
        finally:
            silent_remove(DEF_GOFR_OUT, disable=DISABLE_REMOVE)
    def testOHGofR(self):
        """g(r) for O*-H pairs matches the saved good file."""
        try:
            main(["-c", OH_GOFR_INI_PATH])
            self.assertFalse(diff_lines(DEF_GOFR_OUT, GOOD_OH_GOFR_OUT_PATH))
        finally:
            silent_remove(DEF_GOFR_OUT, disable=DISABLE_REMOVE)
    def testHO_OO_HH_OHGofR(self):
        """All four g(r) pair types in one run match the saved good file."""
        try:
            main(["-c", HO_OO_HH_OH_GOFR_INI])
            self.assertFalse(diff_lines(DEF_GOFR_OUT, GOOD_HO_OO_HH_OH_GOFR_OUT))
        finally:
            silent_remove(DEF_GOFR_OUT, disable=DISABLE_REMOVE)
    def testHO_OO_HH_OHGofR_MaxSteps(self):
        """Combined g(r) run that hits the per-dumpfile timestep cap."""
        test_input = ["-c", HO_OO_HH_OH_GOFR_INI_MAX_STEPS]
        if logger.isEnabledFor(logging.DEBUG):
            main(test_input)
        try:
            with capture_stdout(main, test_input) as output:
                self.assertTrue(good_long_out_msg in output)
            self.assertFalse(diff_lines(DEF_MAX_STEPS_OUT, GOOD_HO_OO_HH_OH_GOFR_OUT_MAX_STEPS))
        finally:
            silent_remove(DEF_MAX_STEPS_OUT, disable=DISABLE_REMOVE)
    def testHIJArqNew(self):
        """h_ij in the newer arq form matches the saved good file."""
        try:
            test_input = ["-c", HIJ_NEW_INI]
            main(test_input)
            self.assertFalse(diff_lines(HIJ_ARQ_OUT, GOOD_HIJ_NEW_OUT))
        finally:
            silent_remove(HIJ_ARQ_OUT, disable=DISABLE_REMOVE)
    def testHIJArqNew2(self):
        """Newer arq h_ij form on the glu system, with printing disabled."""
        try:
            test_input = ["-c", HIJ_NEW_GLU2_INI, "-p"]
            if logger.isEnabledFor(logging.DEBUG):
                main(test_input)
            # "-p" turns off per-file printing, so stdout should be empty
            with capture_stdout(main, test_input) as output:
                self.assertFalse(output)
            self.assertFalse(diff_lines(HIJ_NEW_GLU2_OUT, GOOD_HIJ_NEW_GLU2_OUT))
        finally:
            silent_remove(HIJ_NEW_GLU2_OUT, disable=DISABLE_REMOVE)
    def testCalcProps(self):
        """Glu property calculation matches the saved good file."""
        try:
            test_input = ["-c", CALC_GLU_PROPS_INI, "-p"]
            main(test_input)
            self.assertFalse(diff_lines(HIJ_NEW_GLU2_OUT, GOOD_GLU_PROPS_OUT))
        finally:
            silent_remove(HIJ_NEW_GLU2_OUT, disable=DISABLE_REMOVE)
    def testCombineCEC(self):
        """CEC distance run warns about a missing 'timestep'; output matches."""
        try:
            test_input = ["-c", COMBINE_CEC_INI]
            if logger.isEnabledFor(logging.DEBUG):
                main(test_input)
            with capture_stderr(main, test_input) as output:
                self.assertTrue("Did not find 'timestep' value" in output)
            self.assertFalse(diff_lines(COMBINE_CEC_OUT, GOOD_COMBINE_CEC_OUT))
        finally:
            silent_remove(COMBINE_CEC_OUT, disable=DISABLE_REMOVE)
    def testCombineCECMultifile(self):
        """CEC distances combined from multiple dump files match."""
        try:
            test_input = ["-c", COMBINE_CEC_MULTI_FILE_INI]
            main(test_input)
            self.assertFalse(diff_lines(COMBINE_CEC_MULTI_FILE_OUT, GOOD_COMBINE_CEC_MULTI_FILE_OUT))
        finally:
            silent_remove(COMBINE_CEC_MULTI_FILE_OUT, disable=DISABLE_REMOVE)
    def testCombineCECRestrictTimesteps(self):
        """CEC distances restricted to selected timesteps match."""
        try:
            test_input = ["-c", COMBINE_CEC_ONLY_STEPS_INI]
            main(test_input)
            self.assertFalse(diff_lines(COMBINE_CEC_ONLY_STEPS_OUT, GOOD_COMBINE_CEC_ONLY_STEPS_OUT))
        finally:
            silent_remove(COMBINE_CEC_ONLY_STEPS_OUT, disable=DISABLE_REMOVE)
    def testHIJArq6(self):
        """arq6 h_ij form on the glu system, with printing disabled."""
        try:
            test_input = ["-c", HIJ_ARQ6_GLU2_INI, "-p"]
            if logger.isEnabledFor(logging.DEBUG):
                main(test_input)
            # "-p" turns off per-file printing, so stdout should be empty
            with capture_stdout(main, test_input) as output:
                self.assertFalse(output)
            self.assertFalse(diff_lines(HIJ_NEW_GLU2_OUT, GOOD_HIJ_NEW_GLU2_OUT))
        finally:
            silent_remove(HIJ_NEW_GLU2_OUT, disable=DISABLE_REMOVE)
| [
"hmayes@hmayes.com"
] | hmayes@hmayes.com |
1933beba2772c29f9693177e31151b7c62b2f90e | fdbb74a95924e2677466614f6ab6e2bb13b2a95a | /third_party/python/Lib/test/string_tests.py | cd3ee48a92bb7d3b6dc7516cee8c0369f121884a | [
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"ISC"
] | permissive | jart/cosmopolitan | fb11b5658939023977060a7c6c71a74093d9cb44 | 0d748ad58e1063dd1f8560f18a0c75293b9415b7 | refs/heads/master | 2023-09-06T09:17:29.303607 | 2023-09-02T03:49:13 | 2023-09-02T03:50:18 | 272,457,606 | 11,887 | 435 | ISC | 2023-09-14T17:47:58 | 2020-06-15T14:16:13 | C | UTF-8 | Python | false | false | 65,442 | py | """
Common tests shared by test_unicode, test_userstring and test_bytes.
"""
import unittest, string, sys, struct
from test import support
from collections import UserList
class Sequence:
    """Minimal sequence-like wrapper over an arbitrary seq (default 'wxyz').

    Used by the shared string tests to exercise APIs that accept generic
    sequence objects rather than real strings.
    """

    def __init__(self, seq='wxyz'):
        self.seq = seq

    def __len__(self):
        return len(self.seq)

    def __getitem__(self, i):
        return self.seq[i]
class BadSeq1(Sequence):
    """Sequence holding heterogeneous items (ints and a str), so it cannot
    be joined as strings; __str__ still renders all three items."""

    def __init__(self):
        self.seq = [7, 'hello', 123]

    def __str__(self):
        return '{0} {1} {2}'.format(*self.seq)
class BadSeq2(Sequence):
    """Sequence that lies about its length: reports 8 but holds 3 items."""

    def __init__(self):
        self.seq = ['a', 'b', 'c']

    def __len__(self):
        return 8
class BaseTest:
    # These tests are for buffers of values (bytes) and not
    # specific to character interpretation, used for bytes objects
    # and various string implementations
    # The type under test (e.g. str, bytes, bytearray).
    # Change in subclasses to change the behaviour of fixtype()
    type2test = None
    # Whether the "contained items" of the container are integers in
    # range(0, 256) (i.e. bytes, bytearray) or strings of length 1
    # (str)
    contains_bytes = False
    # All tests pass their arguments to the testing methods
    # as str objects. fixtype() can be used to propagate
    # these arguments to the appropriate type
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.items()
])
else:
return obj
def test_fixtype(self):
self.assertIs(type(self.fixtype("123")), self.type2test)
    # check that obj.method(*args) returns result
    def checkequal(self, result, obj, methodname, *args, **kwargs):
        """Assert obj.method(*args, **kwargs) == result, after converting
        all str arguments to type2test via fixtype().

        If the method returned the original object (identity), also verify
        that the same call on a subclass instance returns a *new* object,
        since subclass results must not alias the receiver.
        """
        result = self.fixtype(result)
        obj = self.fixtype(obj)
        args = self.fixtype(args)
        kwargs = {k: self.fixtype(v) for k,v in kwargs.items()}
        realresult = getattr(obj, methodname)(*args, **kwargs)
        self.assertEqual(
            result,
            realresult
        )
        # if the original is returned make sure that
        # this doesn't happen with subclasses
        if obj is realresult:
            try:
                class subtype(self.__class__.type2test):
                    pass
            except TypeError:
                pass # Skip this if we can't subclass
            else:
                obj = subtype(obj)
                realresult = getattr(obj, methodname)(*args)
                self.assertIsNot(obj, realresult)
# check that obj.method(*args) raises exc
def checkraises(self, exc, obj, methodname, *args):
obj = self.fixtype(obj)
args = self.fixtype(args)
with self.assertRaises(exc) as cm:
getattr(obj, methodname)(*args)
self.assertNotEqual(str(cm.exception), '')
# call obj.method(*args) without any checks
def checkcall(self, obj, methodname, *args):
obj = self.fixtype(obj)
args = self.fixtype(args)
getattr(obj, methodname)(*args)
    def test_count(self):
        """count(): occurrences, optional start/end (incl. negative and
        out-of-range), empty needle, empty haystack, and bad argument type
        (bytes-like types accept an int needle; str raises TypeError).
        Finishes with an exhaustive cross-check against replace()."""
        self.checkequal(3, 'aaa', 'count', 'a')
        self.checkequal(0, 'aaa', 'count', 'b')
        self.checkequal(3, 'aaa', 'count', 'a')
        self.checkequal(0, 'aaa', 'count', 'b')
        self.checkequal(3, 'aaa', 'count', 'a')
        self.checkequal(0, 'aaa', 'count', 'b')
        self.checkequal(0, 'aaa', 'count', 'b')
        self.checkequal(2, 'aaa', 'count', 'a', 1)
        self.checkequal(0, 'aaa', 'count', 'a', 10)
        self.checkequal(1, 'aaa', 'count', 'a', -1)
        self.checkequal(3, 'aaa', 'count', 'a', -10)
        self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
        self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
        self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
        self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
        # empty needle: one match per position in the searched range
        self.checkequal(3, 'aaa', 'count', '', 1)
        self.checkequal(1, 'aaa', 'count', '', 3)
        self.checkequal(0, 'aaa', 'count', '', 10)
        self.checkequal(2, 'aaa', 'count', '', -1)
        self.checkequal(4, 'aaa', 'count', '', -10)
        self.checkequal(1, '', 'count', '')
        self.checkequal(0, '', 'count', '', 1, 1)
        self.checkequal(0, '', 'count', '', sys.maxsize, 0)
        self.checkequal(0, '', 'count', 'xx')
        self.checkequal(0, '', 'count', 'xx', 1, 1)
        self.checkequal(0, '', 'count', 'xx', sys.maxsize, 0)
        self.checkraises(TypeError, 'hello', 'count')
        if self.contains_bytes:
            self.checkequal(0, 'hello', 'count', 42)
        else:
            self.checkraises(TypeError, 'hello', 'count', 42)
        # For a variety of combinations,
        # verify that str.count() matches an equivalent function
        # replacing all occurrences and then differencing the string lengths
        charset = ['', 'a', 'b']
        digits = 7
        base = len(charset)
        teststrings = set()
        # enumerate every string of up to `digits` chars over the charset
        for i in range(base ** digits):
            entry = []
            for j in range(digits):
                i, m = divmod(i, base)
                entry.append(charset[m])
            teststrings.add(''.join(entry))
        teststrings = [self.fixtype(ts) for ts in teststrings]
        for i in teststrings:
            n = len(i)
            for j in teststrings:
                r1 = i.count(j)
                if j:
                    r2, rem = divmod(n - len(i.replace(j, self.fixtype(''))),
                                     len(j))
                else:
                    r2, rem = len(i)+1, 0
                if rem or r1 != r2:
                    self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
                    self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
    def test_find(self):
        """find(): first occurrence or -1; start/end windows (including
        None, negative, and huge values); empty needle/haystack; bad
        argument type. Finishes with an exhaustive cross-check against
        the `in` operator."""
        self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
        self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
        self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
        self.checkequal(0, 'abc', 'find', '', 0)
        self.checkequal(3, 'abc', 'find', '', 3)
        self.checkequal(-1, 'abc', 'find', '', 4)
        # to check the ability to pass None as defaults
        self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
        self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
        self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
        self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
        self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
        self.checkraises(TypeError, 'hello', 'find')
        if self.contains_bytes:
            self.checkequal(-1, 'hello', 'find', 42)
        else:
            self.checkraises(TypeError, 'hello', 'find', 42)
        self.checkequal(0, '', 'find', '')
        self.checkequal(-1, '', 'find', '', 1, 1)
        self.checkequal(-1, '', 'find', '', sys.maxsize, 0)
        self.checkequal(-1, '', 'find', 'xx')
        self.checkequal(-1, '', 'find', 'xx', 1, 1)
        self.checkequal(-1, '', 'find', 'xx', sys.maxsize, 0)
        # issue 7458
        self.checkequal(-1, 'ab', 'find', 'xxx', sys.maxsize + 1, 0)
        # For a variety of combinations,
        # verify that str.find() matches __contains__
        # and that the found substring is really at that location
        charset = ['', 'a', 'b', 'c']
        digits = 5
        base = len(charset)
        teststrings = set()
        # enumerate every string of up to `digits` chars over the charset
        for i in range(base ** digits):
            entry = []
            for j in range(digits):
                i, m = divmod(i, base)
                entry.append(charset[m])
            teststrings.add(''.join(entry))
        teststrings = [self.fixtype(ts) for ts in teststrings]
        for i in teststrings:
            for j in teststrings:
                loc = i.find(j)
                r1 = (loc != -1)
                r2 = j in i
                self.assertEqual(r1, r2)
                if loc != -1:
                    self.assertEqual(i[loc:loc+len(j)], j)
    def test_rfind(self):
        """rfind(): last occurrence or -1; start/end windows (including
        None); empty needle; bad argument type. Also exhaustively
        cross-checks against the `in` operator, plus two regression
        cases (issues 7458 and 15534)."""
        self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
        self.checkequal(12, 'abcdefghiabc', 'rfind', '')
        self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
        self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
        self.checkequal(3, 'abc', 'rfind', '', 0)
        self.checkequal(3, 'abc', 'rfind', '', 3)
        self.checkequal(-1, 'abc', 'rfind', '', 4)
        # to check the ability to pass None as defaults
        self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
        self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
        self.checkequal(-1, 'rrarrrrrrrrra', 'rfind', 'a', 4, 6)
        self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4, None)
        self.checkequal( 2, 'rrarrrrrrrrra', 'rfind', 'a', None, 6)
        self.checkraises(TypeError, 'hello', 'rfind')
        if self.contains_bytes:
            self.checkequal(-1, 'hello', 'rfind', 42)
        else:
            self.checkraises(TypeError, 'hello', 'rfind', 42)
        # For a variety of combinations,
        # verify that str.rfind() matches __contains__
        # and that the found substring is really at that location
        charset = ['', 'a', 'b', 'c']
        digits = 5
        base = len(charset)
        teststrings = set()
        # enumerate every string of up to `digits` chars over the charset
        for i in range(base ** digits):
            entry = []
            for j in range(digits):
                i, m = divmod(i, base)
                entry.append(charset[m])
            teststrings.add(''.join(entry))
        teststrings = [self.fixtype(ts) for ts in teststrings]
        for i in teststrings:
            for j in teststrings:
                loc = i.rfind(j)
                r1 = (loc != -1)
                r2 = j in i
                self.assertEqual(r1, r2)
                if loc != -1:
                    self.assertEqual(i[loc:loc+len(j)], j)
        # issue 7458
        self.checkequal(-1, 'ab', 'rfind', 'xxx', sys.maxsize + 1, 0)
        # issue #15534
        self.checkequal(0, '<......\u043c...', "rfind", "<")
    def test_index(self):
        """index(): like find() but raises ValueError when the needle is
        absent; checks start/end windows (including None) and bad
        argument type."""
        self.checkequal(0, 'abcdefghiabc', 'index', '')
        self.checkequal(3, 'abcdefghiabc', 'index', 'def')
        self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
        self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)
        self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
        self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
        self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
        self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)
        # to check the ability to pass None as defaults
        self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a')
        self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4)
        self.checkraises(ValueError, 'rrarrrrrrrrra', 'index', 'a', 4, 6)
        self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4, None)
        self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a', None, 6)
        self.checkraises(TypeError, 'hello', 'index')
        if self.contains_bytes:
            self.checkraises(ValueError, 'hello', 'index', 42)
        else:
            self.checkraises(TypeError, 'hello', 'index', 42)
    def test_rindex(self):
        """rindex(): like rfind() but raises ValueError when the needle is
        absent; checks start/end windows (including None) and bad
        argument type."""
        self.checkequal(12, 'abcdefghiabc', 'rindex', '')
        self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
        self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
        self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
        self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
        self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
        self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
        self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
        self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)
        # to check the ability to pass None as defaults
        self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a')
        self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4)
        self.checkraises(ValueError, 'rrarrrrrrrrra', 'rindex', 'a', 4, 6)
        self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4, None)
        self.checkequal( 2, 'rrarrrrrrrrra', 'rindex', 'a', None, 6)
        self.checkraises(TypeError, 'hello', 'rindex')
        if self.contains_bytes:
            self.checkraises(ValueError, 'hello', 'rindex', 42)
        else:
            self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
self.checkequal('hello', 'HeLLo', 'lower')
self.checkequal('hello', 'hello', 'lower')
self.checkraises(TypeError, 'hello', 'lower', 42)
def test_upper(self):
self.checkequal('HELLO', 'HeLLo', 'upper')
self.checkequal('HELLO', 'HELLO', 'upper')
self.checkraises(TypeError, 'hello', 'upper', 42)
    def test_expandtabs(self):
        """expandtabs(): tabs expand to the next multiple-of-tabsize column;
        \\r and \\n reset the column counter. Checks default (8), explicit,
        and keyword tabsize, plus an overflow guard on 32-bit builds."""
        self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi',
                        'expandtabs')
        self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi',
                        'expandtabs', 8)
        self.checkequal('abc\rab  def\ng   hi', 'abc\rab\tdef\ng\thi',
                        'expandtabs', 4)
        self.checkequal('abc\r\nab      def\ng       hi', 'abc\r\nab\tdef\ng\thi',
                        'expandtabs')
        self.checkequal('abc\r\nab      def\ng       hi', 'abc\r\nab\tdef\ng\thi',
                        'expandtabs', 8)
        self.checkequal('abc\r\nab  def\ng   hi', 'abc\r\nab\tdef\ng\thi',
                        'expandtabs', 4)
        self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi',
                        'expandtabs', 4)
        # check keyword args
        self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi',
                        'expandtabs', tabsize=8)
        self.checkequal('abc\rab  def\ng   hi', 'abc\rab\tdef\ng\thi',
                        'expandtabs', tabsize=4)
        self.checkequal('  a\n b', ' \ta\n\tb', 'expandtabs', 1)
        self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
        # This test is only valid when sizeof(int) == sizeof(void*) == 4.
        if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
            self.checkraises(OverflowError,
                             '\ta\n\tb', 'expandtabs', sys.maxsize)
    def test_split(self):
        """split() with an explicit separator: single-char and multi-char
        separators, maxsplit limits (0, partial, >= number of splits, huge),
        adjacent separators producing empty fields, leading/trailing
        separators, keyword arguments, and error cases (bad argument type;
        empty separator is a ValueError)."""
        # by a char
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
        self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
        self.checkequal(['a', 'b|c|d'], 'a|b|c|d', 'split', '|', 1)
        self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 4)
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|',
                        sys.maxsize-2)
        self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
        self.checkequal(['a', '', 'b||c||d'], 'a||b||c||d', 'split', '|', 2)
        self.checkequal(['abcd'], 'abcd', 'split', '|')
        self.checkequal([''], '', 'split', '|')
        self.checkequal(['endcase ', ''], 'endcase |', 'split', '|')
        self.checkequal(['', ' startcase'], '| startcase', 'split', '|')
        self.checkequal(['', 'bothcase', ''], '|bothcase|', 'split', '|')
        self.checkequal(['a', '', 'b\x00c\x00d'], 'a\x00\x00b\x00c\x00d', 'split', '\x00', 2)
        self.checkequal(['a']*20, ('a|'*20)[:-1], 'split', '|')
        self.checkequal(['a']*15 +['a|a|a|a|a'],
                        ('a|'*20)[:-1], 'split', '|', 15)
        # by string
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
        self.checkequal(['a', 'b//c//d'], 'a//b//c//d', 'split', '//', 1)
        self.checkequal(['a', 'b', 'c//d'], 'a//b//c//d', 'split', '//', 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 4)
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//',
                        sys.maxsize-10)
        self.checkequal(['a//b//c//d'], 'a//b//c//d', 'split', '//', 0)
        self.checkequal(['a', '', 'b////c////d'], 'a////b////c////d', 'split', '//', 2)
        self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')
        self.checkequal(['', ' begincase'], 'test begincase', 'split', 'test')
        self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
                        'split', 'test')
        self.checkequal(['a', 'bc'], 'abbbc', 'split', 'bb')
        self.checkequal(['', ''], 'aaa', 'split', 'aaa')
        self.checkequal(['aaa'], 'aaa', 'split', 'aaa', 0)
        self.checkequal(['ab', 'ab'], 'abbaab', 'split', 'ba')
        self.checkequal(['aaaa'], 'aaaa', 'split', 'aab')
        self.checkequal([''], '', 'split', 'aaa')
        self.checkequal(['aa'], 'aa', 'split', 'aaa')
        self.checkequal(['A', 'bobb'], 'Abbobbbobb', 'split', 'bbobb')
        self.checkequal(['A', 'B', ''], 'AbbobbBbbobb', 'split', 'bbobb')
        self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH')
        self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH', 19)
        self.checkequal(['a']*18 + ['aBLAHa'], ('aBLAH'*20)[:-4],
                        'split', 'BLAH', 18)
        # with keyword args
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', sep='|')
        self.checkequal(['a', 'b|c|d'],
                        'a|b|c|d', 'split', '|', maxsplit=1)
        self.checkequal(['a', 'b|c|d'],
                        'a|b|c|d', 'split', sep='|', maxsplit=1)
        self.checkequal(['a', 'b|c|d'],
                        'a|b|c|d', 'split', maxsplit=1, sep='|')
        self.checkequal(['a', 'b c d'],
                        'a b c d', 'split', maxsplit=1)
        # argument type
        self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)
        # null case
        self.checkraises(ValueError, 'hello', 'split', '')
        self.checkraises(ValueError, 'hello', 'split', '', 0)
    def test_rsplit(self):
        """rsplit() with an explicit separator: the mirror image of
        test_split — maxsplit consumes separators from the right. Covers
        the same separator/maxsplit/empty-field/keyword/error cases."""
        # by a char
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|')
        self.checkequal(['a|b|c', 'd'], 'a|b|c|d', 'rsplit', '|', 1)
        self.checkequal(['a|b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 4)
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|',
                        sys.maxsize-100)
        self.checkequal(['a|b|c|d'], 'a|b|c|d', 'rsplit', '|', 0)
        self.checkequal(['a||b||c', '', 'd'], 'a||b||c||d', 'rsplit', '|', 2)
        self.checkequal(['abcd'], 'abcd', 'rsplit', '|')
        self.checkequal([''], '', 'rsplit', '|')
        self.checkequal(['', ' begincase'], '| begincase', 'rsplit', '|')
        self.checkequal(['endcase ', ''], 'endcase |', 'rsplit', '|')
        self.checkequal(['', 'bothcase', ''], '|bothcase|', 'rsplit', '|')
        self.checkequal(['a\x00\x00b', 'c', 'd'], 'a\x00\x00b\x00c\x00d', 'rsplit', '\x00', 2)
        self.checkequal(['a']*20, ('a|'*20)[:-1], 'rsplit', '|')
        self.checkequal(['a|a|a|a|a']+['a']*15,
                        ('a|'*20)[:-1], 'rsplit', '|', 15)
        # by string
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//')
        self.checkequal(['a//b//c', 'd'], 'a//b//c//d', 'rsplit', '//', 1)
        self.checkequal(['a//b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 4)
        self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//',
                        sys.maxsize-5)
        self.checkequal(['a//b//c//d'], 'a//b//c//d', 'rsplit', '//', 0)
        self.checkequal(['a////b////c', '', 'd'], 'a////b////c////d', 'rsplit', '//', 2)
        self.checkequal(['', ' begincase'], 'test begincase', 'rsplit', 'test')
        self.checkequal(['endcase ', ''], 'endcase test', 'rsplit', 'test')
        self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
                        'rsplit', 'test')
        self.checkequal(['ab', 'c'], 'abbbc', 'rsplit', 'bb')
        self.checkequal(['', ''], 'aaa', 'rsplit', 'aaa')
        self.checkequal(['aaa'], 'aaa', 'rsplit', 'aaa', 0)
        self.checkequal(['ab', 'ab'], 'abbaab', 'rsplit', 'ba')
        self.checkequal(['aaaa'], 'aaaa', 'rsplit', 'aab')
        self.checkequal([''], '', 'rsplit', 'aaa')
        self.checkequal(['aa'], 'aa', 'rsplit', 'aaa')
        self.checkequal(['bbob', 'A'], 'bbobbbobbA', 'rsplit', 'bbobb')
        self.checkequal(['', 'B', 'A'], 'bbobbBbbobbA', 'rsplit', 'bbobb')
        self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH')
        self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH', 19)
        self.checkequal(['aBLAHa'] + ['a']*18, ('aBLAH'*20)[:-4],
                        'rsplit', 'BLAH', 18)
        # with keyword args
        self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', sep='|')
        self.checkequal(['a|b|c', 'd'],
                        'a|b|c|d', 'rsplit', '|', maxsplit=1)
        self.checkequal(['a|b|c', 'd'],
                        'a|b|c|d', 'rsplit', sep='|', maxsplit=1)
        self.checkequal(['a|b|c', 'd'],
                        'a|b|c|d', 'rsplit', maxsplit=1, sep='|')
        self.checkequal(['a b c', 'd'],
                        'a b c d', 'rsplit', maxsplit=1)
        # argument type
        self.checkraises(TypeError, 'hello', 'rsplit', 42, 42, 42)
        # null case
        self.checkraises(ValueError, 'hello', 'rsplit', '')
        self.checkraises(ValueError, 'hello', 'rsplit', '', 0)
    def test_replace(self):
        """Exercise replace() across every size regime of 'from'/'to'
        (empty, single char, multi-char) and every kind of count argument
        (absent, 0, 1..n, clamped, negative, sys.maxsize)."""
        EQ = self.checkequal
        # Operations on the empty string
        EQ("", "", "replace", "", "")
        EQ("A", "", "replace", "", "A")
        EQ("", "", "replace", "A", "")
        EQ("", "", "replace", "A", "A")
        EQ("", "", "replace", "", "", 100)
        EQ("", "", "replace", "", "", sys.maxsize)
        # interleave (from=="", 'to' gets inserted everywhere)
        EQ("A", "A", "replace", "", "")
        EQ("*A*", "A", "replace", "", "*")
        EQ("*1A*1", "A", "replace", "", "*1")
        EQ("*-#A*-#", "A", "replace", "", "*-#")
        EQ("*-A*-A*-", "AA", "replace", "", "*-")
        EQ("*-A*-A*-", "AA", "replace", "", "*-", -1)
        EQ("*-A*-A*-", "AA", "replace", "", "*-", sys.maxsize)
        EQ("*-A*-A*-", "AA", "replace", "", "*-", 4)
        EQ("*-A*-A*-", "AA", "replace", "", "*-", 3)
        EQ("*-A*-A", "AA", "replace", "", "*-", 2)
        EQ("*-AA", "AA", "replace", "", "*-", 1)
        EQ("AA", "AA", "replace", "", "*-", 0)
        # single character deletion (from=="A", to=="")
        EQ("", "A", "replace", "A", "")
        EQ("", "AAA", "replace", "A", "")
        EQ("", "AAA", "replace", "A", "", -1)
        EQ("", "AAA", "replace", "A", "", sys.maxsize)
        EQ("", "AAA", "replace", "A", "", 4)
        EQ("", "AAA", "replace", "A", "", 3)
        EQ("A", "AAA", "replace", "A", "", 2)
        EQ("AA", "AAA", "replace", "A", "", 1)
        EQ("AAA", "AAA", "replace", "A", "", 0)
        EQ("", "AAAAAAAAAA", "replace", "A", "")
        EQ("BCD", "ABACADA", "replace", "A", "")
        EQ("BCD", "ABACADA", "replace", "A", "", -1)
        EQ("BCD", "ABACADA", "replace", "A", "", sys.maxsize)
        EQ("BCD", "ABACADA", "replace", "A", "", 5)
        EQ("BCD", "ABACADA", "replace", "A", "", 4)
        EQ("BCDA", "ABACADA", "replace", "A", "", 3)
        EQ("BCADA", "ABACADA", "replace", "A", "", 2)
        EQ("BACADA", "ABACADA", "replace", "A", "", 1)
        EQ("ABACADA", "ABACADA", "replace", "A", "", 0)
        EQ("BCD", "ABCAD", "replace", "A", "")
        EQ("BCD", "ABCADAA", "replace", "A", "")
        EQ("BCD", "BCD", "replace", "A", "")
        EQ("*************", "*************", "replace", "A", "")
        EQ("^A^", "^"+"A"*1000+"^", "replace", "A", "", 999)
        # substring deletion (from=="the", to=="")
        EQ("", "the", "replace", "the", "")
        EQ("ater", "theater", "replace", "the", "")
        EQ("", "thethe", "replace", "the", "")
        EQ("", "thethethethe", "replace", "the", "")
        EQ("aaaa", "theatheatheathea", "replace", "the", "")
        EQ("that", "that", "replace", "the", "")
        EQ("thaet", "thaet", "replace", "the", "")
        EQ("here and re", "here and there", "replace", "the", "")
        EQ("here and re and re", "here and there and there",
           "replace", "the", "", sys.maxsize)
        EQ("here and re and re", "here and there and there",
           "replace", "the", "", -1)
        EQ("here and re and re", "here and there and there",
           "replace", "the", "", 3)
        EQ("here and re and re", "here and there and there",
           "replace", "the", "", 2)
        EQ("here and re and there", "here and there and there",
           "replace", "the", "", 1)
        EQ("here and there and there", "here and there and there",
           "replace", "the", "", 0)
        EQ("here and re and re", "here and there and there", "replace", "the", "")
        EQ("abc", "abc", "replace", "the", "")
        EQ("abcdefg", "abcdefg", "replace", "the", "")
        # substring deletion (from=="bob", to=="")
        EQ("bob", "bbobob", "replace", "bob", "")
        EQ("bobXbob", "bbobobXbbobob", "replace", "bob", "")
        EQ("aaaaaaa", "aaaaaaabob", "replace", "bob", "")
        EQ("aaaaaaa", "aaaaaaa", "replace", "bob", "")
        # single character replace in place (len(from)==len(to)==1)
        EQ("Who goes there?", "Who goes there?", "replace", "o", "o")
        EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O")
        EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", sys.maxsize)
        EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", -1)
        EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 3)
        EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 2)
        EQ("WhO goes there?", "Who goes there?", "replace", "o", "O", 1)
        EQ("Who goes there?", "Who goes there?", "replace", "o", "O", 0)
        EQ("Who goes there?", "Who goes there?", "replace", "a", "q")
        EQ("who goes there?", "Who goes there?", "replace", "W", "w")
        EQ("wwho goes there?ww", "WWho goes there?WW", "replace", "W", "w")
        EQ("Who goes there!", "Who goes there?", "replace", "?", "!")
        EQ("Who goes there!!", "Who goes there??", "replace", "?", "!")
        EQ("Who goes there?", "Who goes there?", "replace", ".", "!")
        # substring replace in place (len(from)==len(to) > 1)
        EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**")
        EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", sys.maxsize)
        EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", -1)
        EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 4)
        EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 3)
        EQ("Th** ** a tissue", "This is a tissue", "replace", "is", "**", 2)
        EQ("Th** is a tissue", "This is a tissue", "replace", "is", "**", 1)
        EQ("This is a tissue", "This is a tissue", "replace", "is", "**", 0)
        EQ("cobob", "bobob", "replace", "bob", "cob")
        EQ("cobobXcobocob", "bobobXbobobob", "replace", "bob", "cob")
        EQ("bobob", "bobob", "replace", "bot", "bot")
        # replace single character (len(from)==1, len(to)>1)
        EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK")
        EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", -1)
        EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", sys.maxsize)
        EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", 2)
        EQ("ReyKKjavik", "Reykjavik", "replace", "k", "KK", 1)
        EQ("Reykjavik", "Reykjavik", "replace", "k", "KK", 0)
        EQ("A----B----C----", "A.B.C.", "replace", ".", "----")
        # issue #15534
        EQ('...\u043c......<', '...\u043c......<', "replace", "<", "<")
        EQ("Reykjavik", "Reykjavik", "replace", "q", "KK")
        # replace substring (len(from)>1, len(to)!=len(from))
        EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
           "replace", "spam", "ham")
        EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
           "replace", "spam", "ham", sys.maxsize)
        EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
           "replace", "spam", "ham", -1)
        EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
           "replace", "spam", "ham", 4)
        EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
           "replace", "spam", "ham", 3)
        EQ("ham, ham, eggs and spam", "spam, spam, eggs and spam",
           "replace", "spam", "ham", 2)
        EQ("ham, spam, eggs and spam", "spam, spam, eggs and spam",
           "replace", "spam", "ham", 1)
        EQ("spam, spam, eggs and spam", "spam, spam, eggs and spam",
           "replace", "spam", "ham", 0)
        EQ("bobob", "bobobob", "replace", "bobob", "bob")
        EQ("bobobXbobob", "bobobobXbobobob", "replace", "bobob", "bob")
        EQ("BOBOBOB", "BOBOBOB", "replace", "bob", "bobby")
        self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
        self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
        self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
        self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
        self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
        self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
        self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
        self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
        self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
        self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
        self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
        self.checkequal('abc', 'abc', 'replace', '', '-', 0)
        self.checkequal('', '', 'replace', '', '')
        self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
        self.checkequal('abc', 'abc', 'replace', 'xy', '--')
        # Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
        # MemoryError due to empty result (platform malloc issue when requesting
        # 0 bytes).
        self.checkequal('', '123', 'replace', '123', '')
        self.checkequal('', '123123', 'replace', '123', '')
        self.checkequal('x', '123x123', 'replace', '123', '')
        self.checkraises(TypeError, 'hello', 'replace')
        self.checkraises(TypeError, 'hello', 'replace', 42)
        self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
        self.checkraises(TypeError, 'hello', 'replace', 'h', 42)
    @unittest.skipIf(sys.maxsize > (1 << 32) or struct.calcsize('P') != 4,
                     'only applies to 32-bit platforms')
    def test_replace_overflow(self):
        """On 32-bit builds, replace() must raise OverflowError (not crash
        or wrap) when the result size would exceed the address space."""
        # Check for overflow checking on 32 bit machines
        A2_16 = "A" * (2**16)
        self.checkraises(OverflowError, A2_16, "replace", "", A2_16)
        self.checkraises(OverflowError, A2_16, "replace", "A", A2_16)
        self.checkraises(OverflowError, A2_16, "replace", "AA", A2_16+A2_16)
def test_capitalize(self):
self.checkequal(' hello ', ' hello ', 'capitalize')
self.checkequal('Hello ', 'Hello ','capitalize')
self.checkequal('Hello ', 'hello ','capitalize')
self.checkequal('Aaaa', 'aaaa', 'capitalize')
self.checkequal('Aaaa', 'AaAa', 'capitalize')
self.checkraises(TypeError, 'hello', 'capitalize', 42)
    def test_additional_split(self):
        """split() on whitespace with explicit maxsplit values, including
        0, clamped-large, and sys.maxsize-adjacent counts."""
        self.checkequal(['this', 'is', 'the', 'split', 'function'],
            'this is the split function', 'split')
        # by whitespace
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
        self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
        self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None,
                        sys.maxsize-1)
        self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
        self.checkequal(['a b c d'], ' a b c d', 'split', None, 0)
        self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
        self.checkequal([], ' ', 'split')
        self.checkequal(['a'], ' a ', 'split')
        self.checkequal(['a', 'b'], ' a b ', 'split')
        self.checkequal(['a', 'b '], ' a b ', 'split', None, 1)
        self.checkequal(['a b c '], ' a b c ', 'split', None, 0)
        self.checkequal(['a', 'b c '], ' a b c ', 'split', None, 1)
        self.checkequal(['a', 'b', 'c '], ' a b c ', 'split', None, 2)
        self.checkequal(['a', 'b', 'c'], ' a b c ', 'split', None, 3)
        self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'split')
        aaa = ' a '*20
        self.checkequal(['a']*20, aaa, 'split')
        self.checkequal(['a'] + [aaa[4:]], aaa, 'split', None, 1)
        self.checkequal(['a']*19 + ['a '], aaa, 'split', None, 19)
        # each individual ASCII whitespace character acts as a separator
        for b in ('arf\tbarf', 'arf\nbarf', 'arf\rbarf',
                  'arf\fbarf', 'arf\vbarf'):
            self.checkequal(['arf', 'barf'], b, 'split')
            self.checkequal(['arf', 'barf'], b, 'split', None)
            self.checkequal(['arf', 'barf'], b, 'split', None, 2)
    def test_additional_rsplit(self):
        """rsplit() on whitespace with explicit maxsplit values; splits are
        counted from the right-hand end."""
        self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
                         'this is the rsplit function', 'rsplit')
        # by whitespace
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'rsplit')
        self.checkequal(['a b c', 'd'], 'a b c d', 'rsplit', None, 1)
        self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 4)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None,
                        sys.maxsize-20)
        self.checkequal(['a b c d'], 'a b c d', 'rsplit', None, 0)
        self.checkequal(['a b c d'], 'a b c d ', 'rsplit', None, 0)
        self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
        self.checkequal([], ' ', 'rsplit')
        self.checkequal(['a'], ' a ', 'rsplit')
        self.checkequal(['a', 'b'], ' a b ', 'rsplit')
        self.checkequal([' a', 'b'], ' a b ', 'rsplit', None, 1)
        self.checkequal([' a b c'], ' a b c ', 'rsplit',
                        None, 0)
        self.checkequal([' a b','c'], ' a b c ', 'rsplit',
                        None, 1)
        self.checkequal([' a', 'b', 'c'], ' a b c ', 'rsplit',
                        None, 2)
        self.checkequal(['a', 'b', 'c'], ' a b c ', 'rsplit',
                        None, 3)
        self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'rsplit', None, 88)
        aaa = ' a '*20
        self.checkequal(['a']*20, aaa, 'rsplit')
        self.checkequal([aaa[:-4]] + ['a'], aaa, 'rsplit', None, 1)
        self.checkequal([' a a'] + ['a']*18, aaa, 'rsplit', None, 18)
        # each individual ASCII whitespace character acts as a separator
        for b in ('arf\tbarf', 'arf\nbarf', 'arf\rbarf',
                  'arf\fbarf', 'arf\vbarf'):
            self.checkequal(['arf', 'barf'], b, 'rsplit')
            self.checkequal(['arf', 'barf'], b, 'rsplit', None)
            self.checkequal(['arf', 'barf'], b, 'rsplit', None, 2)
def test_strip_whitespace(self):
self.checkequal('hello', ' hello ', 'strip')
self.checkequal('hello ', ' hello ', 'lstrip')
self.checkequal(' hello', ' hello ', 'rstrip')
self.checkequal('hello', 'hello', 'strip')
b = ' \t\n\r\f\vabc \t\n\r\f\v'
self.checkequal('abc', b, 'strip')
self.checkequal('abc \t\n\r\f\v', b, 'lstrip')
self.checkequal(' \t\n\r\f\vabc', b, 'rstrip')
# strip/lstrip/rstrip with None arg
self.checkequal('hello', ' hello ', 'strip', None)
self.checkequal('hello ', ' hello ', 'lstrip', None)
self.checkequal(' hello', ' hello ', 'rstrip', None)
self.checkequal('hello', 'hello', 'strip', None)
def test_strip(self):
# strip/lstrip/rstrip with str arg
self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
self.checkequal('hello', 'hello', 'strip', 'xyz')
self.checkequal('', 'mississippi', 'strip', 'mississippi')
# only trim the start and end; does not strip internal characters
self.checkequal('mississipp', 'mississippi', 'strip', 'i')
self.checkraises(TypeError, 'hello', 'strip', 42, 42)
self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)
def test_ljust(self):
self.checkequal('abc ', 'abc', 'ljust', 10)
self.checkequal('abc ', 'abc', 'ljust', 6)
self.checkequal('abc', 'abc', 'ljust', 3)
self.checkequal('abc', 'abc', 'ljust', 2)
self.checkequal('abc*******', 'abc', 'ljust', 10, '*')
self.checkraises(TypeError, 'abc', 'ljust')
def test_rjust(self):
self.checkequal(' abc', 'abc', 'rjust', 10)
self.checkequal(' abc', 'abc', 'rjust', 6)
self.checkequal('abc', 'abc', 'rjust', 3)
self.checkequal('abc', 'abc', 'rjust', 2)
self.checkequal('*******abc', 'abc', 'rjust', 10, '*')
self.checkraises(TypeError, 'abc', 'rjust')
def test_center(self):
self.checkequal(' abc ', 'abc', 'center', 10)
self.checkequal(' abc ', 'abc', 'center', 6)
self.checkequal('abc', 'abc', 'center', 3)
self.checkequal('abc', 'abc', 'center', 2)
self.checkequal('***abc****', 'abc', 'center', 10, '*')
self.checkraises(TypeError, 'abc', 'center')
def test_swapcase(self):
self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')
self.checkraises(TypeError, 'hello', 'swapcase', 42)
def test_zfill(self):
self.checkequal('123', '123', 'zfill', 2)
self.checkequal('123', '123', 'zfill', 3)
self.checkequal('0123', '123', 'zfill', 4)
self.checkequal('+123', '+123', 'zfill', 3)
self.checkequal('+123', '+123', 'zfill', 4)
self.checkequal('+0123', '+123', 'zfill', 5)
self.checkequal('-123', '-123', 'zfill', 3)
self.checkequal('-123', '-123', 'zfill', 4)
self.checkequal('-0123', '-123', 'zfill', 5)
self.checkequal('000', '', 'zfill', 3)
self.checkequal('34', '34', 'zfill', 1)
self.checkequal('0034', '34', 'zfill', 4)
self.checkraises(TypeError, '123', 'zfill')
def test_islower(self):
self.checkequal(False, '', 'islower')
self.checkequal(True, 'a', 'islower')
self.checkequal(False, 'A', 'islower')
self.checkequal(False, '\n', 'islower')
self.checkequal(True, 'abc', 'islower')
self.checkequal(False, 'aBc', 'islower')
self.checkequal(True, 'abc\n', 'islower')
self.checkraises(TypeError, 'abc', 'islower', 42)
def test_isupper(self):
self.checkequal(False, '', 'isupper')
self.checkequal(False, 'a', 'isupper')
self.checkequal(True, 'A', 'isupper')
self.checkequal(False, '\n', 'isupper')
self.checkequal(True, 'ABC', 'isupper')
self.checkequal(False, 'AbC', 'isupper')
self.checkequal(True, 'ABC\n', 'isupper')
self.checkraises(TypeError, 'abc', 'isupper', 42)
def test_istitle(self):
self.checkequal(False, '', 'istitle')
self.checkequal(False, 'a', 'istitle')
self.checkequal(True, 'A', 'istitle')
self.checkequal(False, '\n', 'istitle')
self.checkequal(True, 'A Titlecased Line', 'istitle')
self.checkequal(True, 'A\nTitlecased Line', 'istitle')
self.checkequal(True, 'A Titlecased, Line', 'istitle')
self.checkequal(False, 'Not a capitalized String', 'istitle')
self.checkequal(False, 'Not\ta Titlecase String', 'istitle')
self.checkequal(False, 'Not--a Titlecase String', 'istitle')
self.checkequal(False, 'NOT', 'istitle')
self.checkraises(TypeError, 'abc', 'istitle', 42)
def test_isspace(self):
self.checkequal(False, '', 'isspace')
self.checkequal(False, 'a', 'isspace')
self.checkequal(True, ' ', 'isspace')
self.checkequal(True, '\t', 'isspace')
self.checkequal(True, '\r', 'isspace')
self.checkequal(True, '\n', 'isspace')
self.checkequal(True, ' \t\r\n', 'isspace')
self.checkequal(False, ' \t\r\na', 'isspace')
self.checkraises(TypeError, 'abc', 'isspace', 42)
def test_isalpha(self):
self.checkequal(False, '', 'isalpha')
self.checkequal(True, 'a', 'isalpha')
self.checkequal(True, 'A', 'isalpha')
self.checkequal(False, '\n', 'isalpha')
self.checkequal(True, 'abc', 'isalpha')
self.checkequal(False, 'aBc123', 'isalpha')
self.checkequal(False, 'abc\n', 'isalpha')
self.checkraises(TypeError, 'abc', 'isalpha', 42)
def test_isalnum(self):
self.checkequal(False, '', 'isalnum')
self.checkequal(True, 'a', 'isalnum')
self.checkequal(True, 'A', 'isalnum')
self.checkequal(False, '\n', 'isalnum')
self.checkequal(True, '123abc456', 'isalnum')
self.checkequal(True, 'a1b3c', 'isalnum')
self.checkequal(False, 'aBc000 ', 'isalnum')
self.checkequal(False, 'abc\n', 'isalnum')
self.checkraises(TypeError, 'abc', 'isalnum', 42)
def test_isdigit(self):
self.checkequal(False, '', 'isdigit')
self.checkequal(False, 'a', 'isdigit')
self.checkequal(True, '0', 'isdigit')
self.checkequal(True, '0123456789', 'isdigit')
self.checkequal(False, '0123456789a', 'isdigit')
self.checkraises(TypeError, 'abc', 'isdigit', 42)
def test_title(self):
self.checkequal(' Hello ', ' hello ', 'title')
self.checkequal('Hello ', 'hello ', 'title')
self.checkequal('Hello ', 'Hello ', 'title')
self.checkequal('Format This As Title String', "fOrMaT thIs aS titLe String", 'title')
self.checkequal('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String", 'title', )
self.checkequal('Getint', "getInt", 'title')
self.checkraises(TypeError, 'hello', 'title', 42)
    def test_splitlines(self):
        """splitlines() on \\n, \\r and \\r\\n boundaries, with keepends
        passed positionally, by keyword, and omitted."""
        self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi", 'splitlines')
        self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi", 'splitlines')
        self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi", 'splitlines')
        self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n", 'splitlines')
        self.checkequal(['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r", 'splitlines')
        self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r", 'splitlines')
        self.checkequal(['', 'abc', 'def', 'ghi', ''],
                        "\nabc\ndef\r\nghi\n\r", 'splitlines', False)
        self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'],
                        "\nabc\ndef\r\nghi\n\r", 'splitlines', True)
        self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r",
                        'splitlines', keepends=False)
        self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'],
                        "\nabc\ndef\r\nghi\n\r", 'splitlines', keepends=True)
        self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
class CommonTest(BaseTest):
    # Tests usable by every string-like class; currently that means
    # str and UserString.

    def test_hash(self):
        """SF bug 1054139: the += concatenation optimization must
        invalidate a previously cached hash value."""
        whole = self.type2test('DNSSEC')
        grown = self.type2test('')
        for ch in whole:
            grown += ch
            hash(grown)      # cache a hash at every intermediate length
        self.assertEqual(hash(whole), hash(grown))

    def test_capitalize_nonascii(self):
        """capitalize() must lowercase titlecased and other cased
        non-ASCII characters in the tail correctly."""
        cases = [
            # \u1ffc is the titlecased char
            ('\u03a9\u0399\u1ff3\u1ff3\u1ff3',
             '\u1ff3\u1ff3\u1ffc\u1ffc'),
            # cased non-letter chars
            ('\u24c5\u24e8\u24e3\u24d7\u24de\u24dd',
             '\u24c5\u24ce\u24c9\u24bd\u24c4\u24c3'),
            ('\u24c5\u24e8\u24e3\u24d7\u24de\u24dd',
             '\u24df\u24e8\u24e3\u24d7\u24de\u24dd'),
            ('\u2160\u2171\u2172',
             '\u2160\u2161\u2162'),
            ('\u2160\u2171\u2172',
             '\u2170\u2171\u2172'),
            # Ll chars with no upper - nothing changes here
            ('\u019b\u1d00\u1d86\u0221\u1fb7',
             '\u019b\u1d00\u1d86\u0221\u1fb7'),
        ]
        for expected, arg in cases:
            self.checkequal(expected, arg, 'capitalize')
class MixinStrUnicodeUserStringTest:
# additional tests that only work for
# stringlike objects, i.e. str, UserString
    def test_startswith(self):
        """startswith() with start/end bounds (including negative and
        out-of-range values) and tuple-of-prefixes arguments."""
        self.checkequal(True, 'hello', 'startswith', 'he')
        self.checkequal(True, 'hello', 'startswith', 'hello')
        self.checkequal(False, 'hello', 'startswith', 'hello world')
        self.checkequal(True, 'hello', 'startswith', '')
        self.checkequal(False, 'hello', 'startswith', 'ello')
        self.checkequal(True, 'hello', 'startswith', 'ello', 1)
        self.checkequal(True, 'hello', 'startswith', 'o', 4)
        self.checkequal(False, 'hello', 'startswith', 'o', 5)
        self.checkequal(True, 'hello', 'startswith', '', 5)
        self.checkequal(False, 'hello', 'startswith', 'lo', 6)
        self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
        self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
        self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)
        self.checkequal(True, '', 'startswith', '', 0, 1)
        self.checkequal(True, '', 'startswith', '', 0, 0)
        self.checkequal(False, '', 'startswith', '', 1, 0)
        # test negative indices
        self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
        self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
        self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
        self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
        self.checkequal(False, 'hello', 'startswith', 'ello', -5)
        self.checkequal(True, 'hello', 'startswith', 'ello', -4)
        self.checkequal(False, 'hello', 'startswith', 'o', -2)
        self.checkequal(True, 'hello', 'startswith', 'o', -1)
        self.checkequal(True, 'hello', 'startswith', '', -3, -3)
        self.checkequal(False, 'hello', 'startswith', 'lo', -9)
        self.checkraises(TypeError, 'hello', 'startswith')
        self.checkraises(TypeError, 'hello', 'startswith', 42)
        # test tuple arguments
        self.checkequal(True, 'hello', 'startswith', ('he', 'ha'))
        self.checkequal(False, 'hello', 'startswith', ('lo', 'llo'))
        self.checkequal(True, 'hello', 'startswith', ('hellox', 'hello'))
        self.checkequal(False, 'hello', 'startswith', ())
        self.checkequal(True, 'helloworld', 'startswith', ('hellowo',
                                                           'rld', 'lowo'), 3)
        self.checkequal(False, 'helloworld', 'startswith', ('hellowo', 'ello',
                                                            'rld'), 3)
        self.checkequal(True, 'hello', 'startswith', ('lo', 'he'), 0, -1)
        self.checkequal(False, 'hello', 'startswith', ('he', 'hel'), 0, 1)
        self.checkequal(True, 'hello', 'startswith', ('he', 'hel'), 0, 2)
        self.checkraises(TypeError, 'hello', 'startswith', (42,))
    def test_endswith(self):
        """endswith() with start/end bounds (including negative and
        out-of-range values) and tuple-of-suffixes arguments."""
        self.checkequal(True, 'hello', 'endswith', 'lo')
        self.checkequal(False, 'hello', 'endswith', 'he')
        self.checkequal(True, 'hello', 'endswith', '')
        self.checkequal(False, 'hello', 'endswith', 'hello world')
        self.checkequal(False, 'helloworld', 'endswith', 'worl')
        self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
        self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
        self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
        self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
        self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
        self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)
        self.checkequal(True, '', 'endswith', '', 0, 1)
        self.checkequal(True, '', 'endswith', '', 0, 0)
        self.checkequal(False, '', 'endswith', '', 1, 0)
        # test negative indices
        self.checkequal(True, 'hello', 'endswith', 'lo', -2)
        self.checkequal(False, 'hello', 'endswith', 'he', -2)
        self.checkequal(True, 'hello', 'endswith', '', -3, -3)
        self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
        self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
        self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
        self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
        self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
        self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
        self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
        self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)
        self.checkraises(TypeError, 'hello', 'endswith')
        self.checkraises(TypeError, 'hello', 'endswith', 42)
        # test tuple arguments
        self.checkequal(False, 'hello', 'endswith', ('he', 'ha'))
        self.checkequal(True, 'hello', 'endswith', ('lo', 'llo'))
        self.checkequal(True, 'hello', 'endswith', ('hellox', 'hello'))
        self.checkequal(False, 'hello', 'endswith', ())
        self.checkequal(True, 'helloworld', 'endswith', ('hellowo',
                                                         'rld', 'lowo'), 3)
        self.checkequal(False, 'helloworld', 'endswith', ('hellowo', 'ello',
                                                          'rld'), 3, -1)
        self.checkequal(True, 'hello', 'endswith', ('hell', 'ell'), 0, -1)
        self.checkequal(False, 'hello', 'endswith', ('he', 'hel'), 0, 1)
        self.checkequal(True, 'hello', 'endswith', ('he', 'hell'), 0, 4)
        self.checkraises(TypeError, 'hello', 'endswith', (42,))
def test___contains__(self):
self.checkequal(True, '', '__contains__', '')
self.checkequal(True, 'abc', '__contains__', '')
self.checkequal(False, 'abc', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', '\0')
self.checkequal(True, 'abc\0', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', 'a')
self.checkequal(True, 'asdf', '__contains__', 'asdf')
self.checkequal(False, 'asd', '__contains__', 'asdf')
self.checkequal(False, '', '__contains__', 'asdf')
def test_subscript(self):
self.checkequal('a', 'abc', '__getitem__', 0)
self.checkequal('c', 'abc', '__getitem__', -1)
self.checkequal('a', 'abc', '__getitem__', 0)
self.checkequal('abc', 'abc', '__getitem__', slice(0, 3))
self.checkequal('abc', 'abc', '__getitem__', slice(0, 1000))
self.checkequal('a', 'abc', '__getitem__', slice(0, 1))
self.checkequal('', 'abc', '__getitem__', slice(0, 0))
self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_slice(self):
self.checkequal('abc', 'abc', '__getitem__', slice(0, 1000))
self.checkequal('abc', 'abc', '__getitem__', slice(0, 3))
self.checkequal('ab', 'abc', '__getitem__', slice(0, 2))
self.checkequal('bc', 'abc', '__getitem__', slice(1, 3))
self.checkequal('b', 'abc', '__getitem__', slice(1, 2))
self.checkequal('', 'abc', '__getitem__', slice(2, 2))
self.checkequal('', 'abc', '__getitem__', slice(1000, 1000))
self.checkequal('', 'abc', '__getitem__', slice(2000, 1000))
self.checkequal('', 'abc', '__getitem__', slice(2, 1))
self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = string.ascii_letters + string.digits
indices = (0, None, 1, 3, 41, -1, -2, -37)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
L = list(s)[start:stop:step]
self.checkequal("".join(L), s, '__getitem__',
slice(start, stop, step))
def test_mul(self):
self.checkequal('', 'abc', '__mul__', -1)
self.checkequal('', 'abc', '__mul__', 0)
self.checkequal('abc', 'abc', '__mul__', 1)
self.checkequal('abcabcabc', 'abc', '__mul__', 3)
self.checkraises(TypeError, 'abc', '__mul__')
self.checkraises(TypeError, 'abc', '__mul__', '')
# XXX: on a 64-bit system, this doesn't raise an overflow error,
# but either raises a MemoryError, or succeeds (if you have 54TiB)
#self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
    def test_join(self):
        """join() over lists, tuples, user sequences and generators,
        including error cases and exception-message preservation."""
        # join now works with any sequence type
        # moved here, because the argument order is
        # different in string.join
        self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
        self.checkequal('bd', '', 'join', ('', 'b', '', 'd'))
        self.checkequal('ac', '', 'join', ('a', '', 'c', ''))
        self.checkequal('w x y z', ' ', 'join', Sequence())
        self.checkequal('abc', 'a', 'join', ('abc',))
        self.checkequal('z', 'a', 'join', UserList(['z']))
        self.checkequal('a.b.c', '.', 'join', ['a', 'b', 'c'])
        self.assertRaises(TypeError, '.'.join, ['a', 'b', 3])
        for i in [5, 25, 125]:
            self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                 ['a' * i] * i)
            self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                 ('a' * i,) * i)
        #self.checkequal(str(BadSeq1()), ' ', 'join', BadSeq1())
        self.checkequal('a b c', ' ', 'join', BadSeq2())
        self.checkraises(TypeError, ' ', 'join')
        self.checkraises(TypeError, ' ', 'join', None)
        self.checkraises(TypeError, ' ', 'join', 7)
        self.checkraises(TypeError, ' ', 'join', [1, 2, bytes()])
        # a TypeError raised inside a generator must propagate out of
        # join() with its original message intact
        try:
            def f():
                yield 4 + ""
            self.fixtype(' ').join(f())
        except TypeError as e:
            if '+' not in str(e):
                self.fail('join() ate exception message')
        else:
            self.fail('exception not raised')
    def test_formatting(self):
        """printf-style %-formatting: conversions, widths, precisions,
        mapping keys, and the associated error cases."""
        self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
        self.checkequal('+10+', '+%d+', '__mod__', 10)
        self.checkequal('a', "%c", '__mod__', "a")
        self.checkequal('a', "%c", '__mod__', "a")
        self.checkequal('"', "%c", '__mod__', 34)
        self.checkequal('$', "%c", '__mod__', 36)
        self.checkequal('10', "%d", '__mod__', 10)
        self.checkequal('\x7f', "%c", '__mod__', 0x7f)
        for ordinal in (-100, 0x200000):
            # unicode raises ValueError, str raises OverflowError
            self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
        longvalue = sys.maxsize + 10
        slongvalue = str(longvalue)
        self.checkequal(' 42', '%3ld', '__mod__', 42)
        self.checkequal('42', '%d', '__mod__', 42.0)
        self.checkequal(slongvalue, '%d', '__mod__', longvalue)
        self.checkcall('%d', '__mod__', float(longvalue))
        self.checkequal('0042.00', '%07.2f', '__mod__', 42)
        self.checkequal('0042.00', '%07.2F', '__mod__', 42)
        self.checkraises(TypeError, 'abc', '__mod__')
        self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
        self.checkraises(TypeError, '%s%s', '__mod__', (42,))
        self.checkraises(TypeError, '%c', '__mod__', (None,))
        self.checkraises(ValueError, '%(foo', '__mod__', {})
        self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
        self.checkraises(TypeError, '%d', '__mod__', "42") # not numeric
        self.checkraises(TypeError, '%d', '__mod__', (42+0j)) # no int conversion provided
        # argument names with properly nested brackets are supported
        self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
        # 100 is a magic number in PyUnicode_Format, this forces a resize
        self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
        self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
        self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
        self.checkraises(ValueError, '%10', '__mod__', (42,))
        # Outrageously large width or precision should raise ValueError.
        self.checkraises(ValueError, '%%%df' % (2**64), '__mod__', (3.2))
        self.checkraises(ValueError, '%%.%df' % (2**64), '__mod__', (3.2))
        self.checkraises(OverflowError, '%*s', '__mod__',
                         (sys.maxsize + 1, ''))
        self.checkraises(OverflowError, '%.*f', '__mod__',
                         (sys.maxsize + 1, 1. / 7))
        # objects with no numeric conversion cannot be formatted
        class X(object): pass
        self.checkraises(TypeError, 'abc', '__mod__', X())
    @support.cpython_only
    def test_formatting_c_limits(self):
        """%-formatting with * width/precision just past the C-level
        limits (Py_ssize_t, int, unsigned int, size_t) must raise
        OverflowError rather than wrap (issue 15989)."""
        from _testcapi import PY_SSIZE_T_MAX, INT_MAX, UINT_MAX
        SIZE_MAX = (1 << (PY_SSIZE_T_MAX.bit_length() + 1)) - 1
        self.checkraises(OverflowError, '%*s', '__mod__',
                         (PY_SSIZE_T_MAX + 1, ''))
        self.checkraises(OverflowError, '%.*f', '__mod__',
                         (INT_MAX + 1, 1. / 7))
        # Issue 15989
        self.checkraises(OverflowError, '%*s', '__mod__',
                         (SIZE_MAX + 1, ''))
        self.checkraises(OverflowError, '%.*f', '__mod__',
                         (UINT_MAX + 1, 1. / 7))
def test_floatformatting(self):
# float formatting
for prec in range(100):
format = '%%.%if' % prec
value = 0.01
for x in range(60):
value = value * 3.14159265359 / 3.0 * 10.0
self.checkcall(format, "__mod__", value)
def test_inplace_rewrites(self):
# Check that strings don't copy and modify cached single-character strings
self.checkequal('a', 'A', 'lower')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'upper')
self.checkequal(True, 'a', 'islower')
self.checkequal('a', 'A', 'replace', 'A', 'a')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'capitalize')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'swapcase')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'title')
self.checkequal(True, 'a', 'islower')
def test_partition(self):
self.checkequal(('this is the par', 'ti', 'tion method'),
'this is the partition method', 'partition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'partition', '://')
self.checkequal(('http://www.python.org', '', ''), S, 'partition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'partition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'partition', 'org')
self.checkraises(ValueError, S, 'partition', '')
self.checkraises(TypeError, S, 'partition', None)
def test_rpartition(self):
self.checkequal(('this is the rparti', 'ti', 'on method'),
'this is the rpartition method', 'rpartition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'rpartition', '://')
self.checkequal(('', '', 'http://www.python.org'), S, 'rpartition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'rpartition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'rpartition', 'org')
self.checkraises(ValueError, S, 'rpartition', '')
self.checkraises(TypeError, S, 'rpartition', None)
def test_none_arguments(self):
# issue 11828
s = 'hello'
self.checkequal(2, s, 'find', 'l', None)
self.checkequal(3, s, 'find', 'l', -2, None)
self.checkequal(2, s, 'find', 'l', None, -2)
self.checkequal(0, s, 'find', 'h', None, None)
self.checkequal(3, s, 'rfind', 'l', None)
self.checkequal(3, s, 'rfind', 'l', -2, None)
self.checkequal(2, s, 'rfind', 'l', None, -2)
self.checkequal(0, s, 'rfind', 'h', None, None)
self.checkequal(2, s, 'index', 'l', None)
self.checkequal(3, s, 'index', 'l', -2, None)
self.checkequal(2, s, 'index', 'l', None, -2)
self.checkequal(0, s, 'index', 'h', None, None)
self.checkequal(3, s, 'rindex', 'l', None)
self.checkequal(3, s, 'rindex', 'l', -2, None)
self.checkequal(2, s, 'rindex', 'l', None, -2)
self.checkequal(0, s, 'rindex', 'h', None, None)
self.checkequal(2, s, 'count', 'l', None)
self.checkequal(1, s, 'count', 'l', -2, None)
self.checkequal(1, s, 'count', 'l', None, -2)
self.checkequal(0, s, 'count', 'x', None, None)
self.checkequal(True, s, 'endswith', 'o', None)
self.checkequal(True, s, 'endswith', 'lo', -2, None)
self.checkequal(True, s, 'endswith', 'l', None, -2)
self.checkequal(False, s, 'endswith', 'x', None, None)
self.checkequal(True, s, 'startswith', 'h', None)
self.checkequal(True, s, 'startswith', 'l', -2, None)
self.checkequal(True, s, 'startswith', 'h', None, -2)
self.checkequal(False, s, 'startswith', 'x', None, None)
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
s = 'hello'
x = 'x'
self.assertRaisesRegex(TypeError, r'^find\(', s.find,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^rfind\(', s.rfind,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^index\(', s.index,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^rindex\(', s.rindex,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^count\(', s.count,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^startswith\(', s.startswith,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^endswith\(', s.endswith,
x, None, None, None)
# issue #15534
self.checkequal(10, "...\u043c......<", "find", "<")
class MixinStrUnicodeTest:
# Additional tests that only work with str.
def test_bug1001011(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
# Check the optimisation still occurs for standard objects.
t = self.type2test
class subclass(t):
pass
s1 = subclass("abcd")
s2 = t().join([s1])
self.assertIsNot(s1, s2)
self.assertIs(type(s2), t)
s1 = t("abcd")
s2 = t().join([s1])
self.assertIs(s1, s2)
| [
"jtunney@gmail.com"
] | jtunney@gmail.com |
548c1d70cafd9449e416a68b4facefb8c5b1fd68 | be67e8736f8437d8ded442326da2b899c97cfad5 | /spider/__init__.py | 086be4c911b49658fa9b9bd4bbd7672eb168ce8e | [
"BSD-2-Clause"
] | permissive | BeiFenKu/PSpider | e4b76fc19fbd3fc23dddc10bfb84d57f3887c545 | 44d8ed3e006e0812621f45c35c1bb59557c84e2e | refs/heads/master | 2020-06-18T05:11:04.933335 | 2019-07-05T02:56:06 | 2019-07-05T06:31:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # _*_ coding: utf-8 _*_
"""
define WebSpider, and also define utilities and instances for web_spider
"""
__version__ = "2.3.0"
from .utilities import *
from .concurrent import TPEnum, WebSpider
from .instances import Fetcher, Parser, Saver, Proxieser
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
303bd68ad340b22c922e6ab0e2584613fe64719f | 195592971a36c6593372a77c9b593c9482195c38 | /rbac/context.py | 7e30600aaab1a4189a2fdfaa61618ee476a1ed88 | [
"MIT"
] | permissive | hxz2015/simple-rbac | 2a2c9b771296cbdc5d458c97ee85c40e1a717c21 | 5d975c5cd3faaaa5ba6bbe5e72b215fa66b718eb | refs/heads/master | 2020-12-25T08:59:36.881324 | 2012-05-22T06:13:06 | 2012-05-22T06:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,544 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import functools
__all__ = ["IdentityContext", "PermissionDenied"]
class PermissionContext(object):
"""A context of decorator to check the permission."""
def __init__(self, checker):
self.check = checker
self.in_context = False
def __call__(self, wrapped):
def wrapper(*args, **kwargs):
with self:
return wrapped(*args, **kwargs)
return functools.update_wrapper(wrapper, wrapped)
def __enter__(self):
self.in_context = True
self.check()
return self
def __exit__(self, exception_type, exception, traceback):
self.in_context = False
def __nonzero__(self):
try:
self.check()
except PermissionDenied:
return False
else:
return True
class IdentityContext(object):
"""A context of identity, providing the enviroment to control access."""
def __init__(self, acl, roles_loader=None):
self.acl = acl
self.set_roles_loader(roles_loader)
def set_roles_loader(self, role_loader):
"""Set a callable object (such as a function) which could return a
iteration to provide all roles of current context user.
Example:
>>> @context.set_roles_loader
... def load_roles():
... user = request.context.current_user
... for role in user.roles:
... yield role
"""
self.load_roles = role_loader
def check_permission(self, operation, resource, **exception_kwargs):
"""A decorator to check the permission.
The keyword arguments would be stored into the attribute `kwargs` of
the exception `PermissionDenied`.
"""
checker = functools.partial(self._docheck, operation=operation,
resource=resource, **exception_kwargs)
return PermissionContext(checker)
def _docheck(self, operation, resource, **exception_kwargs):
roles = self.load_roles()
if not self.acl.is_any_allowed(roles, operation, resource):
exception = exception_kwargs.pop("exception", PermissionDenied)
raise exception(**exception_kwargs)
return True
class PermissionDenied(Exception):
"""The exception for denied access request."""
def __init__(self, message="", **kwargs):
super(PermissionDenied, self).__init__(message)
self.kwargs = kwargs
self.kwargs['message'] = message
| [
"tonyseek@gmail.com"
] | tonyseek@gmail.com |
475fea2c52e68d17ae70c60ec9c7696f1541de5d | 51a2fb45db6a074c7bd5af32c8ee8471251436f4 | /第六章-pytest框架/pycharm执行pytest脚本04.py | aa07b70cda12e66c1cc6219137e7c0b1cd9a4b43 | [] | no_license | JiangHuYiXiao/Web-Autotest-Python | c5e2cf61a5a62d132df048d3218dfb973be8784e | 65b30360337b56b6ca4eba21f729c922f1665489 | refs/heads/master | 2021-08-26T07:46:42.957744 | 2021-08-12T02:24:11 | 2021-08-12T02:24:11 | 253,945,694 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2020/5/1 10:19
# @Software : Web-Autotest-Python
# @Python_verison : 3.7
# 之前执行python文件时候,我们在文件中直接右键,然后run
# 执行unittest脚本时候,我们是run unittest
# 但是执行pytest脚本时直接执行是不生效的,这个时候,需要我们设置一下编辑器pycharm的关于该文件的默认runner
# 配置路径:setting--->baidu_tools--->python integrated baidu_tools --->testing--->default tester runner
# 配置完成后需要在该文件的父目录下右键进行执行,在该文件上执行还是不行的
| [
"1163270704@qq.com"
] | 1163270704@qq.com |
fe4318402e9a3ece4700c76f73180ff008a72990 | b9efe70d12c2cbd55065d02e974f5725534583ee | /src/visualize.py | 8f0d478c6b67f04feb8db8746118f6f3d3f78fe5 | [] | no_license | diegoami/bankdomain_PY | 5089581ea7b7db6233243dff305488ff27dc8e90 | 83816e1beb96d3e9e0f746bec7f9db9521f32ee7 | refs/heads/master | 2022-12-17T05:05:13.557911 | 2020-06-03T22:19:44 | 2020-06-03T22:19:44 | 131,530,574 | 0 | 0 | null | 2022-12-08T01:30:27 | 2018-04-29T21:12:25 | HTML | UTF-8 | Python | false | false | 719 | py |
import yaml
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from query import QueryExecutor
if __name__ == '__main__':
config = yaml.safe_load(open('config.yml'))
models_dir = config['models_dir']
mongo_connection = config['mongo_connection']
query_executor = QueryExecutor(mongo_connection, models_dir)
doc2vec_similar(query_executor)
X = model[model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
# create a scatter plot of the projection
pyplot.scatter(result[:, 0], result[:, 1])
words = list(model.wv.vocab)
for i, word in enumerate(words):
pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
pyplot.show() | [
"diego.amicabile@gmail.com"
] | diego.amicabile@gmail.com |
30dadc0842eb8cfffe8c22e53818c815aa56b7cd | 3ea104409b5ab5f1d1928af7d31b4a58b11d220a | /venv/Lib/site-packages/networkx/algorithms/approximation/steinertree.py | f999694d811236225ab2b782b57a53c9e3e2f377 | [
"Apache-2.0"
] | permissive | farhananwari07/flask-image-processing | 0103ab0600995a760e27ffc644ffb313de4eaade | a4a4ad717ffd074afbe31cbf8803060764034375 | refs/heads/main | 2023-09-02T01:21:27.328049 | 2021-11-10T07:58:17 | 2021-11-10T07:58:17 | 425,517,466 | 0 | 0 | Apache-2.0 | 2021-11-07T13:55:56 | 2021-11-07T13:55:56 | null | UTF-8 | Python | false | false | 3,376 | py | from itertools import chain
from networkx.utils import pairwise, not_implemented_for
import networkx as nx
__all__ = ["metric_closure", "steiner_tree"]
@not_implemented_for("directed")
def metric_closure(G, weight="weight"):
"""Return the metric closure of a graph.
The metric closure of a graph *G* is the complete graph in which each edge
is weighted by the shortest path distance between the nodes in *G* .
Parameters
----------
G : NetworkX graph
Returns
-------
NetworkX graph
Metric closure of the graph `G`.
"""
M = nx.Graph()
Gnodes = set(G)
# check for connected graph while processing first node
all_paths_iter = nx.all_pairs_dijkstra(G, weight=weight)
u, (distance, path) = next(all_paths_iter)
if Gnodes - set(distance):
msg = "G is not a connected graph. metric_closure is not defined."
raise nx.NetworkXError(msg)
Gnodes.remove(u)
for v in Gnodes:
M.add_edge(u, v, distance=distance[v], path=path[v])
# first node done -- now process the rest
for u, (distance, path) in all_paths_iter:
Gnodes.remove(u)
for v in Gnodes:
M.add_edge(u, v, distance=distance[v], path=path[v])
return M
@not_implemented_for("directed")
def steiner_tree(G, terminal_nodes, weight="weight"):
"""Return an approximation to the minimum Steiner tree of a graph.
The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes`
is a tree within `G` that spans those nodes and has minimum size
(sum of edge weights) among all such trees.
The minimum Steiner tree can be approximated by computing the minimum
spanning tree of the subgraph of the metric closure of *G* induced by the
terminal nodes, where the metric closure of *G* is the complete graph in
which each edge is weighted by the shortest path distance between the
nodes in *G* .
This algorithm produces a tree whose weight is within a (2 - (2 / t))
factor of the weight of the optimal Steiner tree where *t* is number of
terminal nodes.
Parameters
----------
G : NetworkX graph
terminal_nodes : list
A list of terminal nodes for which minimum steiner tree is
to be found.
Returns
-------
NetworkX graph
Approximation to the minimum steiner tree of `G` induced by
`terminal_nodes` .
Notes
-----
For multigraphs, the edge between two nodes with minimum weight is the
edge put into the Steiner tree.
References
----------
.. [1] Steiner_tree_problem on Wikipedia.
https://en.wikipedia.org/wiki/Steiner_tree_problem
"""
# H is the subgraph induced by terminal_nodes in the metric closure M of G.
M = metric_closure(G, weight=weight)
H = M.subgraph(terminal_nodes)
# Use the 'distance' attribute of each edge provided by M.
mst_edges = nx.minimum_spanning_edges(H, weight="distance", data=True)
# Create an iterator over each edge in each shortest path; repeats are okay
edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges)
# For multigraph we should add the minimal weight edge keys
if G.is_multigraph():
edges = (
(u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges
)
T = G.edge_subgraph(edges)
return T
| [
"agoes.minarno@gmail.com"
] | agoes.minarno@gmail.com |
d00e03a5d6e2f5c023b3cfd468bcf23e8c80a838 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02409/s275514572.py | 76183adbf22944843da70b0b0f778f92ff20fd49 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | bilding = [[[0 for x in range(10)] for x in range(3)]for x in range(4)]
n = int(input())
for k in range(n):
b, f, r, v = map(int, raw_input().split())
bilding[b-1][f-1][r-1] += v
for b in range(4):
for f in range(3):
print(" "+" ".join(map(str, bilding[b][f])))
if b < 3:
print("#"*20) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4806bb126dbfe932a85b60a9b3914c0aea218210 | 38558ac2e78837e7f975364f03a1f55fb02103af | /BASIC TOOL PROGRAM/fibo1.py | 80c92885b451da1d16be6a515d0476e3e78a53d9 | [] | no_license | SOURADEEP-DONNY/WORKING-WITH-PYTHON | a0bc2ff5ddab1b25563927c8f361c6512683d6ff | 5198d14f0711a3ba7f2fe8bac61d6404c20ea40c | refs/heads/master | 2023-07-14T04:49:08.399519 | 2021-08-29T15:22:33 | 2021-08-29T15:22:33 | 270,723,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | def Fibonacci(n):
if n < 0:
print("Incorrect input")
elif n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return Fibonacci(n-1) + Fibonacci(n-2)
number=int(input())
for i in range(0,number+1,1):
print(Fibonacci(i))
| [
"noreply@github.com"
] | SOURADEEP-DONNY.noreply@github.com |
6a31fc78953cf06d9acabfb6b9b2db3def13b768 | 34b94033b5bbb43c5ffd1c7e9672e46ce735ebf7 | /.circleci/checklhe/lhefile.py | ff69dcc10b63de537987cd49e8d241f81ae7e819 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | JHUGen/JHUGen | c85329874bf3778954c4b8061a3098eea1a926ef | 2854cbfc3d82122fbfce22dcea2e83ca4312f7f3 | refs/heads/master | 2023-09-01T03:39:35.212476 | 2023-08-28T20:50:59 | 2023-08-28T20:50:59 | 38,982,074 | 4 | 18 | Apache-2.0 | 2023-08-29T14:21:26 | 2015-07-12T23:29:19 | Fortran | UTF-8 | Python | false | false | 6,111 | py | import collections
import ROOT
import config
import globalvariables
import event
import particle
class LHEFile:
def __init__(self, filename):
globalvariables.init()
if filename[-4:] != ".lhe":
raise ValueError(filename + " does not end in .lhe")
self.filename = filename
self.f = open(filename)
self.nevents = 0
self.n4e = 0
self.n4mu = 0
self.n2e2mu = 0
self.linenumber = 0
self.incomment = False
self.sawinitblock = False
self.processidlist = []
self.VegasNc2 = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
print " ", self.nevents, "events"
print " ", self.n4e, "4e events"
print " ", self.n4mu, "4mu events"
print " ", self.n2e2mu, "2e2mu events"
self.f.close()
if self.VegasNc2 is not None and self.nevents != self.VegasNc2:
self.raiseerror("VegasNc2={}, but {} events!".format(self.VegasNc2, self.nevents))
def raiseerror(self, msg):
if config.raiseerror:
raise IOError(msg)
else:
print msg
def readevent(self):
while "<event" not in self.nextline():
if not self.line: #eof
return None
if "</event>" in self.line:
self.raiseerror("Extra </event>! " + str(self.linenumber))
if "<init>" in self.line:
if self.sawinitblock:
self.raiseerror("Extra init block! " + str(self.linenumber))
self.sawinitblock = True
data = self.nextline().split()
try:
[int(data[i]) for i in (0, 1, 4, 5, 6, 7, 8, 9)]
[float(data[i]) for i in (2, 3)]
except (ValueError, IndexError):
self.raiseerror("Bad init line 1!")
nprocesses = int(data[9])
for p in range(nprocesses):
data = self.nextline().split()
if "</init>" in self.line:
self.raiseerror("Not enough lines in init block!")
break
try:
[float(data[i]) for i in (0, 1, 2)]
if float(data[0]) < 0:
self.raiseerror("Pythia doesn't like negative cross sections!")
int(data[3])
for i in range(3, len(data)):
self.processidlist.append(int(data[i]))
except (ValueError, IndexError):
self.raiseerror("Bad init line %i!" % (2+p))
while "</init>" not in self.nextline() and "<event>" not in self.line:
if self.line.split():
self.raiseerror("Extra line in init block!")
if "<event>" in self.line:
self.raiseerror("No </init>!")
break
if "<!--" in self.line:
if not self.line.strip().startswith("<!--"):
self.raiseerror("Warning: comment begins in the middle of a line\n"
"(ok in itself, but other problems may not be detected)! " + str(self.linenumber))
if self.incomment:
self.raiseerror("<!-- inside a comment! " + str(self.linenumber))
self.line = self.line.replace("<!--", "", 1)
if "<!--" in self.line:
self.raiseerror("Warning: multiple <!-- in one line\n"
"(ok in itself, but other problems may not be detected!" + str(self.linenumber))
self.incomment = True
if "-->" in self.line:
if not self.line.strip().endswith("-->"):
self.raiseerror("Warning: comment ends in the middle of a line\n"
"(ok in itself, but problems may not be detected)! " + str(self.linenumber))
if not self.incomment:
self.raiseerror("--> not preceded by <!--! " + str(self.linenumber))
self.line = self.line.replace("-->", "", 1)
if "-->" in self.line:
self.raiseerror("Warning: multiple --> in one line\n"
"(ok in itself, but other problems may not be detected!" + str(self.linenumber))
self.incomment = False
if "--" in self.line and self.incomment:
self.raiseerror("-- in a comment! " + str(self.linenumber))
if self.incomment and "VegasNc2=" in self.line and ("VBFoffsh_run=*" in self.line or not any("Process={}".format(_) in self.line for _ in (66,67,68,69))):
for argument in self.line.split():
if argument.startswith("VegasNc2="):
self.VegasNc2 = int(argument.split("=")[-1])
if not self.sawinitblock:
self.raiseerror("No <init>!")
ev = event.Event(self.linenumber, self.processidlist)
ev.setfirstline(self.nextline())
while "</event>" not in self.nextline():
if not self.line:
self.raiseerror("File ends in the middle of an event!")
return None
if "<event" in self.line:
self.raiseerror("Extra <event>! " + str(self.linenumber))
try:
ev.addparticle(self.line)
except particle.BadParticleLineError:
continue
ev.finished()
self.nevents += 1
if ev.is4e(): self.n4e += 1
if ev.is4mu(): self.n4mu += 1
if ev.is2e2mu(): self.n2e2mu += 1
return ev
def nextline(self):
self.linenumber += 1
self.line = self.f.readline()
return self.line
def __iter__(self):
return self
def next(self):
ev = self.readevent()
if ev is not None:
return ev
raise StopIteration
| [
"jroskes1@jhu.edu"
] | jroskes1@jhu.edu |
338fab704d4d753f954d7a50cb2cd98a24a2f00e | f40cc44ebfc337326577c91cd88d0c1dd845b098 | /LuminarPythonPrograms/LoopingProgram/printEven.py | 681b99cde15812529009265a2004a2f511ba6e06 | [] | no_license | Aswin2289/LuminarPython | 6e07d6f9bf6c8727b59f38f97f5779a33b2fab0d | ba633a276dd79bbf214cfceac2413c894eaa1875 | refs/heads/master | 2023-01-01T07:52:41.598110 | 2020-10-13T04:34:49 | 2020-10-13T04:34:49 | 290,109,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | #program to print up to rabge
i=int(input("Enter ur Lower Limit"))
num=int(input("Enter ur range"))
while(i<=num):
if (i % 2 == 0):
print(i)
i+=1 | [
"aswinabraham4@gmail.com"
] | aswinabraham4@gmail.com |
e12f42185cab421d33cb53913f513b98dac13e7f | 3f13885fdb0649374d866d24a43f86ccc6b4c782 | /apps/workflow/api/app.py | 9d58254a7068da6ba7bf6ec51644b3c0e2badd4c | [] | no_license | linkexf/oneops | 426b271c00c5b4b4c55d1d91bf42030dab29623a | 64a9c7fd949b6220234a276614ab6555dc8cc17c | refs/heads/master | 2020-12-10T04:45:55.681731 | 2019-11-28T09:02:30 | 2019-11-28T09:02:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,417 | py | # -*- coding: utf-8 -*-
import uuid
import requests
import simplejson as json
from pprint import pprint
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from common.mixins import JSONResponseMixin
from common.utils.zabbix_api import get_access_token, get_host_ids, get_monitor_item_ids, update_monitor_item
from cmdb.models.asset import Server
from cmdb.models.business import App, BizMgtDept
from cmdb.views.ip import get_ips_by_server_id
from workflow.models import CommonFlow, CommonFlowArg
from ssh.models.host_user import HostUserAsset
from job.tasks.ansible_api import AnsibleAPI
class AnsibleHostsGroupInitAPIView(JSONResponseMixin, View):
def get(self, request, *args, **kwargs):
try:
flow_id = kwargs.get('flow_id')
cf = CommonFlow.objects.get(id=flow_id)
app_id = CommonFlowArg.objects.get(cf=cf, arg='app_id').value
app = App.objects.get(id=app_id)
dept = app.biz_mgt_dept
while True:
if dept.parent_id == 2:
dept_code = dept.dept_code
break
else:
dept = BizMgtDept.objects.get(id=dept.parent_id)
pre_host = [s.hostname for s in Server.objects.filter(pre_app=app, app_env='pre')]
beta_host = [s.hostname for s in Server.objects.filter(pre_app=app, app_env='beta')]
prod_host = [s.hostname for s in Server.objects.filter(pre_app=app, app_env='prod')]
result = '''[{0}-{1}-pre]\n{2}\n[{0}-{1}-beta]\n{3}\n[{0}-{1}-prod]\n{4}\n'''.format(
dept_code, app.app_code, '\n'.join(pre_host), '\n'.join(beta_host), '\n'.join(prod_host))
res = {'code': 0, 'result': result}
except Exception as e:
res = {'code': 1, 'errmsg': '执行出错:%s' % str(e)}
return self.render_json_response(res)
class OpsProjectCreateAPIView(JSONResponseMixin, View):
def get(self, request, *args, **kwargs):
try:
flow_id = kwargs.get('flow_id')
cf = CommonFlow.objects.get(id=flow_id)
app_id = CommonFlowArg.objects.get(cf=cf, arg='app_id').value
app = App.objects.get(id=app_id)
dept = app.biz_mgt_dept
while True:
if dept.parent_id == 2:
dept_code = dept.dept_code
break
else:
dept = BizMgtDept.objects.get(id=dept.parent_id)
data = {
"app_code": "prod_" + app.app_code,
"app_type": app.app_type.upper(),
"comment": app.comment,
"p_script": "/jenkins/data/deploy_war.sh" if app.app_type == 'war' else "/jenkins/data/deploy_jar.sh",
"p_tomcat": '/data/{}-{}'.format(app.tomcat_port, app.app_code),
"p_war": app.app_code,
"p_prehost": '{0}-{1}-pre'.format(dept_code, app.app_code),
"p_host1": '{0}-{1}-beta'.format(dept_code, app.app_code),
"p_host2": '{0}-{1}-prod'.format(dept_code, app.app_code)
}
res = {'code': 0, 'result': data}
except Exception as e:
res = {'code': 1, 'errmsg': '执行出错:%s' % str(e)}
return self.render_json_response(res)
def post(self, request, *args, **kwargs):
try:
post_data = request.POST.copy().dict()
p_group = ','.join(request.POST.getlist('p_group', []))
print(p_group, post_data)
post_data['p_group'] = p_group
post_data['principal'] = "1"
post_data['p_user'] = "1"
headers = {"Content-Type": "application/json"}
data = {
"jsonrpc": "2.0",
"id": 1,
"method": "project.create2",
"params": post_data
}
pprint(data)
ret = requests.post("http://opsapi.yadoom.com/api", headers=headers, json=data)
res = json.loads(json.loads(ret.text)['result'])
pprint(res)
except Exception as e:
res = {'code': 1, 'errmsg': '执行出错:%s' % str(e)}
return self.render_json_response(res)
class OpsRoleListAPIView(JSONResponseMixin, View):
def get(self, request, *args, **kwargs):
try:
headers = {"Content-Type": "application/json"}
data = {
"jsonrpc": "2.0",
"id": 1,
"method": "role.getlist2",
"params": {}
}
ret = requests.post("http://opsapi.yadoom.com/api", headers=headers, json=data)
res = json.loads(ret.text)
except Exception as e:
res = {'code': 1, 'errmsg': '执行出错:%s' % str(e)}
return self.render_json_response(res)
class AppOfflineCodeBackupAPIView(LoginRequiredMixin, JSONResponseMixin, View):
def post(self, request, *args, **kwargs):
try:
app_id = kwargs.get('app_id')
app = App.objects.get(id=app_id)
for host in app.app_server.all():
ips = get_ips_by_server_id(host.id)
if not ips:
print(host.hostname, host.login_address, " 没有关联ip地址!")
continue
hua = HostUserAsset.objects.filter(asset=host, host_user__username='root')
if hua:
hu = hua[0].host_user
kwargs = {
'resource': list(),
'hosts_file': ["/data/ansible/inventory/public/hosts_all"],
'host_user': hu.id
}
playbook = ["/data/ansible/playbook/admin/app_offline.yml"]
extra_vars = {"apphost": host.login_address.split(":")[0], "app_code": app.app_code,
"tomcat_port": app.tomcat_port, "app_type": app.app_type}
ansible_api = AnsibleAPI(0, str(uuid.uuid4()), **kwargs)
print(ansible_api.run_playbook(playbook, extra_vars))
else:
print(host.hostname, host.login_address, " 未绑定用户为root的HostUser!")
res = {'code': 0, 'result': '任务已提交,请再手动确认是否执行成功!'}
except Exception as e:
res = {'code': 1, 'result': str(e)}
return self.render_json_response(res)
class AppOfflineDisableMonitorAPIView(LoginRequiredMixin, JSONResponseMixin, View):
def post(self, request, *args, **kwargs):
try:
app_code = kwargs.get('app_code')
app = App.objects.get(app_code=app_code)
ip_list = []
for host in app.app_server.all():
ips = get_ips_by_server_id(host.id)
if ips:
ip_list.append(ips[0])
tk = get_access_token()
host_ids = get_host_ids(tk, ip_list)
item_ids = get_monitor_item_ids(tk, host_ids, 'status[%d]' % app.tomcat_port)
print(tk, ip_list, host_ids, item_ids)
update_monitor_item(tk, item_ids, 0)
res = {'code': 0, 'result': '已经禁用!'}
except Exception as e:
res = {'code': 1, 'result': str(e)}
return self.render_json_response(res)
| [
"andykaiyu@163.com"
] | andykaiyu@163.com |
d1cf5a4253fa7bcd261e49182c6b3867f11c3dca | 5bb1ae9b9e6592def632b8a95def32b3a2d742d5 | /movie_wish/test.py | 2fe8adc8befd22f8eebf640259ea2602a1fec457 | [] | no_license | fiso0/my_python | af1132637a4ad92036ea0a949fa93df6f904b190 | 391def01ecdb97b8e3008235910a596bb5a9b52c | refs/heads/master | 2021-01-17T15:52:36.745999 | 2016-10-29T08:37:51 | 2016-10-29T08:37:51 | 58,641,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | import csv
with open('test1.csv',newline='') as f:
spamreader = csv.reader(f)
for line in spamreader:
print(line[0])
| [
"fiso0@126.com"
] | fiso0@126.com |
0dbf5d407aa3f004da79893bc38e9e510244c139 | 1c25798a9ae17ca228383fcd04a1e801415a78e7 | /Chapter 5 Loops/Ex_26_27_Sum of series.py | 760239321ae5adb7938fc99cbc0a019b58c4dfe3 | [] | no_license | padamcs36/Introduction_to_Python_By_Daniel | 688c56fff598617e979a5f71e9a48e50844ad7ea | 8b8f00c9d93428c58df9c90e5edd8f75a1662647 | refs/heads/main | 2023-02-22T08:11:01.450054 | 2021-01-28T09:45:01 | 2021-01-28T09:45:01 | 333,700,411 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | import math
import time
#1/3 + 3/5 + 5/7+......+97//99
#2n-1/2n+1
sum = 0
for i in range(1, 100, 2):
k = i / (i+2)
sum += k
print(format(sum, ".2f"))
#Compute Value of PI
sum = 0
startTime = time.time()
for i in range(1, 100000+1):
numerator = math.pow(-1, i+1)
denomenator = 2 * i - 1
k = numerator / denomenator
sum += k
if i == 10000 or i == 20000 or i == 30000 or i == 40000 or \
i == 50000 or i == 60000 or i == 70000 or i == 80000 or \
i == 90000 or i == 100000:
print("PI is: ",format(4 * sum, ".3f"), "for i =", i)
endTime = time.time()
totalTime = (endTime - startTime)
print(format(totalTime, ".4f"), "seconds")
print("Sum is: ",format(4 * sum, ".3f")) | [
"noreply@github.com"
] | padamcs36.noreply@github.com |
654bc050b5ccc23d7d0daef531faeee8f08d0ec6 | a86bca3e88fc3012bc9805c74c2e752262370326 | /AI/test_22.py | 2e8f98cc381cba052ae8f1f7c79db162df538d4e | [
"MIT"
] | permissive | osamhack2021/AI_NoYoutube_60Duo | 4921f7c838776305d8dc00d6ceb04b2190565916 | c1e34b7b506b43c9be6c39da3211fac49bfbcd14 | refs/heads/main | 2023-08-11T19:24:45.560000 | 2021-10-13T15:00:38 | 2021-10-13T15:00:38 | 405,925,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | print('hello')
| [
"noreply@github.com"
] | osamhack2021.noreply@github.com |
884d30f7edf976714898409cef9a4c40addf737a | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/custom_field.py | f8a5aeca1f2ddc517de2d87bccbe37aa652592cb | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,492 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CustomField:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'value': 'str'
}
attribute_map = {
'name': 'name',
'value': 'value'
}
def __init__(self, name=None, value=None):
"""CustomField - a model defined in huaweicloud sdk"""
self._name = None
self._value = None
self.discriminator = None
if name is not None:
self.name = name
if value is not None:
self.value = value
@property
def name(self):
"""Gets the name of this CustomField.
自定义属性名
:return: The name of this CustomField.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CustomField.
自定义属性名
:param name: The name of this CustomField.
:type: str
"""
self._name = name
@property
def value(self):
"""Gets the value of this CustomField.
自定义属性对应的值
:return: The value of this CustomField.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this CustomField.
自定义属性对应的值
:param value: The value of this CustomField.
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CustomField):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
19f2ccbf1a31225761a59aaa8e647b1c518ebff7 | 11aac6edab131293027add959b697127bf3042a4 | /findTheDistanceValue.py | bf05470d870073c26a25a66b2d735dd8220abcf5 | [] | no_license | jdanray/leetcode | a76b3436002b31865967b757b73c85992636383b | fd736af3e79899b86dac89d4d925d5bd985944ad | refs/heads/master | 2023-08-15T01:20:05.110565 | 2023-08-14T00:25:58 | 2023-08-14T00:25:58 | 148,686,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | # https://leetcode.com/problems/find-the-distance-value-between-two-arrays/
class Solution(object):
def findTheDistanceValue(self, arr1, arr2, d):
dist = 0
for n1 in arr1:
if not any(abs(n1 - n2) <= d for n2 in arr2):
dist += 1
return dist
| [
"jdanray@users.noreply.github.com"
] | jdanray@users.noreply.github.com |
cee25608874f37bd66b4725cf44924e0d439e5e8 | 0f7cf365e00e3e116deca345ceb53588a0aee152 | /src/collective/jazzport/interfaces.py | dc33b605b2b04ec04c62ef85cb2436223579b31b | [] | no_license | datakurre/collective.jazzport | d66c59556a1055d46843c672babb4d8764732e19 | cf9d46dd50e40ea0437f6059ed9bc7ee57bb24f2 | refs/heads/master | 2023-06-11T03:42:09.980698 | 2023-05-26T06:37:17 | 2023-05-26T06:37:17 | 23,119,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
from zope import schema
from zope.i18nmessageid import MessageFactory
from zope.interface import Interface
_ = MessageFactory('collective.jazzport')
class IJazzportLayer(Interface):
"""Marker interface that defines a Zope 3 browser layer"""
class IJazzportSettings(Interface):
portal_types = schema.Set(
title=_(u'Portal types'),
description=_(u'Select downloadable portal types'),
value_type=schema.Choice(
title=_(u'Type'),
vocabulary='plone.app.vocabularies.ReallyUserFriendlyTypes'
),
required=False
)
| [
"asko.soukka@iki.fi"
] | asko.soukka@iki.fi |
0f41d9dff29f888d64ce2cea72375459185714ae | fbe05017b477a8b6c3603be3f2003c4a80854868 | /src/Ner_tag.py | c985de37f08fe224ed29a7391ead1068256d594b | [] | no_license | enningxie/user-level | 7cb8e9a30090adabea085bde046049c52c86cf84 | a3ef1c2b51b39eceef0f95c3f251a810e2bae801 | refs/heads/master | 2020-08-08T08:44:50.514365 | 2019-10-31T06:52:31 | 2019-10-31T06:52:31 | 213,796,188 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,740 | py | """
################################Ner_tag.py################################
程序名称: Ner_tag.py
功能描述: 实体标注
创建人名: wuxinhui
创建日期: 2019-07-12
版本说明: v1.0
################################Ner_tag.py################################
"""
import numpy as np
import re
from random import shuffle
import copy
import jieba
import cn2an
import json
import random
def utils_func(src):
def strQ2B(ustring):
rstring = ''
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 12288:
inside_code = 32
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstring += chr(inside_code)
return rstring
return strQ2B(src).strip().lower()
# functions
# find the ids of str for tar
def find_str(src, tar, idl):
ids = []
if idl == 0:
for i in range(0, len(src) - len(tar) + 1):
if src[i:i + len(tar)] == tar:
ids.append(i)
else:
for i in range(0, len(src) - len(tar) + 1):
if src[i:i + len(tar)] == tar and reg_parse(src, i, i + len(tar) - 1) == True:
ids.append(i)
return ids
# find the parse for src
def reg_parse(src, i, j):
R1, R2 = False, False
if i == 0:
R1 = True
else:
if re.match("[a-z0-9\\-]{2}", src[i - 1:i + 1], re.I) == None:
R1 = True
if j == len(src) - 1:
R2 = True
else:
if re.match("[a-z0-9\\-]{2}", src[j:j + 2], re.I) == None:
R2 = True
if R1 == True and R2 == True:
return True
else:
return False
# the tag functions
def tag_func(sen, S, tag, label, Allow, idl):
S = set(S)
S = [s for s in S if s in sen]
# 提取索引列表
idList = []
for i in S:
ids = find_str(sen, i, idl)
ids = [list(range(w, w + len(i))) for w in ids]
idList.extend(ids)
idList.sort(key=len)
idList.reverse()
"""
# 去重索引列表
idSet = []
idList.sort(key=len)
while(len(idList) != 0):
temp = idList.pop()
lab = 0
for i in idSet:
if len(set(temp).intersection(set(i))) > 0:
lab = 1
break
if lab == 0:
idSet.append(temp)
"""
# 标注索引列表
for ids in idList:
table = [tag[i][0] for i in ids]
flag = [tag[ids[0]], tag[ids[-1]]]
if not (set(table).issubset(set(Allow))):
continue
if re.search("O$|BEG$", flag[0], re.I) == None or re.search("O$|END$", flag[1], re.I) == None:
continue
if len(ids) > 1:
tag[ids[0]] = label + "_BEG"
tag[ids[-1]] = label + "_END"
for i in ids[1:-1]:
tag[i] = label + "_MID"
else:
tag[ids[0]] = label
return tag
# extract the tag from sen
def tag_extract(sen, tag, label):
labelL = []
for ids in range(len(sen)):
if tag[ids] == label + "_BEG":
tmp = sen[ids]
elif tag[ids] == label + "_MID":
tmp += sen[ids]
elif tag[ids] == label + "_END":
tmp += sen[ids]
labelL.append(tmp)
elif tag[ids] == label:
labelL.append(sen[ids])
else:
tmp = ""
return labelL
# classes
# main Ner_tag spi class
class Ner_tag(object):
"""docstring for Ner_tag"""
def __init__(self, file):
super(Ner_tag, self).__init__()
self.__kg = json.load(open(file, "rb"))
def set_ner_kg(self, kg):
self.__kg = kg
return
def get_ner_kg(self):
return self.__kg
def ner_tag_api(self, sen):
"""
finsh the tag of the sentence, acquire all tags | char level
"""
sen = utils_func(sen)
tag = ["O"] * len(sen)
labels = [l for l in self.__kg.keys() if l not in ["B", "S", "M"]]
for l in labels:
try:
regexp = [re.compile(r) for r in self.__kg[l]["regexp"]]
S = sum([r.findall(sen) for r in regexp], [])
except:
value = self.__kg[l]["value"]
S = [v for v in value if v in sen]
tag = tag_func(sen, S, tag, l, ["O"], 0)
tag = tag_func(sen, self.__kg["B"]["value"], tag, "B", ["O"], 1)
tag = tag_func(sen, self.__kg["S"]["value"], tag, "S", ["O"], 0)
tag = tag_func(sen, self.__kg["M"]["value"], tag, "M", ["S", "O"], 0)
Blabel = tag_extract(sen, tag, "B")
Stalk = sum([self.__kg["S"]["map"][b] for b in Blabel], [])
tag = tag_func(sen, Stalk, tag, "S", ["O"], 1)
Mtalk = sum([self.__kg["M"]["map"][b] for b in Blabel], [])
tag = tag_func(sen, Mtalk, tag, "M", ["O"], 1)
return tag
def ner_log_api(self, sen):
tag = self.ner_tag_api(sen)
B = tag_extract(sen, tag, "B")
S = tag_extract(sen, tag, "S")
car_info = {}
car_info["serie"] = []
car_info["color"] = []
car_info["model"] = []
# extract the car serie
for s in S:
label = 0
for b in B:
if s.lower() in self.__kg["B"]["map"][b]:
car_info["serie"].append(b + s)
label = 1
break
if label == 0:
car_info["serie"].append(s)
for b in B:
label = 0
for i in car_info["serie"]:
if b in i:
label = 1
break
if label == 0:
car_info["serie"].append(b)
# extract the car model
Y = tag_extract(sen, tag, "Y")
N = tag_extract(sen, tag, "N")
E = tag_extract(sen, tag, "E")
G = tag_extract(sen, tag, "G")
D = tag_extract(sen, tag, "D")
Q = tag_extract(sen, tag, "Q")
I = tag_extract(sen, tag, "I")
M = tag_extract(sen, tag, "M")
car_info["model"].extend(Y)
car_info["model"].extend(N)
car_info["model"].extend(E)
car_info["model"].extend(G)
car_info["model"].extend(D)
car_info["model"].extend(Q)
car_info["model"].extend(I)
car_info["model"].extend(M)
# extract the car color
C = tag_extract(sen, tag, "C")
car_info["color"].extend(C)
return car_info
# main function
if __name__ == "__main__":
kg_file = "../data/kg.json"
Ner = Ner_tag(kg_file)
sen = "宝马x3红色豪华版2.0t4座;奥迪a6豪华版"
car_info = Ner.ner_log_api(sen)
print(car_info)
print(type(car_info))
| [
"enningxie@163.com"
] | enningxie@163.com |
f5159f6728ae1c9e4c5fe4a4bb1dd66b6e175470 | b3eb6f6144017e84e727bb65ba945916b6f5363c | /tests/integration_tests/explore/permalink/api_tests.py | a44bc70a7b49a7a0dd213871dc1f46d264ed564e | [
"Apache-2.0",
"OFL-1.1"
] | permissive | mistercrunch/superset | f838bd80144c48ea4dc27ae29db1df2521ef1bd5 | f144de4ee2bf213bb7e17f903bd3975d504c4136 | refs/heads/master | 2023-06-07T13:16:36.674565 | 2022-05-06T10:11:41 | 2022-05-06T10:11:41 | 56,703,070 | 17 | 6 | Apache-2.0 | 2023-03-04T00:13:28 | 2016-04-20T16:28:49 | TypeScript | UTF-8 | Python | false | false | 4,726 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pickle
from typing import Any, Dict, Iterator
from uuid import uuid3
import pytest
from sqlalchemy.orm import Session
from superset import db
from superset.key_value.models import KeyValueEntry
from superset.key_value.types import KeyValueResource
from superset.key_value.utils import decode_permalink_id, encode_permalink_key
from superset.models.slice import Slice
from tests.integration_tests.base_tests import login
from tests.integration_tests.fixtures.client import client
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
load_world_bank_data,
)
from tests.integration_tests.test_app import app
@pytest.fixture
def chart(load_world_bank_dashboard_with_slices) -> Slice:
with app.app_context() as ctx:
session: Session = ctx.app.appbuilder.get_session
chart = session.query(Slice).filter_by(slice_name="World's Population").one()
return chart
@pytest.fixture
def form_data(chart) -> Dict[str, Any]:
datasource = f"{chart.datasource.id}__{chart.datasource.type}"
return {
"chart_id": chart.id,
"datasource": datasource,
}
@pytest.fixture
def permalink_salt() -> Iterator[str]:
from superset.key_value.shared_entries import get_permalink_salt, get_uuid_namespace
from superset.key_value.types import SharedKey
key = SharedKey.EXPLORE_PERMALINK_SALT
salt = get_permalink_salt(key)
yield salt
namespace = get_uuid_namespace(salt)
db.session.query(KeyValueEntry).filter_by(
resource=KeyValueResource.APP,
uuid=uuid3(namespace, key),
)
db.session.commit()
def test_post(client, form_data: Dict[str, Any], permalink_salt: str):
login(client, "admin")
resp = client.post(f"api/v1/explore/permalink", json={"formData": form_data})
assert resp.status_code == 201
data = json.loads(resp.data.decode("utf-8"))
key = data["key"]
url = data["url"]
assert key in url
id_ = decode_permalink_id(key, permalink_salt)
db.session.query(KeyValueEntry).filter_by(id=id_).delete()
db.session.commit()
def test_post_access_denied(client, form_data):
login(client, "gamma")
resp = client.post(f"api/v1/explore/permalink", json={"formData": form_data})
assert resp.status_code == 404
def test_get_missing_chart(client, chart, permalink_salt: str) -> None:
from superset.key_value.models import KeyValueEntry
chart_id = 1234
entry = KeyValueEntry(
resource=KeyValueResource.EXPLORE_PERMALINK,
value=pickle.dumps(
{
"chartId": chart_id,
"datasetId": chart.datasource.id,
"formData": {
"slice_id": chart_id,
"datasource": f"{chart.datasource.id}__{chart.datasource.type}",
},
}
),
)
db.session.add(entry)
db.session.commit()
key = encode_permalink_key(entry.id, permalink_salt)
login(client, "admin")
resp = client.get(f"api/v1/explore/permalink/{key}")
assert resp.status_code == 404
db.session.delete(entry)
db.session.commit()
def test_post_invalid_schema(client) -> None:
login(client, "admin")
resp = client.post(f"api/v1/explore/permalink", json={"abc": 123})
assert resp.status_code == 400
def test_get(client, form_data: Dict[str, Any], permalink_salt: str) -> None:
login(client, "admin")
resp = client.post(f"api/v1/explore/permalink", json={"formData": form_data})
data = json.loads(resp.data.decode("utf-8"))
key = data["key"]
resp = client.get(f"api/v1/explore/permalink/{key}")
assert resp.status_code == 200
result = json.loads(resp.data.decode("utf-8"))
assert result["state"]["formData"] == form_data
id_ = decode_permalink_id(key, permalink_salt)
db.session.query(KeyValueEntry).filter_by(id=id_).delete()
db.session.commit()
| [
"noreply@github.com"
] | mistercrunch.noreply@github.com |
2f713e0dd1eb4f9a24282abd34f0cd299d4f793e | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_express_route_ports_operations.py | cccf62982af78ecf2653e1deea059c7298802791 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 30,025 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsOperations(object):
"""ExpressRoutePortsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        express_route_port_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified ExpressRoutePort resource.

        Starts the deletion as a long-running operation and returns a poller
        that completes (with no result) once the service finishes.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial DELETE request now.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                express_route_port_name=express_route_port_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call and must not be
        # forwarded into the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deletion yields no body; only invoke the custom deserializer if given.
            if cls:
                return cls(pipeline_response, None, {})

        # Resolve the polling strategy: default ARM polling, disabled, or caller-supplied.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
def get(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRoutePort"
"""Retrieves the requested ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of ExpressRoutePort.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePort, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.ExpressRoutePort
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "models.ExpressRoutePort"
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRoutePort"
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRoutePort')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        express_route_port_name,  # type: str
        parameters,  # type: "models.ExpressRoutePort"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["models.ExpressRoutePort"]
        """Creates or updates the specified ExpressRoutePort resource.

        Starts the PUT as a long-running operation and returns a poller whose
        result is the created/updated resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :param parameters: Parameters supplied to the create ExpressRoutePort operation.
        :type parameters: ~azure.mgmt.network.v2020_06_01.models.ExpressRoutePort
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRoutePort or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.ExpressRoutePort]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRoutePort"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PUT request now.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                express_route_port_name=express_route_port_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call and must not be
        # forwarded into the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response body into the resource model.
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Resolve the polling strategy: default ARM polling, disabled, or caller-supplied.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
def update_tags(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRoutePort"
"""Update ExpressRoutePort tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param parameters: Parameters supplied to update ExpressRoutePort resource tags.
:type parameters: ~azure.mgmt.network.v2020_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePort, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.ExpressRoutePort
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.ExpressRoutePortListResult"]
        """List all the ExpressRoutePort resources in the specified resource group.

        Returns a lazy pager; no request is sent until the returned iterator is
        consumed.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.ExpressRoutePortListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRoutePortListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the page GET request. The first page uses the templated URL
            # plus query parameters; subsequent pages use the service-provided
            # next_link verbatim (it already embeds the query string).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation link, item iterator).
            deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page, mapping non-200 responses to typed errors.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts'}  # type: ignore
def list(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.ExpressRoutePortListResult"]
    """List all the ExpressRoutePort resources in the specified subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.ExpressRoutePortListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # NOTE: autogenerated SDK method; the name intentionally shadows the builtin `list`
    # to match the REST operation name.
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRoutePortListResult"]
    # Map HTTP status codes to SDK exception types; callers may extend via 'error_map'.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-06-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the first-page GET (templated URL + api-version) or, when the
        # service returned a next_link, a follow-up GET against that URL as-is.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (next_link, iterator over the page items).
        deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page through the pipeline; raise on any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    # ItemPaged drives get_next/extract_data lazily, one page at a time.
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePorts'}  # type: ignore
def generate_loa(
    self,
    resource_group_name,  # type: str
    express_route_port_name,  # type: str
    request,  # type: "models.GenerateExpressRoutePortsLOARequest"
    **kwargs  # type: Any
):
    # type: (...) -> "models.GenerateExpressRoutePortsLOAResult"
    """Generate a letter of authorization for the requested ExpressRoutePort resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param express_route_port_name: The name of ExpressRoutePort.
    :type express_route_port_name: str
    :param request: Request parameters supplied to generate a letter of authorization.
    :type request: ~azure.mgmt.network.v2020_06_01.models.GenerateExpressRoutePortsLOARequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: GenerateExpressRoutePortsLOAResult, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_06_01.models.GenerateExpressRoutePortsLOAResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.GenerateExpressRoutePortsLOAResult"]
    # Map HTTP status codes to SDK exception types; callers may extend via 'error_map'.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.generate_loa.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request model into the POST body and send it through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(request, 'GenerateExpressRoutePortsLOARequest')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('GenerateExpressRoutePortsLOAResult', pipeline_response)

    if cls:
        # Give the caller-supplied hook the raw response, the model, and (empty) headers.
        return cls(pipeline_response, deserialized, {})

    return deserialized
generate_loa.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRoutePorts/{expressRoutePortName}/generateLoa'}  # type: ignore
| [
"noreply@github.com"
] | paultaiton.noreply@github.com |
d28962804b2abad424e448abfdee8dd9f2b68f69 | 06cc07502c88cfda7335a7605eef7e8ec0043c01 | /app.py | dfa30cd22cd10f084ddb6cfb501479e4bd6336f8 | [] | no_license | sumayah20-meet/Y2-Individual-Project-yl1920 | 16f2b767a9f485b585b2a49e65fc54c74bbae543 | 2e9d06774feeb80b2c625972d1db700de87428fd | refs/heads/master | 2020-12-23T05:31:36.609884 | 2020-01-30T14:52:15 | 2020-01-30T14:52:15 | 237,051,980 | 0 | 0 | null | 2020-01-29T18:20:09 | 2020-01-29T18:20:08 | null | UTF-8 | Python | false | false | 1,359 | py | from flask import Flask, request, redirect, url_for, render_template
from flask import session as login_session
from databases import *
app=Flask(__name__)
app.secret_key="MY_SUPER_SECRET_KEY"
@app.route('/', methods=["GET", "POST"])
def signIN():
    """Serve the sign-in form, and on submission validate the credentials.

    GET renders the form; POST checks the submitted username/password via
    `signin` and shows the home page on success, the form again on failure.
    """
    if request.method == "POST":
        submitted_user = request.form["uname"]
        submitted_pass = request.form["psw"]
        if signin(submitted_user, submitted_pass):
            return render_template("index.html")
        print("try again")
        return render_template('signin.html')
    return render_template('signin.html')
@app.route('/signup',methods=["POST","GET"])
def signUp():
    """Show the registration form (GET) or persist the account and redirect (POST)."""
    if request.method != "GET":
        # NOTE(review): the password is stored as-is by `save` — confirm it is hashed downstream.
        save(request.form['email'], request.form['psw'])
        return redirect(url_for('signIN'))
    return render_template('signup.html')
@app.route('/index')
def index():
    """Render the home page."""
    page = 'index.html'
    return render_template(page)
@app.route('/about-us')
def hello():
    """Render the about-us page."""
    page = 'about-us.html'
    return render_template(page)
@app.route('/contact', methods=["GET","POST"])
def contact():
    """Serve the contact form; on POST persist the submitted pair and show home.

    The submitted username and password are echoed back to the index template
    as `u` and `p`.
    """
    if request.method != "GET":
        submitted_user = request.form['uname']
        submitted_pass = request.form["psw"]
        save(submitted_user, submitted_pass)
        return render_template('index.html', u=submitted_user, p=submitted_pass)
    return render_template('contact.html')
app.run(debug = True) | [
"myname21@meet.mit.edu"
] | myname21@meet.mit.edu |
d99790c7a7ab62bf4ceef5930255e68034969f27 | 49f81640f961e74668116b2600fe3c77646cc94d | /notebooks/howtofit/chapter_phase_api/src/plot/dataset_plots.py | c4d3eae50801dbc3904c5646a13ff71e9d385c1e | [] | no_license | knut0815/autofit_workspace | 5b97b37f5cf28f6b723c7cca73fa6b9a95e8ffc2 | 4a4fdacf62614150f500716cc8eca1613ae2e4af | refs/heads/master | 2023-03-01T23:32:06.681456 | 2021-02-08T19:48:18 | 2021-02-08T19:48:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,225 | py | import matplotlib.pyplot as plt
from os import path
from src.dataset import dataset as ds
from src.plot import line_plots
"""
These functions are simple matplotlib calls that plot components of our Line class, specifically its data and
noise-map. We additional include a function that plots the dataset on a single subplot.
Storing simple functions like this for plotting components of our `Dataset` will prove beneficial when using the
`Aggregator`, when it comes to inspecting the results of a model-fit after they have been completed.
"""
def subplot_dataset(
    dataset: ds.Dataset,
    output_path: str = None,
    output_filename: str = None,
    output_format: str = "show",
):
    """
    Plot the `Dataset` using a subplot containing both its data and noise-map.

    Parameters
    -----------
    dataset : Dataset
        The observed `Dataset` which is plotted.
    output_path : str
        The path where the image of the data is output, if saved as a `.png`.
    output_filename : str
        The name of the file the image of the data is output too, if saved as a `.png`.
    output_format : str
        Whether the data is output as a `.png` file ("png") or displayed on the screen ("show").
    """
    plt.figure(figsize=(18, 8))
    # Left panel: the data with error bars; right panel: the noise-map.
    plt.subplot(1, 2, 1)
    data(
        dataset=dataset,
        output_path=output_path,
        output_filename=output_filename,
        output_format=output_format,
        bypass_show=True,  # individual panels must not show/save on their own
    )
    plt.subplot(1, 2, 2)
    noise_map(
        dataset=dataset,
        output_path=output_path,
        output_filename=output_filename,
        output_format=output_format,
        bypass_show=True,
    )
    # Output the assembled two-panel figure in the requested format, then clear it.
    if "show" in output_format:
        plt.show()
    elif "png" in output_format:
        plt.savefig(path.join(output_path, f"{output_filename}.png"))
    plt.clf()
def data(
    dataset: ds.Dataset,
    output_path: str = None,
    output_filename: str = None,
    output_format: str = "show",
    bypass_show: bool = False,
):
    """
    Plot the data values of a `Dataset` object as a line with error bars.

    Parameters
    -----------
    dataset : Dataset
        The observed `Dataset` whose data is plotted.
    output_path : str
        The path where the image of the data is output, if saved as a `.png`.
    output_filename : str
        The name of the file the image of the data is output too, if saved as a `.png`.
    output_format : str
        Whether the data is output as a `.png` file ("png") or displayed on the screen ("show").
    bypass_show : bool
        If `True` the show or savefig function is bypassed. This is used when plotting subplots.
    """
    plot_settings = dict(
        xvalues=dataset.xvalues,
        line=dataset.data,
        errors=dataset.noise_map,  # noise-map doubles as the error bars on the data
        title="Data",
        ylabel="Data Values",
        color="k",
        output_path=output_path,
        output_filename=output_filename,
        output_format=output_format,
        bypass_show=bypass_show,
    )
    line_plots.figure(**plot_settings)
def noise_map(
    dataset: ds.Dataset,
    output_path: str = None,
    output_filename: str = None,
    output_format: str = "show",
    bypass_show: bool = False,
):
    """
    Plot the noise-map of a `Dataset` object.

    Parameters
    -----------
    dataset : Dataset
        The observed `Dataset` whose noise-map is plotted.
    output_path : str
        The path where the image of the noise-map is output, if saved as a `.png`.
    output_filename : str
        The name of the file the image of the noise-map is output too, if saved as a `.png`.
    output_format : str
        Whether the noise-map is output as a `.png` file ("png") or displayed on the screen ("show").
    bypass_show : bool
        If `True` the show or savefig function is bypassed. This is used when plotting subplots.
    """
    plot_settings = dict(
        xvalues=dataset.xvalues,
        line=dataset.noise_map,
        title="Noise-Map",
        ylabel="Noise-Map",
        color="k",
        output_path=output_path,
        output_filename=output_filename,
        output_format=output_format,
        bypass_show=bypass_show,
    )
    line_plots.figure(**plot_settings)
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
8a29a1d2704b98046bf0712111e3b92806c18cd4 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/ajbrock_Neural-Photo-Editor/Neural-Photo-Editor-master/NPE.py | 4aac4ab06b4f040bf7e295f5827912de3c7781c5 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 16,656 | py | ### Neural Photo Editor
# A Brock, 2016
## Gui tests
# Note: THe width of the edge of the window in X is 8, the height of the top of the window in y is 30.
# TO DO: Go through and fix all the shit we've broken, lol
# TO DO: Try and remember the change we wanted to make for imgradRGB that involved not passing a fat array and indexing it?
# Maybe it was keep the rest of the image constant but only modify that part? That would be a simple change.
# TO DO: Clean up, reorganize, consider aliases (espcially the tkinter * import)
# TODO: Clean up all variable names, especially between function library and theano vars
# Idea: Localize changes? Apply a prior saying the changes are 100% near the cursor and fading to 0% far away. Combine that with mask
# Keep ERROR as a float32
# ^ Part of why we need this is clear with 550: if we have a reconstruction that misses a feature (turns open mouth to closed) then changing towards
# closed won't change the mask since the recon is already there. This will hopefully help account for mistakes
# Consider making MASK mean of abs instead of abs of mean?
# Consider making the imgradRGB use L1 loss instead of L2 loss?
# Final to-do: make everything nice and wrap that shit into functions
# Final to-do: May need to move widget creation up or down somehow
### Imports
from Tkinter import * # Note that I dislike the * on the Tkinter import, but all the tutorials seem to do that so I stuck with it.
from tkColorChooser import askcolor # This produces an OS-dependent color selector. I like the windows one best, and can't stand the linux one.
from collections import OrderedDict
from PIL import Image, ImageTk
import numpy as np
import scipy.misc
from API import IAN
### Step 1: Create theano functions
# Initialize model
model = IAN(config_path = 'IAN_simple.py', dnn = True)
### Prepare GUI functions
print('Compiling remaining functions')
# Create master
master = Tk()
master.title( "Neural Photo Editor" )
# RGB interpreter convenience function
def rgb(r, g, b):
    """Format an (r, g, b) integer triple as a Tk-style hex colour string."""
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)
# Convert RGB to bi-directional RB scale.
def rb(i):
    """Map a signed intensity (roughly [-255, 255]) onto a blue/white/red scale.

    Zero maps to white; increasingly positive values shade toward red and
    increasingly negative values toward blue. Values beyond +/-255 saturate.
    """
    level = int(i)
    negative_part = level if level < 0 else 0
    positive_part = level if level > 0 else 0
    red = 255 + max(negative_part, -255)
    green = 255 - min(abs(level), 255)
    blue = 255 - min(positive_part, 255)
    return '#%02x%02x%02x' % (red, green, blue)
# Convenience functions to go from [0,255] to [-1,1] and [-1,1] to [0,255]
def to_tanh(input):
    """Rescale pixel values from [0, 255] into the tanh range [-1, 1]."""
    unit_scaled = input / 255.0
    return 2.0 * unit_scaled - 1.0
def from_tanh(input):
    """Rescale values from the tanh range [-1, 1] back to pixel values [0, 255]."""
    shifted = input + 1
    return 255.0 * shifted / 2.0
# Ground truth image
GIM=np.asarray(np.load('CelebAValid.npz')['arr_0'][420])
# Image for modification
IM = GIM
# Reconstruction
RECON = IM
# Error between reconstruction and current image
ERROR = np.zeros(np.shape(IM),dtype=np.float32)
# Change between Recon and Current
DELTA = np.zeros(np.shape(IM),dtype=np.float32)
# User-Painted Mask, currently not implemented.
USER_MASK=np.mean(DELTA,axis=0)
# Are we operating on a photo or a sample?
SAMPLE_FLAG=0
### Latent Canvas Variables
# Latent Square dimensions
dim = [10,10]
# Squared Latent Array
Z = np.zeros((dim[0],dim[1]),dtype=np.float32)
# Pixel-wise resolution for latent canvas
res = 16
# Array that holds the actual latent canvas
r = np.zeros((res*dim[0],res*dim[1]),dtype=np.float32)
# Painted rectangles for free-form latent painting
painted_rects = []
# Actual latent rectangles
rects = np.zeros((dim[0],dim[1]),dtype=int)
### Output Display Variables
# RGB paintbrush array
myRGB = np.zeros((1,3,64,64),dtype=np.float32);
# Canvas width and height
canvas_width = 400
canvas_height = 400
# border width
bd =2
# Brush color
color = IntVar()
color.set(0)
# Brush size
d = IntVar()
d.set(12)
# Selected Color
mycol = (0,0,0)
# Function to update display
def update_photo(data=None,widget=None):
    """Render an image onto a Tk canvas, upsampled 4x by pixel replication.

    data: a 3x64x64 image array, or None to decode and show the current global Z.
    widget: the target canvas; defaults to the main `output` canvas.
    """
    global Z
    if data is None: # By default, assume we're updating with the current value of Z
        data = np.repeat(np.repeat(np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0])),4,1),4,2)
    else:
        # Upsample the supplied 64x64 image to 256x256 by repeating pixels.
        data = np.repeat(np.repeat(np.uint8(data),4,1),4,2)
    if widget is None:
        widget = output
    # Reshape image to canvas: stack the three channels into an HxWx3 RGB image.
    mshape = (4*64,4*64,1)
    im = Image.fromarray(np.concatenate([np.reshape(data[0],mshape),np.reshape(data[1],mshape),np.reshape(data[2],mshape)],axis=2),mode='RGB')
    # Make sure photo is an object of the current widget so the garbage collector doesn't wreck it
    widget.photo = ImageTk.PhotoImage(image=im)
    widget.create_image(0,0,image=widget.photo,anchor=NW)
    # Keep the paintbrush indicator drawn on top of the new image.
    widget.tag_raise(pixel_rect)
# Function to update the latent canvas.
def update_canvas(widget=None):
    """Repaint the latent canvas so each cell shows the current value of Z.

    widget: the latent canvas; defaults to the global canvas `w`.
    """
    global r, Z, res, rects, painted_rects
    if widget is None:
        widget = w
    # Update display values: blow Z up to the pixel resolution of the canvas array.
    r = np.repeat(np.repeat(Z,r.shape[0]//Z.shape[0],0),r.shape[1]//Z.shape[1],1)
    # If we're letting freeform painting happen, delete the painted rectangles
    for p in painted_rects:
        w.delete(p)
    painted_rects = []
    # Recolour every latent cell rectangle from its Z value on the red/blue scale.
    for i in range(Z.shape[0]):
        for j in range(Z.shape[1]):
            w.itemconfig(int(rects[i,j]),fill = rb(255*Z[i,j]),outline = rb(255*Z[i,j]))
# Function to move the paintbrush
def move_mouse( event ):
    """Track the cursor over the output canvas and reposition the brush rectangle.

    Converts the 4x-upsampled canvas coordinates back to 64x64 image pixels,
    clamps the brush box to the image bounds, and recolours its outline to the
    currently selected colour.
    """
    global output
    # using a rectangle width equivalent to d/4 (so 1-16)
    # First, get location and extent of local patch
    x,y = event.x//4,event.y//4
    brush_width = ((d.get()//4)+1)
    # if x is near the left corner, then the minimum x is dependent on how close it is to the left
    xmin = max(min(x-brush_width//2,64 - brush_width),0) # This 64 may need to change if the canvas size changes
    xmax = xmin+brush_width
    ymin = max(min(y-brush_width//2,64 - brush_width),0) # This 64 may need to change if the canvas size changes
    ymax = ymin+brush_width
    # update output canvas (back in 4x display coordinates)
    output.coords(pixel_rect,4*xmin,4*ymin,4*xmax,4*ymax)
    output.tag_raise(pixel_rect)
    output.itemconfig(pixel_rect,outline = rgb(mycol[0],mycol[1],mycol[2]))
### Optional functions for the Neural Painter
# Localized Gaussian Smoothing Kernel
# Use this if you want changes to MASK to be more localized to the brush location in soe sense
def gk(c1,r1,c2,r2):
    """Localized Gaussian smoothing kernel around the brush box.

    Builds a 64x64 weight map that is 1 inside the rectangle spanned by
    columns [c1, c2) and rows [r1, r2) (distance terms are zero there, so
    exp(0) = 1) and decays with a Gaussian falloff outside it, then stacks
    it to shape (3, 64, 64) so it can weight an RGB image channel-wise.
    """
    # First, create X and Y arrays indicating distance to the boundaries of the paintbrush
    # In this current context, im is the ordinal number of pixels (64 typically)
    sigma = 0.3
    im = 64
    x = np.repeat([np.concatenate([np.mgrid[-c1:0],np.zeros(c2-c1),np.mgrid[1:1+im-c2]])],im,axis=0)
    y = np.repeat(np.vstack(np.concatenate([np.mgrid[-r1:0],np.zeros(r2-r1),np.mgrid[1:1+im-r2]])),im,axis=1)
    g = np.exp(-(x**2/float(im)+y**2/float(im))/(2*sigma**2))
    return np.repeat([g],3,axis=0) # remove the 3 if you want to apply this to mask rather than an RGB channel
# This function reduces the likelihood of a change based on how close each individual pixel is to a maximal value.
# Consider conditioning this based on the gK value and the requested color. I.E. instead of just a flat distance from 128,
# have it be a difference from the expected color at a given location. This could also be used to "weight" the image towards staying the same.
def upperlim(image):
    """Weight in (0, 1] that falls off as pixel values move away from mid-grey (128).

    Used to reduce the likelihood of a change for pixels near a maximal value.
    """
    h = 1  # decay rate: larger h makes the falloff gentler
    distance_from_mid = np.abs(image - 128)
    return 1.0 / ((1.0 / h) * distance_from_mid + 1)
# Similar to upperlim, this function changes the value of the correction term if it's going to move pixels too close to a maximal value
def dampen(input, correct):
    """Limit a correction so that input + correct does not exceed a threshold.

    Wherever the corrected value would pass `thresh` (0.75), the correction is
    replaced by exactly the amount that lands on the threshold; elsewhere the
    correction is returned unchanged. Works element-wise on arrays or scalars.
    """
    thresh = 0.75
    over = (input + correct) > thresh
    # Saturating entries get (thresh - input); the rest keep their correction.
    return (thresh - input) * over + correct * (1 - over)
### Neural Painter Function
def paint( event ):
    """Neural-painting step: nudge Z so the decoded image matches the brush colour.

    Takes one gradient step on the latent vector to pull the pixels under the
    brush toward `myRGB`, then refreshes both the latent and output canvases.
    """
    global Z, output, myRGB, IM, ERROR, RECON, USER_MASK, SAMPLE_FLAG
    # Move the paintbrush
    move_mouse(event)
    # Define a gradient descent step-size
    weight = 0.05
    # Get paintbrush location (in 64x64 image coordinates)
    [x1,y1,x2,y2] = [coordinate//4 for coordinate in output.coords(pixel_rect)]
    # Get dIM/dZ that minimizes the difference between IM and RGB in the domain of the paintbrush
    temp = np.asarray(model.imgradRGB(x1,y1,x2,y2,np.float32(to_tanh(myRGB)),np.float32([Z.flatten()]))[0])
    grad = temp.reshape((10,10))*(1+(x2-x1))  # scale the step with the brush width
    # Update Z
    Z -=weight*grad
    # If operating on a sample, update sample
    if SAMPLE_FLAG:
        update_canvas(w)
        update_photo(None,output)
    # Else, update photo
    else:
        # Difference between current image and reconstruction
        DELTA = model.sample_at(np.float32([Z.flatten()]))[0]-to_tanh(np.float32(RECON))
        # Not-Yet-Implemented User Mask feature
        # USER_MASK[y1:y2,x1:x2]+=0.05
        # Get MASK: smoothed, capped per-pixel magnitude of the change
        MASK=scipy.ndimage.filters.gaussian_filter(np.min([np.mean(np.abs(DELTA),axis=0),np.ones((64,64))],axis=0),0.7)
        # Optionally dampen D
        # D = dampen(to_tanh(np.float32(RECON)),MASK*DELTA+(1-MASK)*ERROR)
        # Update image: blend the latent-driven change with the reconstruction error
        D = MASK*DELTA+(1-MASK)*ERROR
        IM = np.uint8(from_tanh(to_tanh(RECON)+D))
        # Pass updates
        update_canvas(w)
        update_photo(IM,output)
# Load an image and infer/reconstruct from it. Update this with a function to load your own images if you want to edit
# non-celebA photos.
def infer():
    """Load the CelebA validation image whose index is typed in the entry box,
    infer its latents, and reset all editing state to that image.

    Falls back to image 420 when the entry box does not contain an integer.
    """
    global Z,w,GIM,IM,ERROR,RECON,DELTA,USER_MASK,SAMPLE_FLAG
    val = myentry.get()
    try:
        val = int(val)
        GIM = np.asarray(np.load('CelebAValid.npz')['arr_0'][val])
        IM = GIM
    except ValueError:
        print "No input"
        val = 420
        GIM = np.asarray(np.load('CelebAValid.npz')['arr_0'][val])
        IM = GIM
    # myentry.delete(0, END) # Optionally, clear entry after typing it in
    # Reset Delta
    DELTA = np.zeros(np.shape(IM),dtype=np.float32)
    # Infer and reshape latents. This can be done without an intermediate variable if desired
    s = model.encode_images(np.asarray([to_tanh(IM)],dtype=np.float32))
    Z = np.reshape(s[0],np.shape(Z))
    # Get reconstruction
    RECON = np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0]))
    # Get error
    ERROR = to_tanh(np.float32(IM)) - to_tanh(np.float32(RECON))
    # Reset user mask
    USER_MASK*=0
    # Clear the sample flag
    SAMPLE_FLAG=0
    # Update photo
    update_photo(IM,output)
    update_canvas(w)
# Paint directly into the latent space
def paint_latents( event ):
    """Paint directly on the latent canvas: set the latent cells under the
    brush to the slider colour, recompute Z by cell-wise averaging, and
    refresh the decoded output.
    """
    global r, Z, output,painted_rects,MASK,USER_MASK,RECON
    # Get extent of latent paintbrush
    x1, y1 = ( event.x - d.get() ), ( event.y - d.get() )
    x2, y2 = ( event.x + d.get() ), ( event.y + d.get() )
    selected_widget = event.widget
    # Paint in latent space and update Z
    painted_rects.append(event.widget.create_rectangle( x1, y1, x2, y2, fill = rb(color.get()),outline = rb(color.get()) ))
    r[max((y1-bd),0):min((y2-bd),r.shape[0]),max((x1-bd),0):min((x2-bd),r.shape[1])] = color.get()/255.0;
    # Z is the per-cell mean of the painted pixel canvas r.
    Z = np.asarray([np.mean(o) for v in [np.hsplit(h,Z.shape[0])\
        for h in np.vsplit((r),Z.shape[1])]\
        for o in v]).reshape(Z.shape[0],Z.shape[1])
    if SAMPLE_FLAG:
        update_photo(None,output)
        update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
    else:
        # Blend the latent-driven change with the reconstruction error, as in paint().
        DELTA = model.sample_at(np.float32([Z.flatten()]))[0]-to_tanh(np.float32(RECON))
        MASK=scipy.ndimage.filters.gaussian_filter(np.min([np.mean(np.abs(DELTA),axis=0),np.ones((64,64))],axis=0),0.7)
        # D = dampen(to_tanh(np.float32(RECON)),MASK*DELTA+(1-MASK)*ERROR)
        D = MASK*DELTA+(1-MASK)*ERROR
        IM = np.uint8(from_tanh(to_tanh(RECON)+D))
        update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
        update_photo(IM,output)
# Scroll to lighten or darken an image patch
def scroll( event ):
    """Mouse-wheel handler: brighten or darken the patch under the brush.

    Takes a latent gradient step whose sign follows the scroll direction.
    """
    global r,Z,output
    # Optional alternate method to get a single X Y point
    # x,y = np.floor( ( event.x - (output.winfo_rootx() - master.winfo_rootx()) ) / 4), np.floor( ( event.y - (output.winfo_rooty() - master.winfo_rooty()) ) / 4)
    weight = 0.1
    [x1,y1,x2,y2] = [coordinate//4 for coordinate in output.coords(pixel_rect)]
    # Gradient of the patch intensity w.r.t. Z, scaled with the brush width.
    grad = np.reshape(model.imgrad(x1,y1,x2,y2,np.float32([Z.flatten()]))[0],Z.shape)*(1+(x2-x1))
    Z+=np.sign(event.delta)*weight*grad
    update_canvas(w)
    update_photo(None,output)
# Samples in the latent space
def sample():
    """Draw a fresh standard-normal latent Z, decode it, and display the sample."""
    global Z, output,RECON,IM,ERROR,SAMPLE_FLAG
    Z = np.random.randn(Z.shape[0],Z.shape[1])
    # Z = np.random.uniform(low=-1.0,high=1.0,size=(Z.shape[0],Z.shape[1])) # Optionally get uniform sample
    # Update reconstruction and error
    RECON = np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0]))
    ERROR = to_tanh(np.float32(IM)) - to_tanh(np.float32(RECON))
    update_canvas(w)
    # Mark that we are now editing a sample rather than a photo.
    SAMPLE_FLAG=1
    update_photo(None,output)
# Reset to ground-truth image
def Reset():
    """Restore the ground-truth image GIM, re-infer its latents, and refresh the GUI."""
    global GIM,IM,Z, DELTA,RECON,ERROR,USER_MASK,SAMPLE_FLAG
    IM = GIM
    # Re-encode the ground truth into the latent grid.
    Z = np.reshape(model.encode_images(np.asarray([to_tanh(IM)],dtype=np.float32))[0],np.shape(Z))
    DELTA = np.zeros(np.shape(IM),dtype=np.float32)
    RECON = np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0]))
    ERROR = to_tanh(np.float32(IM)) - to_tanh(np.float32(RECON))
    USER_MASK*=0
    SAMPLE_FLAG=0
    update_canvas(w)
    update_photo(IM,output)
def UpdateGIM():
    """Promote the currently edited image to be the new ground truth, then re-infer."""
    global GIM,IM
    GIM = IM
    Reset()# Recalc the latent space for the new ground-truth image.
# Change brush size
def update_brush(event):
    """Redraw the brush-preview swatch to reflect the current size and colour sliders."""
    # Clear the 25x25 preview to white, then draw the brush at its scaled size.
    brush.create_rectangle(0,0,25,25,fill=rgb(255,255,255),outline=rgb(255,255,255))
    brush.create_rectangle( int(12.5-d.get()/4.0), int(12.5-d.get()/4.0), int(12.5+d.get()/4.0), int(12.5+d.get()/4.0), fill = rb(color.get()),outline = rb(color.get()) )
# assign color picker values to myRGB
def getColor():
    """Open the OS colour picker and store the chosen colour in mycol and myRGB."""
    global myRGB, mycol
    col = askcolor(mycol)
    if col[0] is None:
        return # Dont change color if Cancel pressed.
    mycol = col[0]
    # Broadcast each chosen RGB component across its full 64x64 channel.
    for i in xrange(3): myRGB[0,i,:,:] = mycol[i]; # assign
# Optional function to "lock" latents so that gradients are always evaluated with respect to the locked Z
# def lock():
# global Z,locked, Zlock, lockbutton
# lockbutton.config(relief='raised' if locked else 'sunken')
# Zlock = Z if not locked else Zlock
# locked = not locked
# lockbutton = Button(f, text="Lock", command=lock,relief='raised')
# lockbutton.pack(side=LEFT)
### Prepare GUI
master.bind("<MouseWheel>",scroll)
# Prepare drawing canvas
f=Frame(master)
f.pack(side=TOP)
output = Canvas(f,name='output',width=64*4,height=64*4)
output.bind('<Motion>',move_mouse)
output.bind('<B1-Motion>', paint )
pixel_rect = output.create_rectangle(0,0,4,4,outline = 'yellow')
output.pack()
# Prepare latent canvas
f = Frame(master,width=res*dim[0],height=dim[1]*10)
f.pack(side=TOP)
w = Canvas(f,name='canvas', width=res*dim[0],height=res*dim[1])
w.bind( "<B1-Motion>", paint_latents )
# Produce painted rectangles
for i in range(Z.shape[0]):
for j in range(Z.shape[1]):
rects[i,j] = w.create_rectangle( j*res, i*res, (j+1)*res, (i+1)*res, fill = rb(255*Z[i,j]),outline = rb(255*Z[i,j]) )
# w.create_rectangle( 0,0,res*dim[0],res*dim[1], fill = rgb(255,255,255),outline=rgb(255,255,255)) # Optionally Initialize canvas to white
w.pack()
# Color gradient
gradient = Canvas(master, width=400, height=20)
gradient.pack(side=TOP)
# gradient.grid(row=i+1)
for j in range(-200,200):
gradient.create_rectangle(j*255/200+200,0,j*255/200+201,20,fill = rb(j*255/200),outline=rb(j*255/200))
# Color scale slider
f= Frame(master)
Scale(master, from_=-255, to=255,length=canvas_width, variable = color,orient=HORIZONTAL,showvalue=0,command=update_brush).pack(side=TOP)
# Buttons and brushes
Button(f, text="Sample", command=sample).pack(side=LEFT)
Button(f, text="Reset", command=Reset).pack(side=LEFT)
Button(f, text="Update", command=UpdateGIM).pack(side=LEFT)
brush = Canvas(f,width=25,height=25)
Scale(f, from_=0, to=64,length=100,width=25, variable = d,orient=HORIZONTAL,showvalue=0,command=update_brush).pack(side=LEFT) # Brush diameter scale
brush.pack(side=LEFT)
inferbutton = Button(f, text="Infer", command=infer)
inferbutton.pack(side=LEFT)
colorbutton=Button(f,text='Col',command=getColor)
colorbutton.pack(side=LEFT)
myentry = Entry()
myentry.pack(side=LEFT)
f.pack(side=TOP)
print('Running')
# Reset and infer to kick it off
Reset()
infer()
mainloop()
| [
"659338505@qq.com"
] | 659338505@qq.com |
5345f42313ef7de64d8be3c515006672cfbe3f6a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02623/s047885182.py | 44b5644e8a45249917468d9e82d48b09397b8cd8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | n, m, k =map(int, input().split())
a=list(map(int, input().split()))
b=list(map(int, input().split()))
ta=sum(a)
a.append(0)
tb=0
ans=0
j=0
for i in range(n+1):
ta -= a[n-i]
if ta>k:
continue
while tb + ta<=k:
if j ==m:
ans=max(ans,n-i+j)
break
ans=max(ans,n-i+j)
tb += b[j]
j +=1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d291ae309bf15bcef30a1347c7e5b7ae95daa964 | 9fadff638a2a381e662ef7cb8b943b3a4622db2f | /RootTools/python/samples/samples_13TeV_RunIIAutumn18MiniAOD.py | 59655a278c3adaf741a54509a2b18a1fb07d7db3 | [] | no_license | cmg-xtracks/cmgtools-lite | 34c92bae02f5f19ce2a1f8384f873780636e9bf5 | 147c61f72ab4d118cfa5d938d7527c3e22e295bf | refs/heads/94X_dev | 2021-06-05T22:43:44.887643 | 2019-03-28T17:04:35 | 2019-03-28T17:04:35 | 142,139,866 | 1 | 0 | null | 2019-05-16T16:26:07 | 2018-07-24T09:59:33 | Python | UTF-8 | Python | false | false | 17,726 | py | #
# How to get here:
# -> https://twiki.cern.ch/twiki/bin/viewauth/CMS/PdmV2018Analysis
# -> https://cmsweb.cern.ch/das/request?view=list&limit=50&instance=prod%2Fglobal&input=%2F*%2FRunIIAutumn18MiniAOD*102X_upgrade2018_realistic_v15*%2FMINIAODSIM
#
# COMPONENT CREATOR
from CMGTools.RootTools.samples.ComponentCreator import ComponentCreator
kreator = ComponentCreator()
#
# --> https://cmsweb.cern.ch/das/request?view=list&limit=50&instance=prod%2Fglobal&input=%2FQCD_HT*_TuneCP5_13TeV-madgra*pythia8%2FRunIIAutumn18MiniAOD*102X_upgrade2018_realistic_v15*%2FMINIAODSIM
#
# QCD
QCD_HT100to200 = kreator.makeMCComponent("QCD_HT100to200" , "/QCD_HT100to200_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 2.463e+07*1.13073)
QCD_HT200to300 = kreator.makeMCComponent("QCD_HT200to300" , "/QCD_HT200to300_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 1.553e+06*1.1056 )
QCD_HT300to500 = kreator.makeMCComponent("QCD_HT300to500" , "/QCD_HT300to500_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 347500*1.01094 )
QCD_HT500to700 = kreator.makeMCComponent("QCD_HT500to700" , "/QCD_HT500to700_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 29930*1.0568 )
QCD_HT700to1000 = kreator.makeMCComponent("QCD_HT700to1000" , "/QCD_HT700to1000_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 6370*1.06782 )
QCD_HT1000to1500 = kreator.makeMCComponent("QCD_HT1000to1500", "/QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 1100*1.09636 )
QCD_HT1500to2000 = kreator.makeMCComponent("QCD_HT1500to2000", "/QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 98.71 )
QCD_HT2000toInf = kreator.makeMCComponent("QCD_HT2000toInf" , "/QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 20.2 )
QCD = [
QCD_HT100to200,
QCD_HT200to300,
QCD_HT300to500,
QCD_HT500to700,
QCD_HT700to1000,
QCD_HT1000to1500,
QCD_HT1500to2000,
QCD_HT2000toInf,
]
#
# --> https://cmsweb.cern.ch/das/request?view=list&limit=50&instance=prod%2Fglobal&input=%2FWJetsToLNu_HT-*_TuneCP5_13TeV-madgraphMLM-pythia8%2FRunIIAutumn18MiniAOD*102X_upgrade2018_realistic_v15*%2FMINIAODSIM
#
#
# /WJetsToLNu_HT-70To100_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM
# ---> missing xsec?
# https://cms-gen-dev.cern.ch/xsdb/?searchQuery=DAS=WJetsToLNu_HT-70To100_TuneCP5_13TeV-madgraphMLM-pythia8
#
# W + Jets
WJets_HT100to200 = kreator.makeMCComponent("WJets_HT100to200" , "/WJetsToLNu_HT-100To200_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 1345.0)
WJets_HT200to400 = kreator.makeMCComponent("WJets_HT200to400" , "/WJetsToLNu_HT-200To400_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 359.7)
WJets_HT400to600 = kreator.makeMCComponent("WJets_HT400to600" , "/WJetsToLNu_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 48.91)
WJets_HT600to800 = kreator.makeMCComponent("WJets_HT600to800" , "/WJetsToLNu_HT-600To800_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 12.05)
WJets_HT800to1200 = kreator.makeMCComponent("WJets_HT800to1200" , "/WJetsToLNu_HT-800To1200_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 5.501)
WJets_HT1200to2500 = kreator.makeMCComponent("WJets_HT1200to2500", "/WJetsToLNu_HT-1200To2500_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 1.329)
WJets_HT2500toInf = kreator.makeMCComponent("WJets_HT2500toInf" , "/WJetsToLNu_HT-2500ToInf_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 3.216e-2)
Ws = [
WJets_HT100to200,
WJets_HT200to400,
WJets_HT400to600,
WJets_HT600to800,
WJets_HT800to1200,
WJets_HT1200to2500,
WJets_HT2500toInf,
]
# DY + Jets
# Inclusive NLO (amcatnloFXFX) Drell-Yan sample; 1921.8 pb is the per-lepton
# cross section, multiplied by 3 lepton flavours. fracNegWeights is the
# fraction of negative-weight events in the NLO sample.
DYJetsM50 = kreator.makeMCComponent("DYJetsM50" , "/DYJetsToLL_M-50_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 1921.8*3, fracNegWeights=0.16)
#DYJetsM50e = kreator.makeMCComponent("DYJetsM50e" , "/DYJetsToLL_M-50_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14_ext1-v1/MINIAODSIM" , "CMS", ".*root", 1921.8*3, fracNegWeights=0.16)
#
# https://cmsweb.cern.ch/das/request?view=list&limit=50&instance=prod%2Fglobal&input=%2FDYJetsToLL_M-50_HT-*_Tune*ythia8%2FRunIIAutumn18MiniAOD*102X_upgrade2018_realistic_v15*%2FMINIAODSIM
#
#
#
# /DYJetsToLL_M-50_HT-70to100_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM ---> missing xsec
# /DYJetsToLL_M-50_HT-100to200_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM
# /DYJetsToLL_M-50_HT-200to400_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM
# /DYJetsToLL_M-50_HT-400to600_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v7/MINIAODSIM which one?
# /DYJetsToLL_M-50_HT-400to600_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v4/MINIAODSIM -
# /DYJetsToLL_M-50_HT-400to600_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v3/MINIAODSIM -
# /DYJetsToLL_M-50_HT-400to600_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM -
# /DYJetsToLL_M-50_HT-600to800_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM
# /DYJetsToLL_M-50_HT-800to1200_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM
# /DYJetsToLL_M-50_HT-2500toInf_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM
#
#
# HT-binned LO (madgraphMLM) DY samples; cross sections in pb.
# NOTE(review): for the HT-400to600 bin the v7 revision was picked among
# v2/v3/v4/v7 (see "which one?" above) -- confirm that choice is intentional.
DYJetsM50_HT100to200 = kreator.makeMCComponent("DYJetsM50_HT100to200" , "/DYJetsToLL_M-50_HT-100to200_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM" , "CMS", ".*root", 147.40)
DYJetsM50_HT200to400 = kreator.makeMCComponent("DYJetsM50_HT200to400" , "/DYJetsToLL_M-50_HT-200to400_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM" , "CMS", ".*root", 40.99)
DYJetsM50_HT400to600 = kreator.makeMCComponent("DYJetsM50_HT400to600" , "/DYJetsToLL_M-50_HT-400to600_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v7/MINIAODSIM" , "CMS", ".*root", 5.678)
DYJetsM50_HT600to800 = kreator.makeMCComponent("DYJetsM50_HT600to800" , "/DYJetsToLL_M-50_HT-600to800_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM" , "CMS", ".*root", 1.367)
DYJetsM50_HT800to1200 = kreator.makeMCComponent("DYJetsM50_HT800to1200" , "/DYJetsToLL_M-50_HT-800to1200_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM" , "CMS", ".*root", 0.6304)
# FIXME MISSING SAMPLE DYJetsM50_HT1200to2500 = kreator.makeMCComponent("DYJetsM50_HT1200to2500" , "/DYJetsToLL_M-50_HT-1200to2500_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM" , "CMS", ".*root", 0.1514)
DYJetsM50_HT2500toInf = kreator.makeMCComponent("DYJetsM50_HT2500toInf" , "/DYJetsToLL_M-50_HT-2500toInf_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM" , "CMS", ".*root", 3.565e-3)
# Inclusive NLO sample plus all available HT bins (HT-1200to2500 missing).
DY = [
DYJetsM50,
DYJetsM50_HT100to200,
DYJetsM50_HT200to400,
DYJetsM50_HT400to600,
DYJetsM50_HT600to800,
DYJetsM50_HT800to1200,
# FIXME MISSING SAMPLE DYJetsM50_HT1200to2500,
DYJetsM50_HT2500toInf,
]
# Z(vv) + Jets
#
# https://cmsweb.cern.ch/das/request?view=list&limit=50&instance=prod%2Fglobal&input=%2FZJetsToNuNu_HT*_13TeV-madgraph%2FRunIIAutumn18MiniAOD*102X_upgrade2018_realistic_v15*%2FMINIAODSIM
#
#
# /ZJetsToNuNu_HT-100To200_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM
# /ZJetsToNuNu_HT-200To400_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM
# /ZJetsToNuNu_HT-600To800_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM
# /ZJetsToNuNu_HT-800To1200_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM
# /ZJetsToNuNu_HT-1200To2500_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM
# /ZJetsToNuNu_HT-2500ToInf_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM
#
#
# Z(vv) + Jets: HT-binned ZJetsToNuNu samples (LO madgraph); cross sections in pb.
ZvvJets_HT100to200 = kreator.makeMCComponent("ZvvJets_HT100to200" , "/ZJetsToNuNu_HT-100To200_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 280.35 )
ZvvJets_HT200to400 = kreator.makeMCComponent("ZvvJets_HT200to400" , "/ZJetsToNuNu_HT-200To400_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 77.67 )
# FIXME MISSING SAMPLE ZvvJets_HT400to600 = kreator.makeMCComponent("ZvvJets_HT400to600" , "/ZJetsToNuNu_HT-400To600_13TeV-madgraph/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM" , "CMS", ".*root", 10.73 )
ZvvJets_HT600to800 = kreator.makeMCComponent("ZvvJets_HT600to800" , "/ZJetsToNuNu_HT-600To800_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 2.559 )
ZvvJets_HT800to1200 = kreator.makeMCComponent("ZvvJets_HT800to1200" , "/ZJetsToNuNu_HT-800To1200_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 1.1796 )
ZvvJets_HT1200to2500 = kreator.makeMCComponent("ZvvJets_HT1200to2500", "/ZJetsToNuNu_HT-1200To2500_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 0.28833 )
ZvvJets_HT2500toInf = kreator.makeMCComponent("ZvvJets_HT2500toInf" , "/ZJetsToNuNu_HT-2500ToInf_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 0.006945)
# All available Z->vv HT bins (HT-400to600 missing).
Zvv = [
ZvvJets_HT100to200,
ZvvJets_HT200to400,
# FIXME MISSING SAMPLE ZvvJets_HT400to600,
ZvvJets_HT600to800,
ZvvJets_HT800to1200,  # BUGFIX: comma was missing here, making this list a SyntaxError
ZvvJets_HT1200to2500,
ZvvJets_HT2500toInf,
]
# TT
# Inclusive NLO ttbar sample; 831.76 pb is the 13 TeV ttbar cross section.
TTJets = kreator.makeMCComponent("TTJets", "/TTJets_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v2/MINIAODSIM" , "CMS", ".*root", 831.76, fracNegWeights=0.319 )
# Decay-channel-split powheg samples below are not yet available for this
# campaign; the cross sections are scaled by branching fractions (W->lv = 0.108
# per flavour), hence the (3*0.108) factors.
# FIXME MISSING SAMPLE TTLep = kreator.makeMCComponent("TTLep" , "/TTTo2L2Nu_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM" , "CMS", ".*root", 831.76*((3*0.108)**2) )
# FIXME MISSING SAMPLE TTHad = kreator.makeMCComponent("TTHad" , "/TTToHadronic_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM" , "CMS", ".*root", 831.76*((1-3*0.108)**2) )
# FIXME MISSING SAMPLE TTSemi = kreator.makeMCComponent("TTSemi", "/TTToSemiLeptonic_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM", "CMS", ".*root", 831.76*2*(3*0.108)*(1-3*0.108) )
# Only the inclusive sample is used for now.
TT = [
TTJets
#TTLep,
#TTHad,
#TTSemi
]
# Single top
# t-channel samples are not yet available for this campaign.
# FIXME MISSING SAMPLE T_tch = kreator.makeMCComponent("T_tch" , "/ST_t-channel_top_4f_inclusiveDecays_TuneCP5_13TeV-powhegV2-madspin-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM" , "CMS", ".*root", 136.02)
# FIXME MISSING SAMPLE TBar_tch = kreator.makeMCComponent("TBar_tch" , "/ST_t-channel_antitop_4f_inclusiveDecays_TuneCP5_13TeV-powhegV2-madspin-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM", "CMS", ".*root", 80.95)
# tW associated production; NOTE(review): top and antitop both use 19.55 pb
# here -- confirm against XSDB.
T_tWch = kreator.makeMCComponent("T_tWch" , "/ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v3/MINIAODSIM" , "CMS", ".*root", 19.55)
TBar_tWch = kreator.makeMCComponent("TBar_tWch", "/ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v3/MINIAODSIM" , "CMS", ".*root", 19.55)
#
# /ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v3/MINIAODSIM which one?
# /ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v2/MINIAODSIM
#
# /ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v3/MINIAODSIM which one?
# /ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v2/MINIAODSIM
#
#
#
# Available single-top components (t-channel entries kept for when the
# samples appear).
Ts = [
# FIXME MISSING T_tch,
# FIXME MISSING TBar_tch,
T_tWch,
TBar_tWch
]
# Diboson
# Inclusive pythia-only samples; the WW cross section carries an extra 1.82
# factor (NOTE(review): presumably an NNLO/LO k-factor -- confirm).
WW = kreator.makeMCComponent("WW" , "/WW_TuneCP5_13TeV-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM" , "CMS", ".*root", 63.21 * 1.82)
WZ = kreator.makeMCComponent("WZ" , "/WZ_TuneCP5_13TeV-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v3/MINIAODSIM" , "CMS", ".*root", 47.13)
ZZ = kreator.makeMCComponent("ZZ" , "/ZZ_TuneCP5_13TeV-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v2/MINIAODSIM" , "CMS", ".*root", 16.523)
# Decay-channel-split samples (where available for this campaign).
# FIXME MISSING WWTo2L2Nu = kreator.makeMCComponent("WWTo2L2Nu" , "/WWTo2L2Nu_NNPDF31_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14_ext1-v1/MINIAODSIM", "CMS", ".*root", 10.481 )
WWToLNuQQ = kreator.makeMCComponent("WWToLNuQQ" , "/WWToLNuQQ_NNPDF31_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 43.53 )
# /WZTo3LNu_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v2/MINIAODSIM
# /WZTo3LNu_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM
WZTo3LNu = kreator.makeMCComponent("WZTo3LNu" , "/WZTo3LNu_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 5.063, fracNegWeights=0.189 )
# FIXME MISSING WZToLNu2Q = kreator.makeMCComponent("WZToLNu2Q" , "/WZTo1L1Nu2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v2/MINIAODSIM" , "CMS", ".*root", 10.71, fracNegWeights=0.204 )
WZTo2L2Q = kreator.makeMCComponent("WZTo2L2Q" , "/WZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 5.595, fracNegWeights=0.204 )
# FIXME MISSING ZZTo4L = kreator.makeMCComponent("ZZTo4L" , "/ZZTo4L_13TeV_powheg_pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM" , "CMS", ".*root", 1.256)
# FIXME MISSING ZZTo4Le = kreator.makeMCComponent("ZZTo4Le" , "/ZZTo4L_13TeV_powheg_pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14_ext1-v1/MINIAODSIM" , "CMS", ".*root", 1.256)
ZZTo2L2Q = kreator.makeMCComponent("ZZTo2L2Q" , "/ZZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM" , "CMS", ".*root", 3.220)
# NOTE(review): both the inclusive WW/WZ/ZZ and the channel-split samples are
# in this list -- make sure this double counting is intended downstream.
DiBosons = [
WW,
# FIXME MISSING WWTo2L2Nu,
WWToLNuQQ,
WZ,
WZTo3LNu,
# FIXME MISSING WZToLNu2Q,
WZTo2L2Q,
ZZ,
# FIXME MISSING ZZTo4L,
# FIXME MISSING ZZTo4Le,
ZZTo2L2Q,
]
# ----------------------------- summary ----------------------------------------
# Flat list of every MC component defined in this file. NOTE(review): QCD is
# presumably defined earlier in this module -- confirm it exists before import.
mcSamples = QCD + Ws + DY + Zvv + TT + Ts + DiBosons
samples = mcSamples
# ---------------------------------------------------------------------
if __name__ == "__main__":
    # CMGTools entry point: inspect/print the sample list from the shell.
    from CMGTools.RootTools.samples.tools import runMain
    runMain(samples, localobjs=locals())
| [
"massironi.andrea@gmail.com"
] | massironi.andrea@gmail.com |
adca2c6960f7bbac282ee2b716b6491ccc961149 | ce0a3a73c7825f7327b8319fb2593b6b01659bb0 | /django2/django2/urls.py | 595e965be38913aca0dabcf5b90b553d7fa2b72b | [] | no_license | soccergame/deeplearning | 28b0a6ed85df12e362b3a451050fab5a2a994be7 | cbc65d3eba453992a279cfd96a9d3640d8fe6b9f | refs/heads/master | 2020-03-28T22:38:26.085464 | 2018-08-31T11:22:39 | 2018-08-31T11:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from django.conf.urls import url
from django.contrib import admin
from . import view, testdb, search, search2
# URL routing table: maps each request path to its view callable.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^hello$', view.hello),
    url(r'^testdb$', testdb.testdb),
    url(r'^search-form$', search.search_form),
    url(r'^search$', search.search),
    url(r'^search-post$', search2.search_post),
]
| [
"18811442380@163.com"
] | 18811442380@163.com |
f0e1b3caf276b73936a22e0d07640e6442fe1083 | 43e5441f74359d620be6f7f80c99622769ea9774 | /apps/userprofile/views.py | 7e593193b5d5447e4364e51d60e04116950e793b | [] | no_license | 33Da/deeplearn_eassy | 96f1bd09fe3df907c650378215eb686e4ab2801e | 82d60c5ec3aec60822d68d13f11ef1320d0bba2e | refs/heads/master | 2023-02-07T15:02:00.202693 | 2021-01-05T05:03:22 | 2021-01-05T05:03:22 | 326,892,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,884 | py | import re
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework import authentication
from .serializers import *
from apps.utils.util import send_email,create_vaildcode
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q
User = get_user_model()
class CumstomBackend(ModelBackend):
    """Authentication backend that checks username + password.

    NOTE: the ``email`` parameter is accepted (and ``Q`` is imported at module
    level) but the lookup is deliberately kept on ``username`` only, to
    preserve the existing behaviour. The class name typo ("Cumstom") is kept
    because it may be referenced from settings.AUTHENTICATION_BACKENDS.
    """

    def authenticate(self, request, username=None, email=None, password=None, **kwargs):
        try:
            user = User.objects.get(username=username)
            if user.check_password(password):
                return user
            # Wrong password: authentication failed.
            return None
        except Exception:
            # Broad except kept on purpose: any lookup failure (no such user,
            # duplicate rows, ...) is treated as "authentication failed".
            return None
"""用户"""
class RegisterViewSet(APIView):
    """User registration endpoint (POST only)."""

    def post(self, request, *args, **kwargs):
        """Validate the registration payload and create the user."""
        serializer = UserLoginSerializer(data=request.data)
        # raise_exception=True turns validation failures into a DRF 400 reply.
        serializer.is_valid(raise_exception=True)
        serializer.save()
        payload = {
            "status_code": status.HTTP_200_OK,
            "message": "ok",
            "results": [],
        }
        return Response(payload, status=status.HTTP_200_OK)
# User profile viewset: read and update the authenticated user's info.
class UserViewset(mixins.UpdateModelMixin, mixins.CreateModelMixin,mixins.RetrieveModelMixin,viewsets.GenericViewSet, mixins.ListModelMixin):
    """
    retrieve: return the current user's profile
    update: let the user change their own username/email
    """
    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    serializer_class = UserDetailSerializer
    permission_classes = (IsAuthenticated,)

    def update(self, request, *args, **kwargs):
        # Always operate on the authenticated user, never on the URL pk.
        user = request.user
        email = request.data.get('email',None)
        username = request.data.get('username',None)
        if not all([email,username]):
            raise ValidationError('参数不全')
        # Uniqueness checks exclude the user's own row so re-submitting the
        # current values is allowed.
        emailcount = UserProfile.objects.filter(email=email).exclude(id=request.user.id).count()
        usernamecount = UserProfile.objects.filter(username=username).exclude(id=request.user.id).count()
        if emailcount > 0:
            raise ValidationError('邮箱存在')
        if usernamecount > 0:
            raise ValidationError('用户名存在')
        user.email = email
        user.username = username
        user.save()
        return Response({"status_code": status.HTTP_200_OK,
                         "message": "ok",
                         "results": "修改成功",
                         }, status=status.HTTP_200_OK)

    def retrieve(self, request, *args, **kwargs):
        # Ignores the URL pk and returns the authenticated user's own profile.
        user_id = request.user.id
        try:
            user = UserProfile.objects.filter(id=int(user_id)).get()
        except Exception as e:
            print(e)
            raise ValidationError("参数错误")

        ret = self.get_serializer(user)
        ret = ret.data
        # Derived field: number of documents owned by the user
        # (assumes the serializer exposes a "document" list -- confirm).
        ret["document_count"] = len(ret["document"])

        return Response({"status_code": status.HTTP_200_OK,
                         "message": "ok",
                         "results": [ret],
                         }, status=status.HTTP_200_OK)

    def perform_create(self, serializer):
        return serializer.save()
class PasswordViewset(mixins.UpdateModelMixin,viewsets.GenericViewSet):
    """
    update: change the authenticated user's password (requires a valid
    email verification code previously stored in the cache).
    """
    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    permission_classes = (IsAuthenticated,)

    def update(self, request, *args, **kwargs):
        # Operate on the authenticated user only.
        user = request.user
        serializer = PasswordSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # Verify the emailed code: cache maps user email -> code. int(None)
        # raises TypeError when no code was issued, which is reported the same
        # way as a wrong code.
        try:
            ture_vaildcode = int(cache.get(request.user.email, None))
        except Exception as e:
            print(e)
            raise ValidationError({'error': ['验证码错误']})
        if ture_vaildcode != int(serializer.validated_data["vaildcode"]):
            raise ValidationError({'error': ['验证码错误']})
        # Invalidate the code by overwriting it with a 1-second-TTL dummy value.
        cache.set(request.user.email, '555', 1)
        user.set_password(serializer.validated_data["password"])
        user.save()
        return Response({"status_code": status.HTTP_200_OK,
                         "message": "ok",
                         "results": "修改成功",
                         }, status=status.HTTP_200_OK)
class VaildcodeViewSet(APIView):
    """
    Generate an email verification code and send it (no auth required).
    """
    def post(self,request,*args,**kwargs):
        # Default "11" deliberately fails the regex below, so a missing email
        # surfaces as a format error.
        email = request.data.get("email","11")
        # Validate email format.
        result = re.match(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$", email)
        if result is None:
            raise ValidationError("邮箱为空或格式错误")
        # Generate the code (also cached under the email by the helper --
        # presumably; confirm in apps.utils.util.create_vaildcode).
        code = create_vaildcode(email)
        # Send it; helper returns 1 on success.
        send_status = send_email(valid_code=code,email=email)
        # send_status = 1
        if send_status == 1:
            return Response({"status_code": status.HTTP_200_OK,
                             "message": "ok",
                             "results": "",
                             }, status=status.HTTP_200_OK)
        else:
            # NOTE(review): failure is reported with HTTP 200 and a *string*
            # status_code '400' (other endpoints use ints) -- confirm clients
            # rely on this before changing it.
            return Response({"status_code": '400',
                             "message": "error",
                             "results":"发送失败",
                             }, status=status.HTTP_200_OK)
class HeadPicViewSet(APIView):
    """Avatar endpoint: GET returns the current URL, POST uploads a new image."""

    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        """Return the authenticated user's avatar URL (None when unset)."""
        try:
            pic_url = request.user.head_pic.url
        except Exception as exc:
            # No file attached (or storage error): report a null URL.
            print(exc)
            pic_url = None
        print(pic_url)
        body = {
            "status_code": status.HTTP_200_OK,
            "message": "ok",
            "results": [{"pic": pic_url}],
        }
        return Response(body, status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        """Store the uploaded 'file' field as the user's avatar."""
        uploaded = request.FILES.get('file')
        if uploaded is None:
            raise ValidationError("未上传文件")
        current_user = request.user
        current_user.head_pic = uploaded
        current_user.save()
        body = {
            "status_code": status.HTTP_200_OK,
            "message": "ok",
            "results": [],
        }
        return Response(body, status=status.HTTP_200_OK)
| [
"764720843@qq.com"
] | 764720843@qq.com |
55cecae7878e380607863d0d4a5958f4b2b29c5c | 3b8fd5b73337b3cd70b283644c266d4ec962ad54 | /2020-2021/DEV1/Chapter 4/BA6.py | ad6e0f0204c3089a9327ecfeed2c4cd9e16c8dd4 | [] | no_license | Andy00097/hrinf-development | 20f4604ca5637c710d9d25e7e218a2ae1233498b | 464ca039537d6b8ca04bf95ba070b8f1f7b81188 | refs/heads/main | 2023-01-09T03:52:54.214472 | 2020-11-11T10:11:57 | 2020-11-11T10:11:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | x = 7
y = 8
z = 5
print()
if x < y and x < z:
minimum = x
minStr = 'min is x : ' + str(x)
if y < x and y < z:
minimum = y
minStr = 'min is y : ' + str(y)
if z < x and z < y:
minimum = z
minStr = 'min is z : ' + str(z)
print() | [
"stijn@kolkies.dev"
] | stijn@kolkies.dev |
ce9d21a2b5baf0bcf9b3667360ecdff5b94a9ce4 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/notification/BaseNotificationView.py | bd3e9ecdba53e2b53b7572bb7e917edc3733fb4d | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,429 | py | # 2017.05.04 15:27:34 Střední Evropa (letní čas)
# Embedded file name: scripts/client/notification/BaseNotificationView.py
from debug_utils import LOG_ERROR
class BaseNotificationView(object):
    """Maintains a bidirectional mapping between notification entity ids and
    the integer ids handed to the ScaleForm (flash) layer."""

    def __init__(self, model = None):
        super(BaseNotificationView, self).__init__()
        self._model = None
        self.__flashIDCounter = 0
        self.__flashIdToEntityIdMap = {}
        self.__entityIdToFlashIdMap = {}
        self.setModel(model)

    def setModel(self, value):
        """Attach the notifications model this view renders."""
        self._model = value

    def cleanUp(self):
        """Drop the model reference (id maps are intentionally kept)."""
        self._model = None

    def _getFlashID(self, notId):
        """Return the flash id for *notId*, allocating a new one on first use."""
        flashID = self.__entityIdToFlashIdMap.get(notId)
        if flashID is None:
            self.__flashIDCounter += 1
            flashID = self.__flashIDCounter
            self.__flashIdToEntityIdMap[flashID] = notId
            self.__entityIdToFlashIdMap[notId] = flashID
        return flashID

    def _getNotificationID(self, flashId):
        """Reverse lookup; logs an error and returns None for unknown ids."""
        try:
            return self.__flashIdToEntityIdMap[flashId]
        except KeyError:
            LOG_ERROR('Wrong notification ScaleForm id', flashId)
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\notification\BaseNotificationView.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:27:34 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
dda5436c28630a5d24d25127b608204ac8621153 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0551_0600/LeetCode555_SplitConcatenatedStrings.py | 1611f5635c66d3a1c0372441ef57cf5e8dacf617 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 874 | py | '''
Created on Aug 24, 2017
@author: MT
'''
# NOTE(review): this block is a token-mangled exercise template -- Python
# keywords and common calls have been replaced by placeholders (`c_` for
# class, `___`/`__` for def/for/in/if, `r..` for return, etc.). It is NOT
# valid Python as written. It appears to be LeetCode 555 "Split Concatenated
# Strings"; the lines are kept verbatim below.
c_ Solution(o..
    ___ splitLoopedString strs
        """
        :type strs: List[str]
        :rtype: str
        """
        res N..
        arr [m..(s, s[::-1]) ___ s __ strs]
        ___ i, s __ e..(arr
            ___ start __ (s, s[::-1]
                ___ j __ r..(l..(start)+1
                    __ n.. res:
                        res start[j:] + ''.j..(arr[i+1:]+arr[:i]) + start[:j]
                    ____
                        res m..(res, start[j:] + ''.j..(arr[i+1:]+arr[:i]) + start[:j])
        r.. res

    ___ test
        testCases [
             'abc', 'xyz' ,
        ]
        ___ strs __ testCases:
            print('strs: %s' % strs)
            result splitLoopedString(strs)
            print('result: %s' % result)
            print('-='*30+'-')

__ _____ __ _____
    Solution().test()
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
e45c68224c72d2987f3a4acb7dbc4ce2ca5d0784 | 7c615414af2591146f2898444fb68f60e00a8482 | /8-20/flask-test/runserver.py | dbb9fd623641a4e7a706257506126f99569bc4a7 | [] | no_license | guulyfox/Demonkeyse-Manell | 15da1db0f0abf734cd638184d46015357de02612 | 745e552ac956c5bf087943dd3f075dede9c212ac | refs/heads/master | 2021-01-01T04:37:29.080726 | 2019-03-11T00:43:40 | 2019-03-11T00:43:40 | 97,210,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from main import app
from controller import user_role
from conf.config import listen_port
from main import api
# Register the user-info REST resource on its endpoint.
api.add_resource(user_role.Userinfo,'/getpage/')
# NOTE(review): `listen_port` is imported above but the port is hard-coded to
# 5001 here -- presumably one of the two should win; confirm with deployment.
# Also note debug=True on a non-loopback host, which is unsafe in production.
app.run(debug = True, host ="192.168.0.73", port =5001, threaded = True)
| [
"www.hubiwu.com@qq.com"
] | www.hubiwu.com@qq.com |
4bc1cd2926d306da1aee5a69062e99ace95c5840 | 4ba5b11860b7f046622b3ece7db4e5213efcec6e | /odoo/custom/src/private/faf_sale_project/__manifest__.py | fab3b7e7c338655cd14fe20e027298ca1fb6dcc2 | [
"BSL-1.0",
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] | permissive | newtratip/faf | 94719c3558f65791caf6be4fb084ce1d7bd28fae | bf22486d5d6849c94db9f56f90dd05c0563fce28 | refs/heads/master | 2023-03-29T14:37:54.968201 | 2021-04-08T11:33:26 | 2021-04-08T11:33:26 | 328,696,058 | 0 | 0 | BSL-1.0 | 2021-04-08T11:33:27 | 2021-01-11T14:48:13 | HTML | UTF-8 | Python | false | false | 635 | py | # Copyright 2021 Ecosoft Co., Ltd. (http://ecosoft.co.th)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "FAF - Sale Project",
"summary": "Enhance sale_project module",
"version": "14.0.1.0.0",
"category": "FAF",
"website": "http://ecosoft.co.th",
"author": "Tharathip C., Ecosoft",
"depends": [
"sale_project",
"project_status",
"sale_order_type",
"faf_sale",
],
"data": [
"views/project_views.xml",
"views/sale_views.xml",
],
"license": "AGPL-3",
"installable": True,
"maintainers": ["newtratip"],
}
| [
"tharathip.chaweewongphan@gmail.com"
] | tharathip.chaweewongphan@gmail.com |
3028f4140ed5f9dc3677bd9696eb0a366ad48b9a | a32b09a9a17c081c134d770d1da16d36dfef8951 | /ptah/testing.py | 673d0c5e4b51ab096994e799cd31b39fae0dafd4 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | webmaven/ptah | 8c1b01506736a51d25e09a79dbd648ce4891429b | 98b3afc35e2b21f0b5faed594030ddf9d7297d2e | refs/heads/master | 2021-01-18T03:46:27.575850 | 2012-05-09T05:08:51 | 2012-05-09T05:08:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,828 | py | """ base class """
import sys
import sqlalchemy
import transaction
import pkg_resources
from zope.interface import directlyProvides
from pyramid import testing
from pyramid.interfaces import \
IRequest, IAuthenticationPolicy, IAuthorizationPolicy
from pyramid.interfaces import IRouteRequest
from pyramid.view import render_view, render_view_to_response
from pyramid.path import package_name
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
if sys.version_info[:2] == (2, 6): # pragma: no cover
import unittest2 as unittest
from unittest2 import TestCase
else:
import unittest
from unittest import TestCase
import ptah
from ptah import config
class PtahTestCase(TestCase):
    """Base TestCase wiring up a Pyramid Configurator and, optionally,
    ptah + an in-memory SQLite database for every test.

    Class-level knobs (override in subclasses):
      _init_ptah      -- run init_ptah() during setUp
      _init_sqla      -- create the SQLAlchemy engine and all tables
      _includes       -- extra packages to config.include()
      _auto_commit    -- Configurator autocommit mode after setup
      _settings       -- Pyramid settings dict
      _trusted_manage -- install an allow-all ptah manage access manager
    """

    _init_ptah = True
    _init_sqla = True
    _includes = ()
    _auto_commit = True
    _settings = {'sqlalchemy.url': 'sqlite://'}
    _packages = ()
    _trusted_manage = True

    # Minimal WSGI environ used for DummyRequest construction.
    _environ = {
        'wsgi.url_scheme':'http',
        'wsgi.version':(1,0),
        'HTTP_HOST': 'example.com',
        'SCRIPT_NAME': '',
        'PATH_INFO': '/',}

    def make_request(self, environ=None, request_iface=IRequest, **kwargs):
        """Build a DummyRequest with the class environ by default.

        BUGFIX: the ``request_iface`` argument is now honoured; previously
        it was ignored and IRequest was always assigned.
        """
        if environ is None:
            environ = self._environ
        request = testing.DummyRequest(environ=environ, **kwargs)
        request.request_iface = request_iface
        return request

    def init_ptah(self, *args, **kw):
        """Include ptah (and the test module's own packages), then commit the
        configuration and optionally initialize settings/SQL/manage access."""
        self.registry.settings.update(self._settings)
        self.config.include('ptah')

        for pkg in self._includes: # pragma: no cover
            self.config.include(pkg)

        # Include every ancestor package of the test module (best effort),
        # skipping ptah itself and anything already in _includes.
        pkg = package_name(sys.modules[self.__class__.__module__])
        if pkg != 'ptah':
            parts = self.__class__.__module__.split('.')
            for l in range(len(parts)):
                pkg = '.'.join(parts[:l+1])
                if pkg == 'ptah' or pkg.startswith('ptah.') or \
                       pkg in self._includes:
                    continue # pragma: no cover
                try:
                    self.config.include(pkg)
                except Exception: # pragma: no cover
                    # Best effort: not every ancestor is includable.
                    pass

        self.config.scan(self.__class__.__module__)

        self.config.commit()
        self.config.autocommit = self._auto_commit

        self.config.ptah_init_settings()

        ptah.reset_session()

        if self._init_sqla:
            # create engine
            self.config.ptah_init_sql()

            # create sql tables
            Base = ptah.get_base()
            Base.metadata.create_all()
            transaction.commit()

        if self._trusted_manage:
            # Allow-all access manager so manage views work in tests.
            def trusted(*args):
                return True
            ptah.manage.set_access_manager(trusted)

    def init_pyramid(self):
        """Create the request, the Configurator and link their registries."""
        self.request = request = self.make_request()
        self.config = testing.setUp(
            request=request, settings=self._settings, autocommit=False)
        self.config.get_routes_mapper()
        self.registry = self.config.registry
        self.request.registry = self.registry

    def setUp(self):
        self.init_pyramid()
        if self._init_ptah:
            self.init_ptah()

    def tearDown(self):
        """Undo per-test global state: ptah thread-locals, default ACL,
        config attachments, Pyramid testing setup and the transaction."""
        import ptah.util
        ptah.util.tldata.clear()

        import ptah.security
        ptah.security.DEFAULT_ACL[:] = []

        from ptah.config import ATTACH_ATTR
        mod = sys.modules[self.__class__.__module__]
        if hasattr(mod, ATTACH_ATTR):
            delattr(mod, ATTACH_ATTR)

        testing.tearDown()
        transaction.abort()

    def render_route_view(self, context, request, route_name, view=''): # pragma: no cover
        """Render a view registered for a specific route by marking the
        request with that route's request interface first."""
        directlyProvides(
            request, self.registry.getUtility(IRouteRequest, route_name))

        return render_view_to_response(context, request, view)
| [
"fafhrd91@gmail.com"
] | fafhrd91@gmail.com |
4df11f774fd9e4ab12f02cd8057cf8221675aafc | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/masking-personal-information/408960920.py | 32a0c0921e5ec105e0cd3e61b1685ed224acbd4c | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # title: masking-personal-information
# detail: https://leetcode.com/submissions/detail/408960920/
# datetime: Thu Oct 15 14:44:30 2020
# runtime: 24 ms
# memory: 14.1 MB
class Solution:
    def maskPII(self, S: str) -> str:
        """Mask personal info (LeetCode 831/"Masking Personal Information").

        Emails keep the first and last character of the local name with five
        stars between them, lowercased. Phone numbers keep the last four
        digits; any country-code digits beyond ten become leading stars.
        """
        at_pos = S.find('@')
        if at_pos >= 0:
            # Email: first char + "*****" + last local char and the domain.
            masked = S[0] + '*' * 5 + S[at_pos - 1:]
            return masked.lower()
        # Phone: strip everything that is not a digit.
        digits = ''.join(ch for ch in S if ch.isdigit())
        local_tail = ''.join(digits[-4:])
        if len(digits) == 10:
            return '***-***-' + local_tail
        country_mask = '*' * (len(digits) - 10)
        return '+' + country_mask + '-***-***-' + local_tail
| [
"ljm51689@gmail.com"
] | ljm51689@gmail.com |
b017fa7e894b1aedb746f2fb6f1be61407cce1f2 | 058f6cf55de8b72a7cdd6e592d40243a91431bde | /tests/clang_plugin/dynamic/test_fp32_overflow_found/test_fp32_overflow_found.py | c78364d0304f5e18ea782535a92d541c87185be2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/FPChecker | 85e8ebf1d321b3208acee7ddfda2d8878a238535 | e665ef0f050316f6bc4dfc64c1f17355403e771b | refs/heads/master | 2023-08-30T23:24:43.749418 | 2022-04-14T19:57:44 | 2022-04-14T19:57:44 | 177,033,795 | 24 | 6 | Apache-2.0 | 2022-09-19T00:09:50 | 2019-03-21T22:34:14 | Python | UTF-8 | Python | false | false | 1,816 | py | #!/usr/bin/env python
import subprocess
import os
def setup_module(module):
    # Run the whole module from its own directory so the Makefile and the
    # built binary are found regardless of where pytest was invoked.
    THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    os.chdir(THIS_DIR)
def teardown_module(module):
    # Clean build artifacts after the tests (shell=True, so the single-item
    # list is passed to the shell as the command string).
    cmd = ["make clean"]
    cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
# returns: tuple (error, op, file, line)
#
#+-------------------------- FPChecker Warning Report --------------------------+
# Error : Underflow
# Operation : ADD
# File : dot_product.cu
# Line : 9
#+------------------------------------------------------------------------------+
#
def getFPCReport(lines):
    """Parse the first FPChecker warning report found in *lines*.

    Returns an (error, operation, file, line) tuple of strings, or four
    empty strings when no report banner is present. Each field is taken as
    the third whitespace-separated token of the corresponding report line
    (e.g. " Error : Underflow" -> "Underflow").
    """
    for idx, text in enumerate(lines):
        if "FPChecker" in text and "Report" in text and "+" in text:
            error = lines[idx + 1].split()[2]
            operation = lines[idx + 2].split()[2]
            fname = lines[idx + 3].split()[2]
            lineno = lines[idx + 4].split()[2]
            return (error, operation, fname, lineno)
    return ("", "", "", "")
def test_1():
    # Build the instrumented CUDA program, run it, then assert on the
    # FPChecker report it prints.
    # --- compile code ---
    cmd = ["make"]
    try:
        cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        # NOTE(review): exit() inside a test aborts via SystemExit instead of
        # failing with an assertion -- consider pytest.fail(e.output) instead.
        print(e.output)
        exit()

    # --- run code ---
    cmd = ["./main"]
    try:
        cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        print(e.output)
        exit()

    # Expect an INF (overflow) report pointing at line 8 of the kernel source.
    rep = getFPCReport(cmdOutput.decode('utf-8').split("\n"))
    assert rep[0] == 'INF'
    assert rep[3] == '8'
| [
"ilaguna@llnl.gov"
] | ilaguna@llnl.gov |
d1ed81e74acd860b2a60472a9908d5c19b953515 | 5dac0010edb884cd6d412954c79b75fa946e252d | /101-AWS-S3-Hacks/last_modified.py | b91aeb13f0761f01f9f3703ebe255acf10820cb1 | [] | no_license | ralic/aws_hack_collection | c1e1a107aa100e73b6e5334ed9345576057bdc9d | 7b22018169e01d79df7416dd149c015605dea890 | refs/heads/master | 2023-01-09T04:31:57.125028 | 2020-02-06T11:21:39 | 2020-02-06T11:21:39 | 90,350,262 | 3 | 1 | null | 2022-12-26T20:03:05 | 2017-05-05T07:39:34 | Python | UTF-8 | Python | false | false | 460 | py | #!/usr/bin/python
"""
- Author : Nag m
- Hack : List all the objects last modified timestamp in Zulu format
- Info : List all the objects last modified timestamp in Zulu format
* 101-s3-aws
"""
import boto
def modified(name):
    """Print the last-modified timestamp (Zulu/ISO-8601 string) of every
    object in the named S3 bucket, one per line.

    NOTE(review): relies on the module-global ``conn`` (a boto S3
    connection) being assigned before this is called — see the
    ``__main__`` block below.

    :param name: S3 bucket name.
    """
    bucket = conn.get_bucket(name)
    # bucket.list() lazily pages through every key in the bucket.
    for obj in bucket.list():
        # Single-argument parenthesized print behaves identically under
        # Python 2 (prints the tuple-free value) and Python 3.
        print(obj.last_modified)
if __name__ == "__main__":
    # Connect with credentials from the environment/boto config, then dump
    # the last-modified timestamp of every object in the target bucket.
    conn = boto.connect_s3()
    modified("101-s3-aws")
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
1f3e6552b2041938ee45c8f4cf410cbcd65fad3d | 7ca4838ab8871cb78e2fcf119a252d23e2bc89c5 | /samples/generated_samples/logging_v2_generated_config_service_v2_update_view_sync.py | 33014bf236582bd759d03fd778342ff3867a3036 | [
"Apache-2.0"
] | permissive | googleapis/python-logging | abb25a7a34306527c37bb68e98bfb4d6f1647e1b | 1037afccd1436a152aa229fa98f35ec83c723d06 | refs/heads/main | 2023-08-31T10:06:49.191395 | 2023-08-29T13:28:36 | 2023-08-29T13:28:36 | 226,992,562 | 109 | 56 | Apache-2.0 | 2023-09-12T16:13:25 | 2019-12-10T00:09:45 | Python | UTF-8 | Python | false | false | 1,814 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateView
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_UpdateView_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import logging_v2
def sample_update_view():
    """Illustrates a synchronous ConfigServiceV2.UpdateView call."""
    # Instantiate the synchronous client.
    client = logging_v2.ConfigServiceV2Client()

    # Build the request; "name_value" is a placeholder resource name.
    view_request = logging_v2.UpdateViewRequest(
        name="name_value",
    )

    # Issue the RPC and print the updated view returned by the service.
    print(client.update_view(request=view_request))
# [END logging_v2_generated_ConfigServiceV2_UpdateView_sync]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.