| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2–616) | content_id (string, len 40) | detected_licenses (list, len 0–69) | license_type (string, 2 classes) | repo_name (string, len 5–118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, len 2–10.3M) | authors (list, len 1) | author_id (string, len 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f071c15769ac8ce89c01b772e739236f4fc6829
|
19f0dfcae32a89907b489a7729ddf6a9c958d16d
|
/inference.py
|
1d7e4cd9f8917b15bde561eaca83a8b7c6ae18df
|
[] |
no_license
|
HoiBunCa/ELA
|
f231422d135210f836a882070f43af9d9f14a134
|
6e3ce729f69e0a96fa23e09b3b870efb8b3deb44
|
refs/heads/master
| 2023-07-18T09:59:38.883441
| 2021-09-06T02:52:22
| 2021-09-06T02:52:22
| 403,464,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
#!/usr/bin/env python
# coding: utf-8

# In[1]:
import torch
from PIL import Image
from PIL import ImageChops
from PIL import ImageEnhance
from torchvision import transforms

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# In[2]:
# map_location lets the checkpoint load on CPU-only machines as well
model = torch.load("model_ela.pt", map_location=device)
model = model.to(device)
model.eval()

# In[3]:
# Deterministic preprocessing for inference (the original applied random
# training-time augmentation here, which made predictions non-reproducible)
data_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# In[4]:
class_names = ['fake', 'real']
softmax = torch.nn.Softmax(dim=1)

# In[5]:
def inference_img(filename):
    """Classify an image as 'fake' or 'real' via Error Level Analysis (ELA)."""
    resaved = 'resaved.jpg'
    ela = 'ela.png'
    # Re-save the image as JPEG at quality 90 and take the pixel-wise
    # difference; tampered regions tend to show a different error level.
    im = Image.open(filename).convert('RGB')  # normalize mode so the diff is well-defined
    im.save(resaved, 'JPEG', quality=90)
    resaved_im = Image.open(resaved)
    ela_im = ImageChops.difference(im, resaved_im)
    extrema = ela_im.getextrema()
    max_diff = max(ex[1] for ex in extrema) or 1  # guard against division by zero
    ela_im = ImageEnhance.Brightness(ela_im).enhance(255.0 / max_diff)
    ela_im.save(ela)
    img_tensor = data_transforms(Image.open(ela)).unsqueeze(0).to(device)
    with torch.no_grad():
        output = model(img_tensor)
    _, preds = torch.max(output, 1)
    return class_names[int(preds)], softmax(output).max().item()

# In[6]:
filename = 'D:/Code/Tima_Onbroading/ELA/datatest_private/fake/CMND MAT SAU 2.jpg'
label, score = inference_img(filename)
print(label)
print(score)
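# Hypothetical batch usage of inference_img (the glob pattern is a
# placeholder; uses only the function defined above):
import glob
for path in glob.glob('D:/Code/Tima_Onbroading/ELA/datatest_private/fake/*.jpg'):
    label, score = inference_img(path)
    print(path, label, round(score, 3))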
|
[
"boybka23@gmail.com"
] |
boybka23@gmail.com
|
44d34ce7e096066329be1d20c421e3965ebda775
|
aecf0a868968f814879fc57e779432db96dafb4f
|
/yatube/apps/posts/migrations/0010_auto_20201207_1733.py
|
8f0e8ef72806205df21555fcd009b40bf1ab4b99
|
[
"BSD-3-Clause"
] |
permissive
|
azharkih/PetBlog
|
5a0089a51da08055b70eb37dc4027bfcde792da6
|
691d5e2ebdff0148e195d84442ff52acc0f036c3
|
refs/heads/main
| 2023-04-22T05:52:55.859615
| 2021-05-18T08:19:29
| 2021-05-18T08:19:29
| 305,399,168
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
# Generated by Django 2.2.6 on 2020-12-07 17:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0009_auto_20201206_1012'),
]
operations = [
migrations.AlterUniqueTogether(
name='follow',
unique_together=set(),
),
migrations.AddConstraint(
model_name='follow',
constraint=models.UniqueConstraint(fields=('user', 'author'), name='unique_following'),
),
]
|
[
"andreyzharkih@gmail.com"
] |
andreyzharkih@gmail.com
|
70d2ac41c252f819bd4652c5c0acabc6610a5be3
|
f97b7852aafe629de03323b15e2e075e5893289a
|
/Ipl/asgi.py
|
87131a21a526b69b5b13d1f882dc10ddeb8e1516
|
[] |
no_license
|
ravilucky231/Ipl-project
|
28adbc309446a135309089a5a02da8654c0fd24a
|
57c43d775e351673fa63a8623f31f25fb7d3ef92
|
refs/heads/master
| 2023-06-17T00:44:56.008578
| 2021-07-12T12:08:35
| 2021-07-12T12:08:35
| 385,235,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
"""
ASGI config for Ipl project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Ipl.settings')
application = get_asgi_application()
|
[
"ravikumar23797@gmail.com"
] |
ravikumar23797@gmail.com
|
bb07aae1f945712d767a0c75bcd232506bd977b1
|
107ca5f90d40f0caa5a71b9aa314e61a2b0c9a84
|
/src/preprocess.py
|
fed23abc7454c3b3128860e7dafc1db3e1dba4a1
|
[] |
no_license
|
dondakeshimo/predicting-molecular-properties
|
8cd9841ae55dd04b3b2fbcc2a712f21808ba1ce0
|
4cfc6dcdfd35e8a462dbfffcf11743b6375f56f4
|
refs/heads/master
| 2021-07-12T04:56:39.003611
| 2019-07-09T12:40:03
| 2019-07-09T12:40:03
| 193,218,654
| 1
| 1
| null | 2020-09-25T23:54:21
| 2019-06-22T09:59:20
|
Python
|
UTF-8
|
Python
| false
| false
| 11,107
|
py
|
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
import utils
def map_atom_info(df, structures, atom_idx):
print("Merge structures with train dataframe")
df = pd.merge(df, structures, how="left",
left_on=["molecule_name", f"atom_index_{atom_idx}"],
right_on=["molecule_name", "atom_index"])
df = df.drop("atom_index", axis=1)
df = df.rename(columns={
"atom": f"atom_{atom_idx}",
"x": f"x_{atom_idx}",
"y": f"y_{atom_idx}",
"z": f"z_{atom_idx}",
"n_bonds": f"n_bonds_{atom_idx}",
"bond_lengths_mean": f"bond_lengths_mean_{atom_idx}",
"bond_lengths_std": f"bond_lengths_std_{atom_idx}"
})
return df
def calc_dist(df):
print("Calculate distance between atom")
df_p_0 = df[["x_0", "y_0", "z_0"]].values
df_p_1 = df[["x_1", "y_1", "z_1"]].values
df["dist"] = np.linalg.norm(df_p_0 - df_p_1, axis=1)
df["dist_x"] = (df["x_0"] - df["x_1"]) ** 2
df["dist_y"] = (df["y_0"] - df["y_1"]) ** 2
df["dist_z"] = (df["z_0"] - df["z_1"]) ** 2
df["dist_div_p3"] = 1 / (df["dist"].replace(0, 1e-10) ** 3)
return df
def create_features_full(df):
print("Create full brute force features")
df["molecule_couples"] = \
df.groupby("molecule_name")["id"].transform("count")
df["molecule_dist_mean"] = \
df.groupby("molecule_name")["dist"].transform("mean")
df["molecule_dist_min"] = \
df.groupby("molecule_name")["dist"].transform("min")
df["molecule_dist_max"] = \
df.groupby("molecule_name")["dist"].transform("max")
df["atom_0_couples_count"] = \
df.groupby(["molecule_name", "atom_index_0"])["id"].transform("count")
df["atom_1_couples_count"] = \
df.groupby(["molecule_name", "atom_index_1"])["id"].transform("count")
num_cols = ["x_1", "y_1", "z_1",
"dist", "dist_x", "dist_y", "dist_z", "dist_div_p3"]
cat_cols = ["atom_index_0", "atom_index_1", "type", "atom_1", "type_0"]
aggs = ["mean", "max", "std", "min"]
for col in cat_cols:
df[f"molecule__{col}__count"] = \
df.groupby("molecule_name")[col].transform("count")
for cat_col in tqdm(cat_cols):
for num_col in tqdm(num_cols):
for agg in aggs:
col = f"molecule__{cat_col}__{num_col}__{agg}"
df[col] = df.groupby(["molecule_name", cat_col])[num_col] \
.transform(agg)
if agg == "std":
df[col] = df[col].fillna(0)
df[col + "__diff"] = df[col] - df[num_col]
df[col + "__div"] = df[col] / df[num_col].replace(0, 1e-10)
return df
def create_basic_features(df):
print("Create basic static features")
df["molecule_couples"] = \
df.groupby("molecule_name")["id"].transform("count")
df["molecule_dist_mean"] = \
df.groupby("molecule_name")["dist"].transform("mean")
df["molecule_dist_min"] = \
df.groupby("molecule_name")["dist"].transform("min")
df["molecule_dist_max"] = \
df.groupby("molecule_name")["dist"].transform("max")
df["atom_0_couples_count"] = \
df.groupby(["molecule_name", "atom_index_0"])["id"].transform("count")
df["atom_1_couples_count"] = \
df.groupby(["molecule_name", "atom_index_1"])["id"].transform("count")
return df
def create_extra_features(df, good_columns):
print("Create brute force features in good columns")
columns = [g.split("__") for g in good_columns]
columns = sorted(columns, key=lambda x: len(x))
for cols in tqdm(columns):
if len(cols) == 1:
continue
elif len(cols) == 3:
_, col, _ = cols
df[f"molecule__{col}__count"] = \
df.groupby("molecule_name")[col].transform("count")
elif len(cols) == 4:
_, cat, num, agg = cols
col = f"molecule__{cat}__{num}__{agg}"
df[col] = df.groupby(["molecule_name", cat])[num] \
.transform(agg)
if agg == "std":
df[col] = df[col].fillna(0)
elif len(cols) == 5:
_, cat, num, agg, cal = cols
col = f"molecule__{cat}__{num}__{agg}"
if col not in df.columns:
df[col] = df.groupby(["molecule_name", cat])[num] \
.transform(agg)
if agg == "std":
df[col] = df[col].fillna(0)
if cal == "diff":
df[col + "__diff"] = df[col] - df[num]
if cal == "div":
df[col + "__div"] = df[col] / df[num].replace(0, 1e-10)
return df
def get_good_columns(file_folder="../data"):
print(f"Get good columns from {file_folder}/preprocessed/feat...ance.csv")
importance = pd.read_csv(
f"{file_folder}/preprocessed/feature_importance.csv")
span = len(importance) // 8
importance_set = set()
for i in range(8):
for column in importance.iloc[span * i:span * (i + 1)] \
.groupby(["feature"]).mean() \
.sort_values(by=["importance"], ascending=False) \
.index[:50]:
importance_set.add(column)
good_columns = list(importance_set)
good_columns.append("type")
return good_columns
def get_atom_rad_en(structures):
print("Add atom radius and lelectro negativity to structures")
atomic_radius = {"H": 0.38, "C": 0.77, "N": 0.75, "O": 0.73, "F": 0.71}
fudge_factor = 0.05
atomic_radius = {k: v + fudge_factor for k, v in atomic_radius.items()}
electronegativity = {"H": 2.2, "C": 2.55, "N": 3.04, "O": 3.44, "F": 3.98}
atoms = structures["atom"].values
atoms_en = [electronegativity[x] for x in atoms]
atoms_rad = [atomic_radius[x] for x in atoms]
structures["EN"] = atoms_en
structures["rad"] = atoms_rad
return structures
def calc_bonds(structures):
i_atom = structures["atom_index"].values
p = structures[["x", "y", "z"]].values
p_compare = p
m = structures["molecule_name"].values
m_compare = m
r = structures["rad"].values
r_compare = r
source_row = np.arange(len(structures))
max_atoms = 28
bonds = np.zeros((len(structures) + 1, max_atoms + 1),
dtype=np.int8)
bond_dists = np.zeros((len(structures) + 1, max_atoms + 1),
dtype=np.float32)
print("Calculating bonds")
for i in tqdm(range(max_atoms - 1)):
p_compare = np.roll(p_compare, -1, axis=0)
m_compare = np.roll(m_compare, -1, axis=0)
r_compare = np.roll(r_compare, -1, axis=0)
# Are we still comparing atoms in the same molecule?
mask = np.where(m == m_compare, 1, 0)
dists = np.linalg.norm(p - p_compare, axis=1) * mask
r_bond = r + r_compare
bond = np.where(np.logical_and(dists > 0.0001, dists < r_bond), 1, 0)
target_row = source_row + i + 1
target_row = np.where(
np.logical_or(target_row > len(structures), mask == 0),
len(structures), target_row)
source_atom = i_atom
target_atom = i_atom + i + 1
target_atom = np.where(
np.logical_or(target_atom > max_atoms, mask == 0),
max_atoms, target_atom)
bonds[(source_row, target_atom)] = bond
bonds[(target_row, source_atom)] = bond
bond_dists[(source_row, target_atom)] = dists
bond_dists[(target_row, source_atom)] = dists
bonds = np.delete(bonds, axis=0, obj=-1)
bonds = np.delete(bonds, axis=1, obj=-1)
bond_dists = np.delete(bond_dists, axis=0, obj=-1)
bond_dists = np.delete(bond_dists, axis=1, obj=-1)
print("Counting and condensing bonds")
bonds_numeric = [
[i for i, x in enumerate(row) if x]
for row in tqdm(bonds)
]
bond_lengths = [
[dist for i, dist in enumerate(row) if i in bonds_numeric[j]]
for j, row in enumerate(tqdm(bond_dists))
]
bond_lengths_mean = [np.mean(x) for x in tqdm(bond_lengths)]
bond_lengths_std = [np.std(x) for x in tqdm(bond_lengths)]
n_bonds = [len(x) for x in tqdm(bonds_numeric)]
bond_data = {"n_bonds": n_bonds,
"bond_lengths_mean": bond_lengths_mean,
"bond_lengths_std": bond_lengths_std}
bond_df = pd.DataFrame(bond_data)
structures = structures.join(bond_df)
return structures
def encode_str(train, test, good_columns):
print("Encoding strings")
for f in ["atom_0", "atom_1", "type_0", "type"]:
if f in good_columns:
lbl = LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return train, test
def preprocess(train, test, structures, contrib):
train = pd.merge(train, contrib, how="left",
left_on=["molecule_name", "atom_index_0",
"atom_index_1", "type"],
right_on=["molecule_name", "atom_index_0",
"atom_index_1", "type"])
structures = get_atom_rad_en(structures)
structures = calc_bonds(structures)
train = map_atom_info(train, structures, 0)
train = map_atom_info(train, structures, 1)
test = map_atom_info(test, structures, 0)
test = map_atom_info(test, structures, 1)
train = calc_dist(train)
test = calc_dist(test)
train["type_0"] = train["type"].apply(lambda x: x[0])
test["type_0"] = test["type"].apply(lambda x: x[0])
good_columns = get_good_columns()
train = create_basic_features(train)
test = create_basic_features(test)
train = create_extra_features(train, good_columns)
test = create_extra_features(test, good_columns)
train, test = encode_str(train, test, good_columns)
return train, test
def create_feature_importance(train, test, structures, contrib):
train = pd.merge(train, contrib, how="left",
left_on=["molecule_name", "atom_index_0",
"atom_index_1", "type"],
right_on=["molecule_name", "atom_index_0",
"atom_index_1", "type"])
structures = get_atom_rad_en(structures)
structures = calc_bonds(structures)
train = train.sample(frac=0.5).reset_index(drop=True)
train = map_atom_info(train, structures, 0)
train = map_atom_info(train, structures, 1)
train = calc_dist(train)
train["type_0"] = train["type"].apply(lambda x: x[0])
utils.show_mem_usage(train)
train = create_features_full(train)
utils.show_mem_usage(train)
print("Encoding strings")
for f in ["atom_0", "atom_1", "type_0", "type"]:
lbl = LabelEncoder()
lbl.fit(list(train[f].values))
train[f] = lbl.transform(list(train[f].values))
return train, test
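# Illustration of the column-name grammar parsed by create_extra_features
# above (example names only, not actual contents of feature_importance.csv):
#   "dist"                               -> 1 part: raw column, kept as-is
#   "molecule__type__count"              -> 3 parts: per-molecule count
#   "molecule__atom_1__dist__mean"       -> 4 parts: per-molecule groupby aggregate
#   "molecule__atom_1__dist__mean__diff" -> 5 parts: aggregate minus raw value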
|
[
"went.went.takkun135@gmail.com"
] |
went.went.takkun135@gmail.com
|
3461812b9d0cecd1cdaa91a09e231df8c71459ba
|
01f434d0f96cb5bc2475d675e0082dfe1b5b240e
|
/DQN/DQN-Distributed.py
|
89f1e0c4a27b8a967c4629bfcbeb3c2ca55fa6f3
|
[] |
no_license
|
david-simoes-93/GeoFriends2
|
2212fb0e27265680218a2d3f7a0b0d3e439ae875
|
5ef8fbdedc8d518b42deca4574771436d65371f7
|
refs/heads/master
| 2022-03-05T22:47:47.091191
| 2019-11-13T10:20:18
| 2019-11-13T10:20:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,022
|
py
|
# While training is taking place, statistics on agent performance are available from Tensorboard. To launch it use:
#
# tensorboard --logdir=worker_0:'./train_0',worker_1:'./train_1',worker_2:'./train_2',worker_3:'./train_3'
# tensorboard --logdir=worker_0:'./train_0'
# tensorboard --logdir=worker_0:'./train_0',worker_1:'./train_1',worker_2:'./train_2',worker_3:'./train_3',worker_4:'./train_4',worker_5:'./train_5',worker_6:'./train_6',worker_7:'./train_7',worker_8:'./train_8',worker_9:'./train_9',worker_10:'./train_10',worker_11:'./train_11'
import argparse
import os
import tensorflow as tf
from DQN.DQNetwork import QNetwork1Step
from DQN.DQNSlave import WorkerGF2
from simulator.GymEnvGF import GymEnvGF
max_episode_length = 4000
gamma = .99 # discount rate for advantage estimation and reward discounting
state_size_square = 9
state_size_circle = 11
height = 1
number_of_cell_types = 1
learning_rate = 1e-5
action_size_square = 4
action_size_circle = 4
model_path = './model_dist'
use_lstm = False
use_conv_layers = False
display = True
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--task_index",
type=int,
default=0,
help="Index of task within the job"
)
parser.add_argument(
"--slaves_per_url",
type=str,
default="1",
help="Comma-separated list of maximum tasks within the job"
)
parser.add_argument(
"--urls",
type=str,
default="localhost",
help="Comma-separated list of hostnames"
)
parser.add_argument(
"--learning",
type=int,
default=1,
help="0 no one learning; 1 square learning; 2 circle learning; 3 both learning"
)
FLAGS, unparsed = parser.parse_known_args()
if FLAGS.learning == 0: # train neither
circle_learning = False
square_learning = False
elif FLAGS.learning == 1: # train square
circle_learning = False
square_learning = True
elif FLAGS.learning == 2: # train circle
circle_learning = True
square_learning = False
elif FLAGS.learning == 3: # train both
circle_learning = True
square_learning = True
# Create a cluster from the parameter server and worker hosts.
hosts = []
for (url, max_per_url) in zip(FLAGS.urls.split(","), FLAGS.slaves_per_url.split(",")):
for i in range(int(max_per_url)):
hosts.append(url + ":" + str(2210 + i))
cluster = tf.train.ClusterSpec({"dqn": hosts})
server = tf.train.Server(cluster, job_name="dqn", task_index=FLAGS.task_index)
tf.reset_default_graph()
# Create a directory to save models
if not os.path.exists(model_path):
os.makedirs(model_path)
with tf.device(tf.train.replica_device_setter(worker_device="/job:dqn/task:%d" % FLAGS.task_index, cluster=cluster)):
global_episodes = tf.contrib.framework.get_or_create_global_step()
trainer_square = tf.train.AdamOptimizer(learning_rate=learning_rate)
trainer_circle = tf.train.AdamOptimizer(learning_rate=learning_rate)
master_network_square = QNetwork1Step(state_size_square, action_size_square, 'global_square',
None, use_conv_layers, use_lstm) # Generate global network
master_network_circle = QNetwork1Step(state_size_circle, action_size_circle, 'global_circle',
None, use_conv_layers, use_lstm) # Generate global network
# Master declares worker for all slaves
for i in range(len(hosts)):
print("Initializing variables for slave ", i)
if i == FLAGS.task_index:
worker = WorkerGF2(GymEnvGF(rectangle=square_learning, circle=circle_learning),
i, state_size_square, state_size_circle, action_size_square, action_size_circle,
trainer_square, trainer_circle, model_path,
global_episodes, use_lstm, use_conv_layers, display,
rectangle_learning=square_learning, circle_learning=circle_learning)
else:
WorkerGF2(None,
i, state_size_square, state_size_circle, action_size_square, action_size_circle,
trainer_square, trainer_circle, model_path,
global_episodes, use_lstm, use_conv_layers, False,
rectangle_learning=square_learning, circle_learning=circle_learning)
print("Starting session", server.target, FLAGS.task_index)
hooks = [tf.train.StopAtStepHook(last_step=100000)]
with tf.train.MonitoredTrainingSession(master=server.target, is_chief=(FLAGS.task_index == 0),
config=tf.ConfigProto(), # config=tf.ConfigProto(log_device_placement=True),
save_summaries_steps=100,
save_checkpoint_secs=600, checkpoint_dir=model_path, hooks=hooks) as mon_sess:
print("Started session")
try:
worker.work(max_episode_length, gamma, mon_sess)
except RuntimeError:
print("Puff")
print("Done")
|
[
"david.simoes@ua.pt"
] |
david.simoes@ua.pt
|
b9a7fa1f314effcd4db89f89c729e12b6a1e82c7
|
3ff99e2d39a9faf807715b7b8b6c22f112729f96
|
/setup.py
|
66b86394deab623ca740334ffa3425e163c88b84
|
[] |
no_license
|
agramfort/sphinx-gallery
|
577a3eeb60650c7808918fa9f49ac7aace759640
|
7beb0402184cc09d727b8514d9a5ec56567a2d58
|
refs/heads/master
| 2023-07-06T19:41:53.362333
| 2015-02-05T10:17:35
| 2015-02-05T10:17:35
| 30,350,251
| 0
| 0
| null | 2015-07-15T16:33:58
| 2015-02-05T10:33:57
|
Python
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
# -*- coding: utf-8 -*-
"""
Installer Sphinx extension for gallery generator
"""
from setuptools import setup, find_packages
import sphinxgallery
with open('README.rst') as f:
long_description = f.read()
setup(
name="sphinx-gallery",
description="Sphinx extension to automatically generate an examples gallery",
long_description=long_description,
version=sphinxgallery.__version__,
packages=find_packages(),
package_data={'sphinxgallery': ['_static/gallery.css', '_static/no_image.png']},
url="https://github.com/sphinx-gallery/sphinx-gallery",
author="Óscar Nájera",
author_email='najera.oscar@gmail.com',
install_requires=['Sphinx', 'matplotlib', 'pillow', 'joblib'],
setup_requires=['nose>=1.0']
)
|
[
"najera.oscar@gmail.com"
] |
najera.oscar@gmail.com
|
d7d9cfef46cc29c32cb58ad91d797ec6134461e1
|
e2104b3dc1ce8388e4322f0affc2b40ba9268fa5
|
/tts.py
|
64cc9572cdbbfa323118fb876cfd59f38adb7378
|
[] |
no_license
|
amandal1810/micro-projects
|
131f14dac5ac49fc1f727c588c0b3e50596403e3
|
7c2d2964db97fd51877e0e7067cd71fc23464ebf
|
refs/heads/master
| 2020-06-02T18:06:15.317715
| 2015-04-21T05:20:16
| 2015-04-21T05:20:16
| 34,304,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
# Type in some text and hear it spoken! A small text-to-speech converter built on the tts-api.com service. The audio will play in your default media player.
import urllib2, urllib
import os
baseurl = "http://tts-api.com/tts.mp3?"
print "enter text to convert to speech : "
text = raw_input()
api_url = baseurl + urllib.urlencode({'q':text})
result = urllib2.urlopen(api_url).read()
f = open('tts.mp3', 'wb')
f.write(result)
f.close()
os.system("start tts.mp3")
|
[
"amandal1810@gmail.com"
] |
amandal1810@gmail.com
|
7407743f54f6be332092091cc5aeff85ebab3e76
|
c420b322c305a4ec940acabb9aa756b9315a3f1d
|
/tests/google_test.py
|
5d6d79db53dccabf9e4a05c94791206b91ad0098
|
[
"MIT"
] |
permissive
|
360fun/fest
|
1a13fa8ffdec93c85d066b18b9fa9eb4010cb461
|
f624ecd8569d1d640a83a1ee9d872237d303ca3e
|
refs/heads/master
| 2020-05-19T14:14:22.959398
| 2019-05-01T16:53:36
| 2019-05-01T16:53:36
| 185,056,247
| 0
| 0
|
MIT
| 2019-05-05T16:32:15
| 2019-05-05T16:32:14
| null |
UTF-8
|
Python
| false
| false
| 9,694
|
py
|
from unittest import mock
import pytest
from fest import facebook
from fest import google
from fest import utils
def test_google_page_iter_events():
mockapi = mock.MagicMock()
mockapi.events.return_value.list.return_value.execute.side_effect = [
{
'items': [{'id': '1'}, {'id': '2'}],
'nextPageToken': 'fizz',
},
{
'items': [{'id': '3'}, {'id': '4'}],
},
]
gcal = google.GoogleCalendar(mockapi, 'MyGCal')
ret = gcal.get_events().filter(lambda x: x['id'] < '4').execute()
exp = [{'id': '1'}, {'id': '2'}, {'id': '3'}]
assert ret == exp
def test_google_page_sync():
mockf = mock.MagicMock()
mockg = mock.MagicMock()
fevents = [
{
'id': '1',
'start_time': '2018-12-12T12:00:00-0500',
'end_time': '2018-12-12T13:00:00-0500',
'description': 'some description 1',
'name': 'Event 1',
'place': {
'name': 'Boston Public Library',
'location': {
'city': 'Boston',
'country': 'United States',
'state': 'MA',
'street': '700 Boylston St',
'zip': '02116',
},
},
},
{
'id': '2',
'start_time': '2018-12-13T12:00:00-0500',
'end_time': '2018-12-13T13:00:00-0500',
'description': 'some description 2',
'name': 'Event 2',
'place': {
'name': 'Boston Public Library',
'location': {
'city': 'Boston',
'country': 'United States',
'state': 'MA',
'street': '700 Boylston St',
'zip': '02116',
},
},
},
{
'id': '3',
'start_time': '2018-12-14T12:00:00-0500',
'end_time': '2018-12-14T13:00:00-0500',
'description': 'some description 3',
'name': 'Event 3',
'place': {
'name': 'Boston Public Library',
'location': {
'city': 'Boston',
'country': 'United States',
'state': 'MA',
'street': '700 Boylston St',
'zip': '02116',
},
},
},
]
gevents = [
{
'id': '1',
'summary': 'Event 1',
'extendedProperties': {
'private': {
'facebookId': '1',
'facebookPageId': 'MyPage',
'facebookDigest':
'c572922673ad8110b615238f8c48cd38ee156bdc',
}
}
},
{
'id': '2',
'summary': 'Event 2',
'extendedProperties': {
'private': {
'facebookId': '2',
'facebookPageId': 'MyPage',
'facebookDigest': 'OUTDATED',
}
}
},
{
'id': '4',
'summary': 'Event 4',
'extendedProperties': {
'private': {
'facebookId': '4',
'facebookPageId': 'MyPage',
'facebookDigest': '',
}
}
}
]
mockf.get_object.side_effect = [{'data': fevents}]
mockf.get_objects.side_effect = [{x['id']: x for x in fevents}]
mockg.events.return_value.list.return_value.execute.side_effect = \
[{'items': gevents}]
gcal = google.GoogleCalendar(mockg, 'MyGCal')
page = facebook.FacebookPage(mockf, 'MyPage')
ret = gcal.sync(page, time_filter='upcoming').execute()
mockg.events.return_value.insert.assert_called_once_with(
calendarId='MyGCal',
body={
'summary': 'Event 3',
'description': 'some description 3\n\nhttps://www.facebook.com/3',
'location':
'Boston Public Library '
'700 Boylston St '
'Boston MA United States 02116',
'start': {
'dateTime': '2018-12-14T12:00:00-05:00',
'timeZone': 'UTC-05:00',
},
'end': {
'dateTime': '2018-12-14T13:00:00-05:00',
'timeZone': 'UTC-05:00',
},
'extendedProperties': {
'private': {
'facebookDigest':
'6a1960a370ba8f16031d729ebfdbccb1110b5fd7',
'facebookId': '3',
'facebookPageId': 'MyPage',
},
},
},
)
mockg.events.return_value.update.assert_called_once_with(
calendarId='MyGCal',
eventId='2',
body={
'summary': 'Event 2',
'description': 'some description 2\n\nhttps://www.facebook.com/2',
'location':
'Boston Public Library '
'700 Boylston St '
'Boston MA United States 02116',
'start': {
'dateTime': '2018-12-13T12:00:00-05:00',
'timeZone': 'UTC-05:00',
},
'end': {
'dateTime': '2018-12-13T13:00:00-05:00',
'timeZone': 'UTC-05:00',
},
'extendedProperties': {
'private': {
'facebookDigest':
'505f25b09ebde5a6e2587849d364d118ad740454',
'facebookId': '2',
'facebookPageId': 'MyPage',
},
},
},
)
mockg.events.return_value.delete.assert_called_once_with(
calendarId='MyGCal',
eventId='4',
)
@mock.patch('fest.utils.digest')
def test_google_page_sync_multibatch(mock_digest):
mock_digest.return_value = '<digest>'
mockf = mock.MagicMock()
mockg = mock.MagicMock()
items = range(0, 99)
mockf.get_object.side_effect = mockf.get_objects.side_effect = [
{
'data': [
{
'id': str(x),
'start_time': '2018-12-12T12:00:00-0500',
'end_time': '2018-12-12T13:00:00-0500',
'description': f'some description {x}',
'name': f'Event {x}',
'place': {
'name': 'Boston Public Library',
'location': {
'city': 'Boston',
'country': 'United States',
'state': 'MA',
'street': '700 Boylston St',
'zip': '02116',
},
},
}
for x in items
],
},
]
mockg.events.return_value.list.return_value.execute.side_effect = [
{
'items': [],
},
]
gcal = google.GoogleCalendar(mockg, 'MyGCal')
page = facebook.FacebookPage(mockf, 'MyPage')
gcal.sync(page, time_filter='upcoming').execute()
mockg.events.return_value.insert.assert_has_calls([
mock.call(
calendarId='MyGCal',
body={
'summary': f'Event {x}',
'description':
f'some description {x}\n\nhttps://www.facebook.com/{x}',
'location':
'Boston Public Library '
'700 Boylston St '
'Boston MA United States 02116',
'start': {
'dateTime': '2018-12-12T12:00:00-05:00',
'timeZone': 'UTC-05:00',
},
'end': {
'dateTime': '2018-12-12T13:00:00-05:00',
'timeZone': 'UTC-05:00',
},
'extendedProperties': {
'private': {
'facebookDigest': '<digest>',
'facebookId': str(x),
'facebookPageId': 'MyPage',
},
},
},
)
for x in items
])
mockg.new_batch_http_request.return_value.execute.assert_has_calls([
mock.call(),
mock.call(),
])
def test_google_page_sync_no_op():
mockf = mock.MagicMock()
mockg = mock.MagicMock()
mockf.get_object.side_effect = mockf.get_objects.side_effect = [
{
'data': [],
},
]
gcal = google.GoogleCalendar(mockg, 'MyGCal')
page = facebook.FacebookPage(mockf, 'MyPage')
sync = gcal.sync(page, time_filter='upcoming')
sync.filter(lambda x: x).execute()
mockg.new_batch_http_request.assert_not_called()
def test_callback():
mockapi = mock.MagicMock()
gcal = google.GoogleCalendar(mockapi, 'MyGCal')
page = facebook.FacebookPage(mockapi, 'MyPage')
sync = gcal.sync(page, time_filter='upcoming')
callback = sync.callbackgen('POST')
res = {
'extendedProperties': {
'private': {
'facebookId': '1'
},
},
}
callback('id', res, None)
assert sync.responses['POST'] == {'1': res}
def test_callback_err():
mockapi = mock.MagicMock()
gcal = google.GoogleCalendar(mockapi, 'MyGCal')
page = facebook.FacebookPage(mockapi, 'MyPage')
sync = gcal.sync(page, time_filter='upcoming')
callback = sync.callbackgen([])
with pytest.raises(ValueError):
callback('id', 'response', ValueError)
|
[
"amancevice@cargometrics.com"
] |
amancevice@cargometrics.com
|
1de65acee10ef24ba750717c71573dbe0048d961
|
c3843a5a787e54569387c29b9c0ab937798e650a
|
/resource-dispatcher/triggering/__init__.py
|
86a28a4b0dc8a4e82fdf5a194d3e21c561878189
|
[
"Apache-2.0"
] |
permissive
|
redhat-cop/tool-integrations
|
d7c16f07196447422ce880508bdd8df4e512e58c
|
028ebb29eb63006b42d2c6767a1a28814710c26d
|
refs/heads/master
| 2023-03-08T19:28:55.337217
| 2022-04-06T19:54:49
| 2022-04-06T19:54:49
| 146,352,641
| 7
| 20
|
Apache-2.0
| 2023-03-13T10:26:54
| 2018-08-27T20:51:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
import time
from triggering.scheduled import schedule_trigger, run as run_scheduled
from triggering.webhook import WebHook
use_webhook_triggers = False
def configure_triggers(task, job):
global use_webhook_triggers
for trigger in task["triggers"]:
if trigger["type"] == "scheduled":
schedule_trigger(task["name"], trigger, job)
print(f"Configured scheduler for task {task['name']}")
elif trigger["type"] == "webhook":
webhook = WebHook()
webhook.add(task["name"], trigger["route"], job)
use_webhook_triggers = True
print(f"Configured webhook handler for task {task['name']}")
def start():
global use_webhook_triggers
if use_webhook_triggers:
print("Webhook handlers have been configured... spawning webserver thread.")
webhook = WebHook()
webhook.listen()
else:
print("No webhook triggers detected... Skipping webserver initialization.")
print()
print("---------------------------------------------")
print("Configuration is complete - tasks are active.")
print("---------------------------------------------")
print()
while True:
run_scheduled()
time.sleep(1)
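# Hypothetical usage sketch; the task/trigger shapes are inferred from
# configure_triggers above, and the job callable is a placeholder:
if __name__ == "__main__":
    def job():
        print("task body ran")

    task = {
        "name": "example-task",
        "triggers": [
            {"type": "webhook", "route": "/example"},
        ],
    }
    configure_triggers(task, job)
    start()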
|
[
"noreply@github.com"
] |
redhat-cop.noreply@github.com
|
79e34307db263114700edf431775570f6216a74b
|
bf924da04db250c383a1d1fa793f036f6fd123ee
|
/src/basics/mount_device.py
|
5e02383f99b272b78a711af885bdafd3d409c694
|
[
"Apache-2.0"
] |
permissive
|
mizhu2/cosc-learning-labs
|
2bb2d60fa9e9f6b0630029b742e2901a34480f69
|
81bf3ee8338975ea54458142920e951e64dc6161
|
refs/heads/master
| 2020-02-26T13:49:51.410772
| 2015-05-05T09:35:39
| 2015-05-05T09:35:39
| 35,090,897
| 0
| 1
| null | 2015-05-05T09:42:49
| 2015-05-05T09:42:48
| null |
UTF-8
|
Python
| false
| false
| 4,505
|
py
|
# Copyright 2014 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
try:
from urllib import quote_plus
except ImportError:
from urllib.parse import quote_plus
from basics.odl_http import odl_http_post, odl_http_get, odl_http_delete
_request_content_template = '''<?xml version="1.0" encoding="UTF-8"?>
<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<type
xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">prefix:sal-netconf-connector</type>
<name>%s</name>
<address
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">%s</address>
<port
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">%s</port>
<username
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">%s</username>
<password
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">%s</password>
<tcp-only
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">false</tcp-only>
<event-executor
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-event-executor</type>
<name>global-event-executor</name>
</event-executor>
<binding-registry
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">prefix:binding-broker-osgi-registry</type>
<name>binding-osgi-broker</name>
</binding-registry>
<dom-registry
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">prefix:dom-broker-osgi-registry</type>
<name>dom-broker</name>
</dom-registry>
<client-dispatcher
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf">prefix:netconf-client-dispatcher</type>
<name>global-netconf-dispatcher</name>
</client-dispatcher>
<processing-executor
xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:threadpool">
prefix:threadpool</type>
<name>global-netconf-processing-executor</name>
</processing-executor>
</module>
'''
_bgp_url_suffix = 'config/opendaylight-inventory:nodes/node/controller-config/yang-ext:mount/config:modules'
_dismount_url_suffix_template = 'config/opendaylight-inventory:nodes/node/controller-config/yang-ext:mount/config:modules/module/odl-sal-netconf-connector-cfg:sal-netconf-connector/%s'
def mount_device(
device_name,
device_address,
device_port,
device_username,
device_password
):
request_content = _request_content_template % (device_name, device_address, device_port, device_username, device_password)
odl_http_post(_bgp_url_suffix, 'application/xml', request_content)
def dismount_device(
device_name
):
'Dismount a network device that has been mounted on the ODL server.'
# request_content = _request_content_template % (device_name, device_address, device_port, device_username, device_password)
# request_content = _request_content_template % (quote_plus(device_name), 'dummy_address', 'dummy_port', 'dummy_username', 'dummy_password')
dismount_url_suffix = _dismount_url_suffix_template % device_name
    print(odl_http_get(dismount_url_suffix, 'application/xml', expected_status_code=200).text)
    odl_http_delete(dismount_url_suffix, 'application/xml', expected_status_code=200)
    print(odl_http_get(dismount_url_suffix, 'application/xml', expected_status_code=200).text)
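# Hypothetical usage (all connection values are placeholders):
# mount_device('my-router', '192.0.2.1', '830', 'admin', 'secret')
# dismount_device('my-router')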
|
[
"kjarrad@cisco.com"
] |
kjarrad@cisco.com
|
6880b72bbf3c24a798403324abcd33474591e71e
|
d4e96aa48ddff651558a3fe2212ebb3a3afe5ac3
|
/Modules/ThirdParty/pygccxml/src/pygccxml/declarations/decl_visitor.py
|
a5d7c4c4d39c76813dc9fa30b451d4476a6a6b34
|
[
"SMLNJ",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-mit-old-style",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"NTP",
"IJG",
"GPL-1.0-or-later",
"libtiff",
"BSD-4.3TAHOE",
"Zlib",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Spencer-86",
"Apache-2.0",
"FSFUL",
"LicenseRef-scancode-public-domain",
"Libpng",
"BSD-2-Clause"
] |
permissive
|
nalinimsingh/ITK_4D
|
18e8929672df64df58a6446f047e6ec04d3c2616
|
95a2eacaeaffe572889832ef0894239f89e3f303
|
refs/heads/master
| 2020-03-17T18:58:50.953317
| 2018-10-01T20:46:43
| 2018-10-01T21:21:01
| 133,841,430
| 0
| 0
|
Apache-2.0
| 2018-05-17T16:34:54
| 2018-05-17T16:34:53
| null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
# Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines declarations visitor class interface
"""
class decl_visitor_t(object):
"""
declarations visitor interface
All functions within this class should be redefined in derived classes.
"""
def __init__(self):
object.__init__(self)
def visit_member_function(self):
raise NotImplementedError()
def visit_constructor(self):
raise NotImplementedError()
def visit_destructor(self):
raise NotImplementedError()
def visit_member_operator(self):
raise NotImplementedError()
def visit_casting_operator(self):
raise NotImplementedError()
def visit_free_function(self):
raise NotImplementedError()
def visit_free_operator(self):
raise NotImplementedError()
def visit_class_declaration(self):
raise NotImplementedError()
def visit_class(self):
raise NotImplementedError()
def visit_enumeration(self):
raise NotImplementedError()
def visit_namespace(self):
raise NotImplementedError()
def visit_typedef(self):
raise NotImplementedError()
def visit_variable(self):
raise NotImplementedError()
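# Minimal sketch of a concrete visitor (hypothetical; per the docstring above,
# a real subclass should override every visit_* method it may encounter):
class counting_visitor_t(decl_visitor_t):
    """Counts visited classes and free functions."""
    def __init__(self):
        decl_visitor_t.__init__(self)
        self.classes = 0
        self.free_functions = 0

    def visit_class(self):
        self.classes += 1

    def visit_free_function(self):
        self.free_functions += 1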
|
[
"ruizhi@csail.mit.edu"
] |
ruizhi@csail.mit.edu
|
a70f7d1a185d9185df7f8a94018f522b4d971f18
|
f25fba01f8aab8ed8e8314fe6bfb4b97bc0c6f72
|
/test12.py
|
7a48d176b3cd77a6cbcedfcaa5b770729782dcbe
|
[] |
no_license
|
NineOverGOAT/Pylab1
|
814ebd7fd46cab8336703e49fe79f66c88d5ad64
|
a941f26abdb3f015cc9b7b83e619e9242719338a
|
refs/heads/master
| 2020-03-14T09:30:05.213354
| 2018-07-29T22:41:19
| 2018-07-29T22:41:19
| 131,546,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
adding some additional code
nominal python scripty thingy
adding more scripty thingies
and more scrypty thingies
|
[
"anderson.donovan@gmail.com"
] |
anderson.donovan@gmail.com
|
ca12ddc797982eecc3a030c05dbb4d297aa3a2f9
|
327c394b5c8b57283237f5ab7c794adb782d7e27
|
/testSalience.py
|
c2b1c251ae1d4b22911889eae6d3a78879a7a3dd
|
[
"MIT"
] |
permissive
|
santacml/Malware-as-Video
|
25225d23e81581625deaf697ea334684ef656014
|
12dc3954f2237cb34857a44eadbb5ca76f8e97b5
|
refs/heads/master
| 2020-05-21T11:51:11.506221
| 2019-05-10T19:27:51
| 2019-05-10T19:27:51
| 186,040,328
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,012
|
py
|
import pickle, gzip, glob, sys, keras, os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # gets rid of AVX message
import random as rn
import numpy as np
import tensorflow as tf
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(37)
rn.seed(1254)
tf.set_random_seed(89)
from keras import optimizers
from keras import backend as K
from keras.models import load_model
from keras.layers import *
from keras.models import Sequential
from keras.losses import weighted_categorical_crossentropy
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.regularizers import *
from keras.utils.generic_utils import get_custom_objects
from keras.layers.advanced_activations import LeakyReLU, ELU
sys.path.insert(0, r'.\libraries')
from kerasLayers import *
from kerasExtras import *
elu = ELU(1)
elu.__name__ = "ELU"
import time
from keras import Model
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import seaborn as sns
from blobifier import Blobifier
from sequential import SequentialClusterer
input_length = None
# binary standard
line_length = 25
num_samples = 7500
num_samples_valid = 1600
train_name = r".\libraries\datasets\kagglewindows\windows_exe_dll_kaggle_nopad_pooled.pklz"
valid_name = r".\libraries\datasets\kagglewindows\windows_exe_dll_kaggle_validation_nopad_pooled.pklz"
steps_per_epoch = num_samples/batch_size
valid_steps = num_samples_valid/batch_size # should be this
model = load_model(r".\networks\dist binary final nets\pruned from 5\KaggleConv-22.hdf5",
custom_objects={'DecayingConvLSTM2D':MinConvRNN,
'window_size': window_size ,
'ELU': elu,
}
)
# mode = "kaggle"
mode = "binary"
if mode == "binary":
lossFunc = 'binary_crossentropy'
generatorFunc = loadDataGeneratorBinary
elif mode == "kaggle":
lossFunc = 'categorical_crossentropy'
generatorFunc = loadDataGenerator
def compileModel(model):
optimizer = "rmsprop"
model.compile(optimizer=optimizer,
loss=lossFunc,
metrics=['accuracy'])
def salienceTest(model, confMatrixFile):
if mode == "binary" or mode == "kaggle":
train_gen = generatorFunc(train_name, num_samples)
valid_gen = generatorFunc(valid_name, num_samples_valid)
else:
raise ValueError("mode must be kaggle or binary")
compileModel(model)
print(model.summary())
answers = []
preds = []
total_correct = 0
total_run = 0
correctCountClasses = [0]*9
incorrectCountClasses = [0]*9
times = []
confMatrix = np.zeros((9,9))
# remainingAmounts = []
totalBytes = 0
totalNNZ = 0
grad = K.gradients(model.layers[-1].input, model.layers[1].output)[0]
sess = K.get_session()
mal_x = []
ben_x = []
blobifyX = []
blobifyY = []
scatterLabels = []
new_mal_x = []
new_ben_x = []
blobifier = Blobifier()
for x in range(0, int(valid_steps)):
train_x, train_y = next(valid_gen)
# for x in range(0, int(steps_per_epoch)):
# train_x, train_y = next(train_gen)
length = train_x.shape[1]
answer = train_y.tolist()[0]
answers.append(answer)
out = sess.run(grad, feed_dict={model.input: train_x})
out = out[0] # comes in array of length 1
out = out.reshape((int(out.shape[0]*out.shape[1]), out.shape[2], out.shape[3]))
# filter input based on gradient value
new_train_x = np.copy(train_x[0])
new_train_x[np.abs(np.max(out, axis=2)) < 1e-16] = 0 # works great
'''
# different filtering attempts
# new_train_x[np.abs(np.max(out, axis=2)) > 1e-16] = 0 # works TERRIBLY for proof!
# new_train_x[np.abs(np.max(out, axis=2)) < 1e-4] = 0 # works fine
# new_train_x[np.max(np.abs(out), axis=2) < 1e-4] = 0 # works fine
# did not work
# m = np.max(np.abs(out), axis=2)
# new_train_x[m < (np.mean(m) - .5*np.std(m))] = 0
# attempt - this doesn't work
# blurred = gaussian_filter(new_train_x, sigma=1)
# new_train_x_blurred = np.copy(train_x[0])
# new_train_x_blurred[np.abs(blurred) < 15] = 0
# new_train_x = new_train_x_blurred
'''
# keep filtered w/o blobs removed
not_removed = new_train_x
# remove low gradient areas
if np.sum(new_train_x) > 0:
new_train_x = blobifier.blobify(new_train_x, int(answer))
# Plot images
# fig = plt.figure()
# show different saliency maps
# ax1 = fig.add_subplot(1,4,1)
# ax1.imshow(train_x[0], cmap='gray')
# plt.axis('off')
# ax2 = fig.add_subplot(1,4, 2)
# ax2.imshow(np.mean(out, axis=2), cmap='gray')
# plt.axis('off')
# ax3 = fig.add_subplot(1,4, 3)
# ax3.imshow(np.max(out, axis=2), cmap='gray')
# plt.axis('off')
# ax4 = fig.add_subplot(1,4, 4)
# ax4.imshow(np.min(out, axis=2), cmap='gray')
# plt.axis('off')
# filtering example
# ax1 = fig.add_subplot(1,4,1)
# ax1.imshow(train_x[0], cmap='gray')
# plt.axis('off')
# ax3 = fig.add_subplot(1,4, 2)
# ax3.imshow(np.max(out, axis=2), cmap='gray')
# plt.axis('off')
# ax1 = fig.add_subplot(1,4,3)
# ax1.imshow(not_removed, cmap='gray')
# plt.axis('off')
# ax1 = fig.add_subplot(1,4,4)
# ax1.imshow(new_train_x, cmap='gray')
# plt.axis('off')
# plt.show()
# make prediction on filtered version
start = time.time()
pred = model.predict(np.asarray([new_train_x.tolist()])).tolist()[0]
# pred = model.predict(train_x).tolist()[0]
amt = time.time() - start
times.append(amt)
preds.append(pred)
totalNNZ += np.count_nonzero(new_train_x)
totalBytes += train_x.size
if int(answer):
mal_x.append(train_x)
new_mal_x.append(new_train_x)
else:
ben_x.append(train_x)
new_ben_x.append(new_train_x)
if mode == "binary":
pred = round(pred[0])
ansClass = int(answer)
if pred == ansClass:
total_correct += 1
correctCountClasses[ansClass] += 1
else:
incorrectCountClasses[ansClass] += 1
# print(pred == ansClass, np.count_nonzero(new_train_x) / new_train_x.size)
confMatrix[ansClass][pred] += 1
total_run += 1
if x % 50 == 0: print("interval", x, "correct so far", total_correct, "% of total bytes left", totalNNZ/totalBytes)
elif mode == "kaggle":
# ansClass = numpy.argmax(train_y,1)[0]
ansClass = answer.index(max(answer))
predClass = pred.index(max(pred))
if predClass == ansClass:
total_correct += 1
correctCountClasses[ansClass] += 1
else:
incorrectCountClasses[ansClass] += 1
confMatrix[ansClass][predClass] += 1
total_run += 1
if x % 500 == 0: print("interval", x, "correct so far", total_correct)
print("correct:", total_correct, "out of", total_run)
print("correct per class :", correctCountClasses)
print("incorrect per class:", incorrectCountClasses)
print("mean time to predict", np.mean(times))
print(confMatrix)
if confMatrixFile:
np.savetxt(confMatrixFile, confMatrix, delimiter=",")
# plot length of file vs amount removed, per class
# plt.scatter(blobifier.blobifyX, blobifier.blobifyY, c=blobifier.blobifyC)
# plt.show()
# blobs are separated into lists - malware blobs and benign blobs
# can use them however you want
print("number of malware blobs:", len(blobifier.malwareBlobs)) #6019, 5982, 5957
print("number of benign blobs:", len(blobifier.benignBlobs)) #10512, 10580
# show individual blobs
for blob in blobifier.malwareBlobs:
fig = plt.figure()
ax1 = fig.add_subplot(1,4,1)
ax1.imshow(blob, cmap='gray')
plt.axis('off')
plt.show()
# blobifier.malwareBlobs = blobifier.malwareBlobs[:300] # for testing! makes things much faster for clustering, analysis. FOR DEBUG ONLY
# cluster blobs if desired
# clusterer = SequentialClusterer()
# clusterer.addCandidates(blobifier.malwareBlobs)
# clusterer.addCandidates(blobifier.benignBlobs)
# clusterer.cluster()
# distance analysis if desired
# distanceAnalysis(blobifier, mal_x, ben_x)
def distanceAnalysis(blobifier, mal_x, ben_x):
# do the distance analysis in part 5 of paper
# somewhat sketchy, but seems significant
# To see if this works at all..
# blobifier.malwareBlobs = new_mal_x[:100]
# blobifier.benignBlobs = new_ben_x[:100]
# mal_x = mal_x[:100]
# ben_x = ben_x[:100]
for n in range(0, len(blobifier.malwareBlobs)):
# correlation
# blobifier.malwareBlobs[n] = np.array(blobifier.malwareBlobs[n]).flatten()
# tf dif
blobifier.malwareBlobs[n] = np.array(blobifier.malwareBlobs[n]).flatten()
blobifier.malwareBlobs[n] = " ".join([str(item) for item in blobifier.malwareBlobs[n]])
# substrings
# flat = np.array(blobifier.malwareBlobs[n]).flatten()
# blobifier.malwareBlobs[n] = np.trim_zeros(flat).tolist()
# for n grams manually
# blobifier.malwareBlobs[n] = [str(int(item)) for item in blobifier.malwareBlobs[n]]
for n in range(0, len(blobifier.benignBlobs)):
# correlation
# blobifier.benignBlobs[n] = np.array(blobifier.benignBlobs[n]).flatten()
# tf dif
blobifier.benignBlobs[n] = np.array(blobifier.benignBlobs[n]).flatten()
blobifier.benignBlobs[n] = " ".join([str(item) for item in blobifier.benignBlobs[n]])
# substrings
# flat = np.array(blobifier.benignBlobs[n]).flatten()
# blobifier.benignBlobs[n] = np.trim_zeros(flat).tolist()
# for n grams manually
# blobifier.benignBlobs[n] = [str(int(item)) for item in blobifier.benignBlobs[n]]
print("beginning test")
# transform regular files like above
for n in range(0, len(mal_x)):
# tf dif
mal_x[n] = np.array(mal_x[n]).flatten()
mal_x[n] = " ".join([str(item) for item in mal_x[n]])
for n in range(0, len(ben_x)):
ben_x[n] = np.array(ben_x[n]).flatten()
ben_x[n] = " ".join([str(item) for item in ben_x[n]])
lowerN = 1
upperN = 1
print("performing tfidf")
tfidf = TfidfVectorizer(ngram_range=(lowerN,upperN)).fit_transform(blobifier.malwareBlobs)
similarity_matrix = tfidf * tfidf.T
indices = np.triu_indices(similarity_matrix.shape[0], k=1)
similarities = similarity_matrix[indices].flatten()
print("Blobbed malware to malware")
print("mean", np.mean(similarities), "max", np.max(similarities), "min", np.min(similarities), "std", np.std(similarities))
benignTFIDF = TfidfVectorizer(ngram_range=(lowerN,upperN)).fit(blobifier.malwareBlobs)
benignTFIDF = benignTFIDF.transform(blobifier.benignBlobs)
crossSimilarities = tfidf * benignTFIDF.T
crossSimilarities = crossSimilarities.A.flatten()
print("Blobbed malware to benign")
print("mean", np.mean(crossSimilarities), "max", np.max(crossSimilarities), "min", np.min(crossSimilarities), "std", np.std(crossSimilarities))
benign_similarity_matrix = benignTFIDF * benignTFIDF.T
indices = np.triu_indices(benign_similarity_matrix.shape[0], k=1)
benign_similarities = benign_similarity_matrix[indices].flatten()
print("Blobbed benign to benign")
print("mean", np.mean(benign_similarities), "max", np.max(benign_similarities), "min", np.min(benign_similarities), "std", np.std(benign_similarities))
postMalMalSim = similarities
postMalBenSim = crossSimilarities
postBenBenSim = benign_similarities
print()
print()
tfidf = TfidfVectorizer(ngram_range=(lowerN,upperN)).fit_transform(mal_x)
similarity_matrix = tfidf * tfidf.T
indices = np.triu_indices(similarity_matrix.shape[0], k=1)
similarities = similarity_matrix[indices].flatten()
print("Normal malware to malware")
print("mean", np.mean(similarities), "max", np.max(similarities), "min", np.min(similarities), "std", np.std(similarities))
benignTFIDF = TfidfVectorizer(ngram_range=(lowerN,upperN)).fit(mal_x)
benignTFIDF = benignTFIDF.transform(ben_x)
crossSimilarities = tfidf * benignTFIDF.T
crossSimilarities = crossSimilarities.A.flatten()
print("Normal malware to benign")
print("mean", np.mean(crossSimilarities), "max", np.max(crossSimilarities), "min", np.min(crossSimilarities), "std", np.std(crossSimilarities))
benign_similarity_matrix = benignTFIDF * benignTFIDF.T
indices = np.triu_indices(benign_similarity_matrix.shape[0], k=1)
benign_similarities = benign_similarity_matrix[indices].flatten()
print("Normal benign to benign")
print("mean", np.mean(benign_similarities), "max", np.max(benign_similarities), "min", np.min(benign_similarities), "std", np.std(benign_similarities))
preMalMalSim = similarities
preMalBenSim = crossSimilarities
preBenBenSim = benign_similarities
print()
print()
print("Statistics test")
# from scipy.stats import ttest_ind
# print("Malware to Malware", ttest_ind(postMalMalSim.T, preMalMalSim.T, equal_var=False))
# print("Malware to Benign", ttest_ind(postMalBenSim.T, preMalBenSim.T, equal_var=False))
# print("Benign to Benign", ttest_ind(postBenBenSim.T, preBenBenSim.T, equal_var=False))
from scipy import stats
print("Malware to Malware", stats.ttest_rel(postMalMalSim.T, preMalMalSim.T))
print("Malware to Benign", stats.ttest_rel(postMalBenSim.T, preMalBenSim.T))
print("Benign to Benign", stats.ttest_rel(postBenBenSim.T, preBenBenSim.T))
print("Malware to Malware kruskal", stats.kruskal(postMalMalSim.T, preMalMalSim.T))
print("Malware to Benign kruskal", stats.kruskal(postMalBenSim.T, preMalBenSim.T))
print("Benign to Benign kruskal", stats.kruskal(postBenBenSim.T, preBenBenSim.T))
# https://stackoverflow.com/questions/44862712/td-idf-find-cosine-similarity-between-new-document-and-dataset
# https://stackoverflow.com/questions/6255835/cosine-similarity-and-tf-idf?rq=1
# https://github.com/scipy/scipy/issues/7759
# https://www.itl.nist.gov/div898/handbook/prc/section4/prc41.htm
sns.distplot(preMalBenSim)
plt.show()
sns.distplot(postMalBenSim)
plt.show()
sns.distplot(preBenBenSim)
plt.show()
sns.distplot(postBenBenSim)
plt.show()
sns.distplot(postMalMalSim)
plt.show()
sns.distplot(preMalMalSim)
plt.show()
# '''
def blobToImage(blob):
tokens = blob.split(" ")
# for token in tokens:
# print(token)
# print(token[:-2])
# comes as 237.0, cut off .-0
tokens = [int(token[:-2]) for token in tokens]
arr = np.array(tokens)
arr = arr.reshape((int(arr.shape[0]/25), 25))
return arr
def lookAtClusters():
# clusterFile = r".\clusters_sequential.pklz"
clusterFile = r".\clusters_sequential_BENIGN.pklz"
readMe = gzip.open(clusterFile, "r")
clusters = pickle.load(readMe)
print(len(clusters))
# print(clusters[0][0])
for cluster in clusters:
rn.shuffle(cluster)
firstFew = cluster[:20]
fig = plt.figure()
for n, blob in enumerate(firstFew):
ax1 = fig.add_subplot(4,5,n+1)
ax1.imshow(blobToImage(blob), cmap='gray')
plt.axis('off')
plt.show()
if __name__ == "__main__":
print("--------------------Performing Salience Test--------------------")
salienceTest(model, "")
# print("--------------------Looking At Clusters--------------------")
# lookAtClusters()
print("--------------------Testing Over--------------------")
|
[
"santacml@mail.uc.edu"
] |
santacml@mail.uc.edu
|
a1bd962349d27e4a59ae4070a21b49787a1ee1b1
|
380f0b5d0ae85e56ae09e591b7aac48a3f5cb8d2
|
/milho/validators.py
|
f5871d838f3772eca4a5b61ef85ca09389de3194
|
[] |
no_license
|
EnzoSalvadori/MilhoSite
|
563982b45d79e4479887bfd42eee24d458d2c3c4
|
e76df3a720dfba37edaac211ae6768205448729a
|
refs/heads/main
| 2023-08-01T10:09:02.400227
| 2021-09-23T14:23:16
| 2021-09-23T14:23:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
from django.core.exceptions import ValidationError
def validate_file_size(value):
    filesize = value.size
    # 10 MB limit; the original compared against 100 bytes, which
    # contradicted the error message below.
    if filesize > 10 * 1024 * 1024:
        raise ValidationError("The maximum file size that can be uploaded is 10MB")
    return value
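# Typical wiring in a Django model (a sketch; the model and field names
# are illustrative):
# from django.db import models
# from milho.validators import validate_file_size
#
# class Document(models.Model):
#     upload = models.FileField(upload_to='uploads/', validators=[validate_file_size])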
|
[
"63012963+EnzoSalvadori@users.noreply.github.com"
] |
63012963+EnzoSalvadori@users.noreply.github.com
|
439b31ff32f2eef202bba54a32940a0b31b9ae25
|
9fc87cb12e7cfd8de0aa6ddf1717852a7385e61b
|
/agents.py
|
06f310eb516003aea8e03adbf65226c7fde9a451
|
[] |
no_license
|
VipinVeetil/network_coordination
|
a8ba721514b3cb556caf7a52495a5664b2c741f4
|
37911a7aca44b1c83b07bc4c3caa8005a4a0e281
|
refs/heads/master
| 2021-01-10T15:19:33.368961
| 2016-01-27T23:29:18
| 2016-01-27T23:29:18
| 50,057,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
"""
Please feel free to use the code without citing or crediting the author(s) mentioned below. Cheers to science :-)
I'd be happy to hear from you about how to improve this code, and about how it may have been useful to you.
Author: Vipin P. Veetil
Contact: vipin.veetil@gmail.com
Paper title: Network Origins of Coordination
Paper URL: http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2621852
Language: Python
Module name: agents
"""
from __future__ import division
import random
class Agent(object):
    def __init__(self):
self.number_of_states = 0
""" number of possible states """
self.state = 0
""" present state """
self.frequency_neighbors_states = [0] * self.number_of_states
""" the number of neighbors that have each of the possible states """
def update_neighbors_states(self, neighbors_states):
""" record the states of the neighbors """
self.frequency_neighbors_states = [0] * self.number_of_states
for state in neighbors_states:
self.frequency_neighbors_states[state] += 1
def update_state(self):
""" update one's own state to the state that is most frequent among neighbors """
m = max(self.frequency_neighbors_states)
max_states = [state for state, x in enumerate(self.frequency_neighbors_states) if x == m]
""" make a list of the states that have highest frequency, it is possible more than one state has highest frequency """
self.state = random.choice(max_states)
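# Hypothetical usage sketch (state bookkeeping exactly as defined above):
if __name__ == "__main__":
    agent = Agent()
    agent.number_of_states = 3
    agent.update_neighbors_states([0, 2, 2, 1, 2])
    agent.update_state()
    print(agent.state)  # 2: the most frequent state among neighbors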
|
[
"vipin.veetil@gmail.com"
] |
vipin.veetil@gmail.com
|
91dff73b31e860e887c69abb52768513aeedd943
|
4aa3a9f658802666800d7de9ceee1db9a5335da5
|
/tests/test_phantom.py
|
26d1fb1557a81f91fbee16ab2a92fe5a8ed8a7fb
|
[
"LicenseRef-scancode-cecill-b-en",
"CECILL-B"
] |
permissive
|
esoubrie/siddon
|
98d03099dece19a388c5a4fc5ab649b47483f503
|
d87b309320a16a7fcd9002f72b30b896d4013c7d
|
refs/heads/master
| 2021-01-21T20:50:01.804675
| 2019-12-09T09:01:47
| 2019-12-09T09:01:47
| 1,012,046
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,694
|
py
|
#!/usr/bin/env python
"""
Testing phantom generation module.
"""
import nose
from numpy.testing import *
import numpy as np
from tomograpy import phantom
from tomograpy.phantom import *
# test cases
phantoms = [yu_ye_wang, shepp_logan, modified_shepp_logan]
# not everything works yet when some dimensions are 1 or 2:
#shapes = [(1, 1, 1), (16, 16, 16), (16, 16, 1), (16, 1, 1)]
shapes = [(16, 16, 16), (16, 16, 3), (16, 3, 3), (3, 16, 3), (3, 3, 16)]
shape16 = shapes[0]
dtypes = [np.float32, np.float64, np.int32, np.int64]
spheres = [
{'A':1, 'a':1., 'b':1., 'c':1., 'x0':0., 'y0':0., 'z0':0., 'phi':0., 'theta':0., 'psi':0.},
{'A':.5, 'a':1., 'b':1., 'c':1., 'x0':0., 'y0':0., 'z0':0., 'phi':0., 'theta':0., 'psi':0.}
]
spheres_arrays = [
[[1., 1., 1., 1., 0., 0., 0., 0., 0., 0.]],
[[.5, 1., 1., 1., 0., 0., 0., 0., 0., 0.]],
]
# tests for all predefined phantoms
for p in phantoms:
def test_shape():
for shape in shapes:
yield assert_equal, p(shape).shape, shape
def test_dtype():
for dtype in dtypes:
for shape in shapes:
yield assert_equal, p(shape, dtype=dtype).dtype, dtype
# tests on the phantom function
def test_central_value():
for shape in shapes:
        i, j, k = np.asarray(shape) // 2  # integer indices (floats cannot index arrays)
for p in spheres:
yield assert_equal, phantom(shape, [p,])[i, j, k], p['A']
# test conversion from array to dict
def test_array_to_parameters():
    from tomograpy.phantom import _array_to_parameters
for a, p in zip(spheres_arrays, spheres):
yield assert_array_equal, _array_to_parameters(a), p
if __name__ == "__main__":
nose.run(argv=['', __file__])
|
[
"nicolas.a.barbey@gmail.com"
] |
nicolas.a.barbey@gmail.com
|
71f1773daec9c741a842d55a7e3912d779d8463d
|
927e8a9390d219a14fce6922ab054e2521a083d3
|
/contest 21/lucy's home.py
|
6a1412bfd92bba7ca8499e7dbc513558d0e0c567
|
[] |
no_license
|
RavinderSinghPB/data-structure-and-algorithm
|
19e7784f24b3536e29486ddabf4830f9eb578005
|
f48c759fc347471a44ac4bb4362e99efacdd228b
|
refs/heads/master
| 2023-08-23T21:07:28.704498
| 2020-07-18T09:44:04
| 2020-07-18T09:44:04
| 265,993,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
def getMax(arr,n):
return max(arr)
def getSum(arr,n):
return sum(arr)
def numOfPaint(arr,n,maxLen):
ttl,numOfPaintr=0,1
for i in range(n):
ttl+=arr[i]
if ttl>maxLen:
ttl=arr[i]
numOfPaintr+=1
return numOfPaintr
def Min_Time(arr,n,k):
lo=max(arr)
hi=sum(arr)
while lo<hi:
mid=lo+(hi-lo)//2
reqPaint=numOfPaint(arr,n,mid)
if reqPaint<=k:
hi=mid
else:
lo=mid+1
return lo
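# A worked example: with k = 2 painters and boards [10, 20, 30, 40],
# lo starts at max(arr) = 40 and hi at sum(arr) = 100, and the binary
# search converges on 60, i.e. the split [10, 20, 30] | [40]:
#
#   Min_Time([10, 20, 30, 40], 4, 2)  ->  60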
if __name__ == '__main__':
tcs=int(input())
for _ in range(tcs):
k,n=[int(x) for x in input().split()]
arr=[int(x) for x in input().split()]
print(Min_Time(arr,n,k))
|
[
"ravindersingh.gfg@gmail.com"
] |
ravindersingh.gfg@gmail.com
|
e044b589bba7e4cbc4b896312eb463c02e2beb49
|
53438732c6bc70b0d15eea99d961d6036f8839df
|
/Practice1/Login/migrations/0001_initial.py
|
1449691fdd485d2e73bb38cacffa2f68d64360c6
|
[] |
no_license
|
Amarjeet2629/MyPycharmProjects
|
6e07c972dce8ef12453ae0246bcbfcfd03cba1fb
|
179a87f327d7c036a6192d0c6e372f2f1e3588ff
|
refs/heads/master
| 2023-05-07T20:32:22.091132
| 2021-04-20T17:06:15
| 2021-04-20T17:06:15
| 224,671,445
| 0
| 0
| null | 2023-04-21T20:51:29
| 2019-11-28T14:32:13
|
Python
|
UTF-8
|
Python
| false
| false
| 620
|
py
|
# Generated by Django 2.2.5 on 2019-09-03 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='user',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first', models.CharField(max_length=264)),
('last', models.CharField(max_length=264)),
('email', models.EmailField(max_length=264, unique=True)),
],
),
]
|
[
"amarjeet.sinha.mec17@itbhu.ac.in"
] |
amarjeet.sinha.mec17@itbhu.ac.in
|
6e455b6a41be5b2c636c214c709bbab5e0d50cee
|
5e9f7de171d63e68bc5dbfffde62fb24aebab479
|
/src/utils.py
|
9763028a027fefb5ec615c973ac41b3bec07bcd5
|
[] |
no_license
|
dinvincible98/Camera_Calibration_LIB
|
80123b1b2e05f2b2773a7ad3800957cc47cde176
|
350380d73628d59a24352d12debdbc3f49100786
|
refs/heads/master
| 2023-08-20T02:44:27.566410
| 2021-10-26T23:14:19
| 2021-10-26T23:14:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,374
|
py
|
# Utils Functions
import numpy as np
import math
def get_transformation_matrix(pts, pt_type):
x , y = 0, 0
if pt_type == 0:
x, y = pts[:,0][:,0], pts[:,0][:,1]
else:
x, y = pts[:,0], pts[:,1]
mean_x = np.mean(x)
mean_y = np.mean(y)
var_x = np.var(x)
var_y = np.var(y)
sx = np.sqrt(2.0 / var_x)
sy = np.sqrt(2.0 / var_y)
# Transformation matrix
Nx = np.array([[sx, 0.0,-sx*mean_x],
[0.0, sy, -sy*mean_y],
[0.0, 0.0, 1.0]])
return Nx
def homo_cost_func(coordinates, *params):
h11, h12, h13, h21, h22, h23, h31, h32, h33 = params
N = coordinates.shape[0] // 2
X = coordinates[:N]
Y = coordinates[N:]
w = h31*X + h32*Y + h33
x = (h11*X + h12*Y + h13) / w
y = (h21*X + h22*Y + h23) / w
res = np.zeros_like(coordinates)
res[:N] = x
res[N:] = y
# print(res)
return res
def homo_jacobian_func(coordinates, *params):
h11, h12, h13, h21, h22, h23, h31, h32, h33 = params
N = coordinates.shape[0] // 2
X = coordinates[:N]
Y = coordinates[N:]
J = np.zeros((2*N,9))
J_x = J[:N]
J_y = J[N:]
s_x = h11*X + h12*Y + h13
s_y = h21*X + h22*Y + h23
w = h31*X + h32*Y + h33
J_x[:,0] = X / w
J_x[:,1] = Y / w
J_x[:,2] = 1 / w
J_x[:,6] = -s_x*X / (w*w)
J_x[:,7] = -s_x*Y / (w*w)
J_x[:,8] = -s_x / (w*w)
J_y[:,3] = X / w
J_y[:,4] = Y / w
J_y[:,5] = 1 / w
J_y[:,6] = -s_y*X / (w*w)
J_y[:,7] = -s_y*Y / (w*w)
J_y[:,8] = -s_y / (w*w)
return J
def create_v_ij(i,j,h_list):
v_ij = np.zeros((h_list.shape[0],6))
v_ij[:,0] = h_list[:,0,i] * h_list[:,0,j]
v_ij[:,1] = h_list[:,0,i]*h_list[:,1,j] + h_list[:,1,i]* h_list[:,0,j]
v_ij[:,2] = h_list[:,1,i] * h_list[:,1,j]
v_ij[:,3] = h_list[:,2,i]*h_list[:,0,j] + h_list[:,0,i]*h_list[:,2,j]
v_ij[:,4] = h_list[:,2,i]*h_list[:,1,j] + h_list[:,1,i]*h_list[:,2,j]
v_ij[:,5] = h_list[:,2,i] * h_list[:,2,j]
return v_ij
def to_homogenous_pts(pts):
pts = np.atleast_2d(pts)
N = pts.shape[0]
pts_hom = np.hstack((pts,np.ones((N,1))))
return pts_hom
def to_homogeneous_3d_pts(pts):
if(pts.ndim !=2 or pts.shape[-1]!=2):
raise ValueError("Must be 2d inhomogenous")
N = pts.shape[0]
pts_3d = np.hstack((pts,np.zeros((N,1))))
# print(pts_3d)
pts_3d_hom = to_homogenous_pts(pts_3d)
return pts_3d_hom
def to_inhomogenous_pts(pts):
pts = np.atleast_2d(pts)
N = pts.shape[0]
pts /= pts[:,-1][:,np.newaxis]
pts_inhom = pts[:,:-1]
return pts_inhom
def to_rodrigues_vec(rot_mat):
p = 0.5 * np.array([[rot_mat[2][1]-rot_mat[1][2]],
[rot_mat[0][2]-rot_mat[2][0]],
[rot_mat[1][0]-rot_mat[0][1]]])
c = 0.5 * (np.trace(rot_mat)-1)
# print(p)
# print(c)
if np.linalg.norm(p) == 0:
if c == 1:
rot_vec = np.array([0,0,0])
elif c == -1:
rot_mat_plus = rot_mat + np.eye(3,3,dtype='float')
norm_arr = np.array([np.linalg.norm(rot_mat_plus[:,0]),
np.linalg.norm(rot_mat_plus[:,1]),
np.linalg.norm(rot_mat_plus[:,2])])
v = rot_mat_plus[:, np.where(norm_arr==max(norm_arr))]
u = v / np.linalg.norm(v)
# print(u)
u0, u1, u2 = u[0], u[1], u[2]
if u0<0 or (u0==0 and u1<0) or (u0==0 and u1==0 and u2<0):
u = -u
rot_vec = math.pi * u
else:
rot_vec = []
else:
u = p / np.linalg.norm(p)
# print(u)
theta = math.atan2(np.linalg.norm(p),c)
rot_vec = theta * u
return rot_vec
def to_rotation_matrix(rot_vec):
theta = np.linalg.norm(rot_vec)
rot_vec_hat = rot_vec / np.linalg.norm(rot_vec) # unit vector
rot_x, rot_y, rot_z = rot_vec_hat[0], rot_vec_hat[1], rot_vec_hat[2]
    W = np.array([[0, -rot_z, rot_y],
                  [rot_z, 0, -rot_x],
                  [-rot_y, rot_x, 0]])  # skew-symmetric cross-product matrix of rot_vec_hat
R = np.eye(3,dtype=np.float32) + W*math.sin(theta) + W*W*(1-math.cos(theta))
return R
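# A minimal sanity-check sketch: converting a rotation vector to a matrix and
# back should recover the original vector, e.g. for a 0.3 rad rotation about
# the z axis (the tolerance below is an assumption, not part of this module):
#
#   vec = np.array([0.0, 0.0, 0.3])
#   R = to_rotation_matrix(vec)
#   assert np.allclose(np.asarray(to_rodrigues_vec(R)).ravel(), vec, atol=1e-6)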
def compose_parameter_vector(cam_intrinsics, k, ext_list):
a = np.array([cam_intrinsics[0][0], cam_intrinsics[1][1], cam_intrinsics[0][1],
cam_intrinsics[0][2], cam_intrinsics[1][2], k[0], k[1]])
P = a
M = len(ext_list)
for i in range(M):
R, t = ext_list[i][:,:3], ext_list[i][:,3]
# print(R)
# print(t)
rot_vec = to_rodrigues_vec(R)
w = np.append(rot_vec,t)
P = np.append(P,w)
return P
def decompose_parameter_vector(P):
cam_intrinsics = np.array([[P[0],P[2],P[3]],
[0, P[1], P[4]],
[0, 0, 1]])
k = np.array([P[5], P[6]])
W = [] # list of R|t matrix
M = (len(P) - 7) // 6 # num of extrinsics in list
for i in range(M):
m = 7 + 6*i
rot_vec = P[m:m+3]
t = np.reshape(P[m+3:m+6],(3,-1))
R = to_rotation_matrix(rot_vec)
R_t = np.concatenate((R,t),axis=1)
W.append(R_t)
return cam_intrinsics, k, W
def get_project_coordinates(cam_intrinsics, ext, k, coord):
coor = np.array([coord[0],coord[1],0,1])
coor_norm = np.dot(ext,coor)
coor_norm /= coor_norm[-1]
r = np.linalg.norm(coor_norm)
uv = np.dot(np.dot(cam_intrinsics,ext),coor)
uv /= uv[-1]
u0 = uv[0]
v0 = uv[1]
uc = cam_intrinsics[0][2]
vc = cam_intrinsics[1][2]
u = u0 + (u0-uc)*r*r*k[0] + (u0-uc)*r*r*r*r*k[1]
v = v0 + (v0-vc)*r*r*k[0] + (v0-vc)*r*r*r*r*k[1]
return np.array([u,v])
def refine_cost_func(P, W, img_pts, obj_pts):
M = (len(P)-7) // 6 # num of views
N = len(obj_pts[0]) # num of model pts
cam_intrinsics = np.array([[P[0], P[2], P[3]],
[0, P[1], P[4]],
[0, 0, 1]])
k = np.array(P[5:7])
Y = np.array([])
# print(k)
for i in range(M):
m = 7 + 6*i
w = P[m:m+6]
W_curr = W[i]
for j in range(N):
Y = np.append(Y,get_project_coordinates(cam_intrinsics,W_curr,k,obj_pts[i][j]))
error_Y = np.array(img_pts).reshape(-1) - Y
# print(error_Y)
return error_Y
def refine_jacobian_func(P, W, img_pts, obj_pts):
M = (len(P)-7) // 6 # num of views
N = len(obj_pts[0]) # num of model pts
K = len(P)
cam_intrinsics = np.array([[P[0], P[2], P[3]],
[0, P[1], P[4]],
[0, 0, 1]])
dist = np.array(P[5:7])
# print(K)
res = np.array([])
for i in range(M):
m = 7 + 6*i
w = P[m:m+6]
R = to_rotation_matrix(w[:3])
# print(R)
t = w[3:].reshape(3,1)
# print(t)
W_curr = np.concatenate((R,t),axis=1)
# print(W_curr)
for j in range(N):
res = np.append(res, get_project_coordinates(cam_intrinsics,W_curr,dist,obj_pts[i][j]))
# print(res)
J = np.zeros((K, 2*M*N))
for k in range(K):
        J[k] = np.gradient(res, P[k])  # gradient of res along the sample axis with scalar spacing P[k]; a rough stand-in for a true parameter Jacobian
# print(J)
return np.transpose(J)
|
[
"mingqingyuan2021@u.northwestern.edu"
] |
mingqingyuan2021@u.northwestern.edu
|
08124406eb4df7184a595374b98b557697402c8b
|
dcd6ff8ad969688b7055ed0e7484979f71344ff3
|
/tests/runtests.py
|
b407de16944f88d75b89e70bb1477722ef917134
|
[
"Unlicense"
] |
permissive
|
Laeeth/ohmygentool
|
f9cf2f6376a185335a4ad63d99985eba27f6582c
|
fa9c16d5bba2f249ea92a58a19f4377d046b95f0
|
refs/heads/master
| 2020-06-18T00:42:59.301565
| 2019-06-07T03:54:42
| 2019-06-07T03:54:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
'''
Entry point for running system tests
'''
import os
import sys
import importlib
import argparse
# Make helper modules visible in tests
sys.path.append("./")
def find_tests(base_path=None):
'''Finds all test packages in specified directory'''
return find_selected(it for it in os.listdir(base_path) if it != '__pycache__')
def find_selected(names):
for p in names:
modpath = os.path.join(os.getcwd(), p)
if not os.path.isdir(modpath):
continue
try:
test = importlib.import_module(p)
yield test
except Exception as e:
print(e)
pass
def do_optional(f):
'''Tries silently call f() ignoring non existing attributes'''
try:
f()
except AttributeError:
pass
def run_tests(tests):
'''Run tests with optional setup and teardown phases'''
tests_ok = True
for n,test in enumerate(tests, 1):
if not run_single(test, n):
tests_ok = False
if not tests_ok:
print('Some tests not passed')
else:
print('All tests are OK')
def run_single(test, num=None):
'''Run single test'''
print(f'TEST {num or ""} [{test.__name__}]')
do_optional(lambda: test.setup())
try:
test.run()
except Exception as e:
print(e)
return False
finally:
do_optional(lambda: test.teardown())
return True
if __name__=='__main__':
if len(sys.argv) == 1:
run_tests(find_tests())
else:
print('Running selected tests:')
print('\t', " ".join(sys.argv[1:]))
run_tests(find_selected(sys.argv[1:]))
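# A minimal sketch of a test package this runner would pick up; the package
# name 'mytest' is hypothetical. Each package exposes run() plus optional
# setup() and teardown():
#
#   mytest/__init__.py
#       def setup():    print('preparing')
#       def run():      assert 1 + 1 == 2
#       def teardown(): print('cleaning up')
#
# Invoked with:  python runtests.py            (all packages)
#          or:   python runtests.py mytest     (selected packages)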
|
[
"absxv@yandex.ru"
] |
absxv@yandex.ru
|
593bcc9022a20d45e452698f766029e01470e609
|
28851f6d1e1d123074d0e8ccdff910dd59635aec
|
/Exceptions/try_except_finally.py
|
bd909544efeb4c407023f28acd2d1803af2cfdf1
|
[] |
no_license
|
sagarjaspal/Training
|
fc76ee921e1e38118e08cdd3287484135e05ec23
|
dbded9588be970d1e933f838f760229b9f88867b
|
refs/heads/master
| 2020-03-22T12:13:49.918234
| 2018-07-16T15:42:56
| 2018-07-16T15:42:56
| 140,026,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
a = int(input('Enter a: '))
b = int(input('Enter b: '))
try:
password = '007'
x = a/b
li = [1, 2, 3, 4]
print('Output', x)
except ZeroDivisionError as ze:
print('Exception is', ze)
finally:
password = ''
print('Pass', password)
print('Hi finally')
print('I am still running')
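# For illustration: with inputs a=1, b=0 the division raises ZeroDivisionError,
# the except branch prints the exception, and the finally block still runs,
# so the password is cleared on both the success and the failure path.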
|
[
"fantooshsagar.15@gmail.com"
] |
fantooshsagar.15@gmail.com
|
92b7c7674156b1087f0f8989c6f71269d54d18a3
|
a3c662a5eda4e269a8c81c99e229879b946a76f6
|
/.venv/lib/python3.7/site-packages/pylint/test/regrtest_data/import_package_subpackage_module.py
|
2864e3c9efc4d60c43b428750f852330ec8b8ae2
|
[
"MIT"
] |
permissive
|
ahmadreza-smdi/ms-shop
|
0c29da82c58b243507575672bbc94fb6e8068aeb
|
65ba3f3061e2ac5c63115b08dadfe7d67f645fb6
|
refs/heads/master
| 2023-04-27T19:51:34.858182
| 2019-11-24T20:57:59
| 2019-11-24T20:57:59
| 223,616,552
| 6
| 2
|
MIT
| 2023-04-21T20:51:21
| 2019-11-23T16:09:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,238
|
py
|
# pylint: disable=I0011,C0301,W0611
"""I found some of my scripts trigger off an AttributeError in pylint
0.8.1 (with common 0.12.0 and astroid 0.13.1).
Traceback (most recent call last):
File "/usr/bin/pylint", line 4, in ?
lint.Run(sys.argv[1:])
File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 729, in __init__
linter.check(args)
File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 412, in check
self.check_file(filepath, modname, checkers)
File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 426, in check_file
astroid = self._check_file(filepath, modname, checkers)
File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 450, in _check_file
self.check_astroid_module(astroid, checkers)
File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 494, in check_astroid_module
self.astroid_events(astroid, [checker for checker in checkers
File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 511, in astroid_events
self.astroid_events(child, checkers, _reversed_checkers)
File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 511, in astroid_events
self.astroid_events(child, checkers, _reversed_checkers)
File "/usr/lib/python2.4/site-packages/pylint/lint.py", line 508, in astroid_events
checker.visit(astroid)
File "/usr/lib/python2.4/site-packages/logilab/astroid/utils.py", line 84, in visit
method(node)
File "/usr/lib/python2.4/site-packages/pylint/checkers/variables.py", line 295, in visit_import
self._check_module_attrs(node, module, name_parts[1:])
File "/usr/lib/python2.4/site-packages/pylint/checkers/variables.py", line 357, in _check_module_attrs
self.add_message('E0611', args=(name, module.name),
AttributeError: Import instance has no attribute 'name'
You can reproduce it by:
(1) create package structure like the following:
package/
__init__.py
subpackage/
__init__.py
module.py
(2) in package/__init__.py write:
import subpackage
(3) run pylint with a script importing package.subpackage.module.
"""
import package.subpackage.module
__revision__ = '$Id: import_package_subpackage_module.py,v 1.1 2005-11-10 16:08:54 syt Exp $'
|
[
"ahmadreza.smdi@gmail.com"
] |
ahmadreza.smdi@gmail.com
|
0a2e229c505d383bc4f65ae2067c2d1e61f70836
|
d427f6f1863091acfa8675784051850932b717d3
|
/iscsi.py
|
819e6b3a652990621d16f9aab0de6eade751e21c
|
[] |
no_license
|
Adarsh-sophos/Arcus-Cloud
|
f2adb63c6f21b218fa174489d082d67b04f24e59
|
32e37fbf7d5e6defc5b753adf0eaf388ee9c1df8
|
refs/heads/master
| 2021-03-20T03:35:03.207470
| 2019-01-12T09:55:36
| 2019-01-12T09:55:36
| 94,601,605
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,675
|
py
|
#!/usr/bin/python2
import config
import header
import cgi,commands,os,MySQLdb
header.header_content()
if(os.environ['REQUEST_METHOD'] == "POST"):
db = MySQLdb.connect("localhost","root", "Aj1.....", "arcus")
cursor = db.cursor()
# get username
sql = "SELECT * FROM users WHERE id={0}". format(header.cookie_value())
try:
cursor.execute(sql)
results = cursor.fetchone()
userName = results[1]
except:
print "Error: unable to fecth data"
iqn = cgi.FormContent()['iqn'][0]
vgname = 'vg1'
clientIP = cgi.FormContent()['clientIP'][0]
size = cgi.FormContent()['size'][0]
sql = "INSERT INTO iscsi(user_id,size,clientIP,state,iqn) VALUES ({0},{1},'{2}','{3}','{4}')". format(int(header.cookie_value()), int(size), clientIP, "login", iqn)
try:
cursor.execute(sql)
#db.commit()
except:
# Rollback in case there is any error
print("could not insert in database")
db.rollback()
last_id = cursor.lastrowid
targetsFp = open("/Arcus/public/tmp/iscsi/targets.conf", "a")
targetsFp.write("\n<target {0}>\n\tbacking-store /dev/{1}/{2}-iscsi\n</target>\n\n". format(iqn, vgname, last_id))
targetsFp.close()
ansibleString = """
- hosts: web
tasks:
#yum install scsi-target-utils -y
- package:
name: "scsi-target-utils"
state: present
#create LV
- lvol:
vg: {3}
lv: {0}-iscsi
size: {1}
#create a partition in storage (don't format)
#write in /etc/tgt/targets.conf file -
#<target {2}>
# backing-store /dev/{3}/{0}-iscsi
#</target>
- name: "setup config file"
copy:
src: "/Arcus/public/tmp/iscsi/targets.conf"
dest: "/etc/tgt/targets.conf"
#systemctl restart tgtd
- service:
name: "tgtd"
state: restarted
""". format(last_id, size, iqn, vgname)
ansibleProg = open("/Arcus/public/tmp/iscsi/iscsi.yaml", "w")
ansibleProg.write(ansibleString)
ansibleProg.close()
ansF = commands.getstatusoutput("sudo ansible-playbook /Arcus/public/tmp/iscsi/iscsi.yaml")
if(ansF[0] == 0):
print("<pre> " + ansF[1] + " </pre>")
# set up client
sshString = "sudo sshpass -p {0} ssh -o stricthostkeychecking=no -l root {1}". format("redhat", clientIP)
inF = commands.getstatusoutput(sshString + " sudo yum install iscsi-initiator-utils -y")
if(inF[0] == 0):
disF = commands.getstatusoutput(sshString + " sudo iscsiadm --mode discoverydb --type sendtargets --portal {} --discover". format('192.168.43.171'))
if(disF[0] == 0):
logF = commands.getstatusoutput(sshString + " sudo iscsiadm --mode node --targetname {0} --portal {1}:3260 --login". format(iqn, '192.168.43.171'))
if(logF[0] == 0):
print("<h3>setup complete</h3>")
db.commit()
else:
print(logF[1])
db.rollback()
else:
print(disF[1])
db.rollback()
else:
print(inF[1])
db.rollback()
else:
print("<pre> " + ansF[1] + " </pre>")
db.rollback()
elif(os.environ['REQUEST_METHOD'] == "GET"):
print """
<div class="form-photo">
<div class="form-container">
<div class="image-holder" style="background-image:url("/img/svg_cloud_nfs.jpg");margin:10px;padding:20px;"></div>
<form method="POST" action="iscsi.py">
<h2 class="text-center">iSCSI Share</h2>
<div class="form-group">
<input class="form-control" type="text" name="clientIP" placeholder="clientIP">
</div>
<div class="form-group">
<input class="form-control" type="text" name="size" placeholder="Drive size in MB">
</div>
<div class="form-group">
<input class="form-control" type="text" name="iqn" placeholder="IQN">
</div>
<div class="form-group has-success">
<div class="checkbox">
<label class="control-label" style="margin:auto;">
<input type="checkbox"> Confirm?</label>
</div>
</div>
<div class="form-group">
<button class="btn btn-primary btn-block" type="submit">SUBMIT </button>
</div>
</form>
</div>
</div>
<script src="/js/jquery.min.js"></script>
<script src="/bootstrap/js/bootstrap.min.js"></script>
<script src="https://cdn.datatables.net/1.10.15/js/jquery.dataTables.min.js"></script>
<script src="https://cdn.datatables.net/1.10.15/js/dataTables.bootstrap.min.js"></script>
<script src="/js/script.min.js"></script>
</body>
</html>
"""
|
[
"adarshjain583@gmail.com"
] |
adarshjain583@gmail.com
|
1590e040d0ea13bf23e01f855954dc37ec2831ba
|
af442a26d532457295b74e54cf52993a89370959
|
/app/api/v1/webhook.py
|
e794992036294ea01fac0ed254bf6e0ac03e9da8
|
[] |
no_license
|
ForkManager/Forked-fastcampusapi
|
2b286f0631db2750553c4a3ca2edefb1bd1010a0
|
89f77c55ac1abc45c925bef84d01a48c24ef703f
|
refs/heads/main
| 2023-05-29T15:43:49.272591
| 2021-06-18T13:57:59
| 2021-06-18T13:57:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
from fastapi import APIRouter, Body, Request, Depends
from pydantic import HttpUrl
from sqlalchemy import func
from sqlalchemy.orm.session import Session
from app import models, schemas
from app.config import settings
from app.database import get_db
from app.lib import telegram
router = APIRouter()
bot = telegram.Telegram(settings.TELEGRAM_BOT_TOKEN)
def add_user(user: schemas.User, db: Session) -> models.User:
row = models.User(
id=user.id,
username=user.username,
first_name=user.first_name,
last_name=user.last_name,
)
db.add(row)
db.commit()
return row
@router.get("")
async def get_webhook():
return await bot.get_webhook()
@router.post("")
async def set_webhook(url: HttpUrl = Body(..., embed=True)):
return await bot.set_webhook(url)
@router.post(f"/{settings.TELEGRAM_BOT_TOKEN.get_secret_value()}")
async def webhook(request: Request, db: Session = Depends(get_db)):
req = await request.json()
print(req)
update = telegram.schemas.Update.parse_obj(req)
message = update.message
user = update.message.from_
db_user = db.query(models.User).filter_by(id=user.id).first()
if not db_user:
db_user = add_user(user, db)
msg = "✨ '문제' 또는 '퀴즈'라고 말씀하시면 문제를 냅니다!"
if "문제" in message.text or "퀴즈" in message.text:
quiz = db.query(models.Quiz).order_by(func.RAND()).first()
if not quiz:
await bot.send_message(message.chat.id, "퀴즈가 없습니다")
return
db_user.quiz_id = quiz.id
msg = f"{quiz.question}\n\n{quiz.content}"
elif db_user.quiz_id and message.text.isnumeric():
correct = db_user.quiz.answer == int(message.text)
msg = f"아쉽네요, {db_user.quiz.answer}번이 정답입니다."
if correct:
db_user.score += 1
msg = f"{db_user.quiz.answer}번, 정답입니다!"
db_user.quiz_id = None
await bot.send_message(message.chat.id, msg)
db.commit()
return "OK"
|
[
"rurouni24@gmail.com"
] |
rurouni24@gmail.com
|
7a44c1c6a8598cad317cdf98afb950856f18fc76
|
b84e8cfea8b1452387da0562c999aa5fd742dd2c
|
/convert_ksn_to_points_and_delete_extras.py
|
f0ff3c5ab4182ffc85c5a5c1e254906beca4796e
|
[
"MIT"
] |
permissive
|
cmshobe/grass-scripts
|
ba8100b6b4541e700fdeb4f8e555452513f8c7a2
|
5d593f301fd708252e5a69b7304ce28ea8490506
|
refs/heads/master
| 2020-04-04T08:40:21.773623
| 2019-03-20T22:42:19
| 2019-03-20T22:42:19
| 154,883,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,649
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 10:39:29 2018
@author: charlie
This script does two main things:
1) turns the ksn line segments into points (each segment into 3 points)
2) deletes the excess points such that each line segment is only
identified by a single midpoint, which holds the same attribute values
that the ksn lines held.
"""
import numpy as np
from grass.pygrass.modules.shortcuts import general as g
from grass.pygrass.modules.shortcuts import vector as v
from grass.pygrass.modules.shortcuts import database as db
#first, use v.to.points to convert ksn segments to points (there will be a
#point at the beginning, middle, and end of each segment)
chan_segs_lines = 'chan_segs_with_litho_proj'
v.to.points(flags='p', input=chan_segs_lines, type='line', dmax=100)
#then, copy chan_segs_points so we keep a pristine version in case we mess up
g.copy(vector='chan_segs_points,chan_segs_points_trimmed')
#then, loop through an iterable, deleting two out of the three
#ksn points for each segment knowing that there are three points for each
#segment: one has "along"=0, one has "along"=n, and the one we want to keep
#has "along"=n/2
#we use v.edit with the "delete" function
#so I will be selecting the points I want to delete
#THE v.edit CALL BELOW SUCCESSFULLY DELETES ALL ZERO VALUES BUT NOT THE MAXIMA
points_file = 'chan_segs_points'
expression = '"along" = 0'
v.edit(map=points_file, layer=2, type='point', tool='delete',
where=expression)
#NOW NEED A SECOND LOOP TO TAKE CARE OF DELETING THE MAXIMUM VALUES
points_file = 'chan_segs_points'
num_chan_segs = 102318
catlist = []
for chan_seg in range(num_chan_segs):
lcat = chan_seg + 1 #lcats start at 1, not 0.
expression_2 = 'SELECT cat,MAX("along") FROM chan_segs_points_2 WHERE "lcat" = ' + str(lcat)
#use db.select to find the cat of the point I want to delete
filename = 'sql_out.txt'
db.select(overwrite=True, sql=expression_2, separator='comma',
output=filename) #this will select the point I want to
#delete and write it out to line 2 of a csv file. So in Numpy speak
#the way to access the cat is now [0,0] of that csv as long as the
#header row is skipped on import.
data = np.genfromtxt(filename, delimiter=',', skip_header=1)
cat = data[0] #this gives me the category I want to delete!!
catstr = str(int(cat))
catlist.append(catstr)
#turn the list into a format v.edit can understand
catstringall = ",".join(catlist)
#now outside the loop, delete
v.edit(map=points_file, layer=2, type='point', tool='delete',
cats=catstringall)
|
[
"charles.shobe@colorado.edu"
] |
charles.shobe@colorado.edu
|
4ddc745a90e783629283a00b83a524d4e752e4c8
|
4e63d92e2b144d0ae0d0881cf55feaf91a79573d
|
/Light_out Puzzle/light_out_puzzle.py
|
52db2f61e0b2d6d0aa48e1952a198396d419ff80
|
[] |
no_license
|
swaelali/Old-Stuff
|
d943dfc1845741aa6f64dd3e30647065b2f5fad2
|
ce04b1edd6561429108803e8f13a2aaf0d6d5740
|
refs/heads/master
| 2021-01-01T06:03:23.962440
| 2017-07-17T07:52:56
| 2017-07-17T07:52:56
| 97,345,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,332
|
py
|
# Lights Out puzzle solution algorithm
# Solves Lights Out puzzles up to 3x3
from GF2 import *
def combine(puzz_matrix):
    ''' Return a list of all possible combinations subject to the following constraints:
        - no repetition
        - the order of the numbers does not matter'''
buttons = len(puzz_matrix)*len(puzz_matrix[0])
phase = 1
main_combination=[]
combinations = []
while phase < buttons:
if phase == 1:
for x in range(1,buttons +1):
main_combination.append(x)
combinations = main_combination.copy()
phase +=1
else:
temp_combinations = combinations.copy()
for i in range(len(main_combination)):
for x in range(len(combinations)):
temp = int(str(main_combination[i])+str(combinations[x]))
temp_combinations.append(temp)
            # Filtration: drop combinations in which any button appears an even number of times
repeat = buttons
while repeat !=0:
for element in temp_combinations:
temp_element= str(element)
for x in temp_element:
if temp_element.count(x)%2 == 0:
temp_combinations.remove(element)
break
repeat -=1
# inner sorting
comp_element = ''
temp_element_list = []
temp_temp_combinations = []
for element in temp_combinations:
temp_element= str(element)
for x in temp_element:
temp_element_list.append(int(x))
temp_element_list.sort()
for pointer in range(len(temp_element_list)):
comp_element = comp_element+str(temp_element_list[pointer])
temp_temp_combinations.append(int(comp_element))
comp_element = ''
temp_element_list = []
temp_combinations = temp_temp_combinations.copy()
combinations = list(set(temp_combinations)).copy()
combinations.sort()
phase +=1
return combinations
def assign_matrix_elements(puzz_matrix):
    ''' Return a dictionary keyed by (row, col) position whose value is the
    GF(2) element `one`, i.e. the toggle contributed at that position'''
assignment_dic={(x,y):one for x in range(len(puzz_matrix)) for y in range(len(puzz_matrix[0]))}
return assignment_dic
def get_buttons_vectors(puzz_matrix):
''' Return a dictionary of buttons as keys and corresponding vectors (dictionary) as values'''
buttons = len(puzz_matrix)*len(puzz_matrix[0])
buttons_vectors ={}
matrix_pairs = assign_matrix_elements(puzz_matrix)
button_position_dic={}
position_corresponding_map_dic = {}
max_row = len(puzz_matrix)
max_col = len(puzz_matrix[0])
row = 0
col = 0
button = 1
while button < (buttons +1):
for row in range(max_row):
for col in range(max_col):
button_position_dic[button]=(row,col)
button +=1
temp_set =set()
for x in range(max_row):
for y in range(max_col):
temp_set.add((x,y))
if x+1 < max_row:
temp_set.add((x+1,y))
if x-1 >=0:
temp_set.add((x-1,y))
if y+1 < max_col:
temp_set.add((x,y+1))
if y-1 >=0:
temp_set.add((x,y-1))
position_corresponding_map_dic[(x,y)]=temp_set
temp_set = set()
for button in button_position_dic.keys():
buttons_vectors[button]= {x:matrix_pairs[x] for x in position_corresponding_map_dic[button_position_dic[button]]}
return buttons_vectors
def Check(combination,vectors_dic,puzz_matrix):
temp_matrix=puzz_matrix.copy()
pointing_seq = str(combination)
for button in pointing_seq:
button = int(button)
extracted_vector= vectors_dic[button]
for x in range(len(temp_matrix)):
for y in range(len(temp_matrix[0])):
if (x,y) in extracted_vector.keys():
temp_matrix[x][y] = temp_matrix[x][y]+extracted_vector[(x,y)]
# Checking
for x in range(len(temp_matrix)):
for y in range(len(temp_matrix[0])):
if temp_matrix[x][y] == 0:
temp_matrix=[]
return False
temp_matrix=[]
return True
# Demonstration
original_matrix = [[0,0,one],[one,0,one],[0,one,0]]
print("Combining ...")
combinations = combine(original_matrix)
print(len(combinations)," combinations found!")
print('Solving...')
vectors_dic = get_buttons_vectors(original_matrix)
for combination in combinations:
original_matrix = [[0,0,one],[one,0,one],[0,one,0]]
if Check(combination,vectors_dic,original_matrix):
print("Solution obtained! use that sequence ",combination)
break
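# Why odd press counts only: over GF(2) pressing a button twice cancels out
# (one + one == 0), so combine() keeps exactly the combinations in which every
# button index appears an odd number of times, and press order never matters
# because GF(2) addition commutes. For the 3x3 demo above, the sequence '124'
# toggles the same lights as '412' or '112244124'.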
|
[
"wa2el.ali@gmail.com"
] |
wa2el.ali@gmail.com
|
629d9cf082028fa5564dadaee985f0da6dc39041
|
159de093afa6e94e853d7721fc96c902e5767bc5
|
/Model/utils.py
|
7f594fae6b66f51d8fec995f04bbc4ff64ea9339
|
[] |
no_license
|
konstantinosKokos/verbs-as-maps-keras
|
efcb6fccdbaeb607f9ae19761e1711283fc12511
|
91ab1eb99e0535c5f321e4cbd16a6609a84072ad
|
refs/heads/master
| 2021-04-15T04:29:03.150646
| 2018-06-25T06:28:53
| 2018-06-25T06:28:53
| 126,683,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,382
|
py
|
import numpy as np
from keras.utils import to_categorical
import spacy
from matplotlib import pyplot as plt
nlp = spacy.load('en_vectors_web_lg')
def flatten(relations):
# flatten relations
return [item for sublist in [[(i,j) for j in relations[i]] for i in relations] for item in sublist]
def remove_dups(pairs):
    # hold unique pairs only: drop the mirrored duplicate (b, a) of each pair (a, b);
    # indices are collected first because deleting while iterating skips elements
    to_delete = set()
    for i, p0 in enumerate(pairs):
        for j, p1 in enumerate(pairs):
            if j > i and p0[2] == p1[0] and p0[3] == p1[1] and p0[0] == p1[2] and p0[1] == p1[3]:
                to_delete.add(j)
    for j in sorted(to_delete, reverse=True):
        del pairs[j]
def data_generator(verbs, objects, pairs, batch_size = 64, random_chance = 0.5, return_signatures=False,
index = 0, shuffle=False, random_progression = None):
"""
Iterates over vo paraphrase pairs, yielding their corresponding label
"""
if shuffle:
pairs = np.random.permutation(pairs).tolist()
num_verbs = len(verbs)+1
num_passes = 0
v0, o0, v0sig, v1, o1, v1sig, t = [], [], [], [], [], [], []
while True:
if np.random.random() > random_chance:
current_sample = pairs[index]
t.append(1)
else:
random_index = np.random.randint(len(pairs)) # Pick a random paraphrase pair from the dataset
offset = np.random.choice([0,2]) # Pick a random phrase from this pair
            current_sample = [pairs[index][0], pairs[index][1],                              # First phrase is from the original sample
                              pairs[random_index][offset], pairs[random_index][offset + 1]]  # Second phrase is from the random sample
if ([current_sample[2], current_sample[3], current_sample[0], current_sample[1]] in pairs
or current_sample in pairs): # Make sure that the random sample isn't actually a paraphrase
t.append(1)
else:
t.append(0)
if index == len(pairs)-1:
index = 0
num_passes += 1
if random_progression:
progression = random_progression(random_chance, num_passes)
if progression != random_chance:
random_chance = progression
print('\nNew random chance: ', random_chance)
if shuffle: pairs = np.random.permutation(pairs).tolist()
else: index += 1
v0.append(nlp(verbs[current_sample[0]]).vector)
o0.append(nlp(objects[current_sample[1]]).vector)
v0sig.append(to_categorical(current_sample[0], num_verbs))
v1.append(nlp(verbs[current_sample[2]]).vector)
o1.append(nlp(objects[current_sample[3]]).vector)
v1sig.append(to_categorical(current_sample[2], num_verbs))
if len(v0) == batch_size:
v0, o0, v0sig, v1, o1, v1sig, t = (np.array(v0), np.array(o0), np.array(v0sig),
np.array(v1), np.array(o1), np.array(v1sig),
np.array(t))
if return_signatures: yield [v0, o0, v0sig, v1, o1, v1sig], t
else: yield [v0, o0, v1, o1], t
v0, o0, v0sig, v1, o1, v1sig, t = [], [], [], [], [], [], []
def evaluation_generator(verbs, objects, pairs, batch_size = 256, index = 0, return_signatures=True):
num_verbs = len(verbs) + 1
vs, os = [], []
while index < len(pairs):
current_sample = pairs[index]
current_verb = current_sample[0]
current_object = current_sample[1]
if return_signatures:
vs.append(to_categorical(current_verb, num_verbs))
else:
            vs.append(nlp(verbs[current_verb]).vector)
os.append(nlp(objects[current_object]).vector)
current_verb = current_sample[2]
current_object = current_sample[3]
if return_signatures:
vs.append(to_categorical(current_verb, num_verbs))
else:
            vs.append(nlp(verbs[current_verb]).vector)
os.append(nlp(objects[current_object]).vector)
index += 1
if len(vs) == batch_size:
vs , os = np.array(vs), np.array(os)
yield [vs, os]
vs, os = [] , []
def histplot(history):
for key in history:
plt.plot((history[key]))
plt.legend([key for key in history.keys()])
plt.show()
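# A minimal usage sketch with hypothetical data (requires the spacy model
# loaded above; 'model' stands in for any compiled Keras model):
#
#   verbs = {0: 'buy', 1: 'purchase'}
#   objects = {0: 'car', 1: 'house'}
#   pairs = [[0, 0, 1, 0], [0, 1, 1, 1]]   # each row: v0, o0, v1, o1
#   gen = data_generator(verbs, objects, pairs, batch_size=2)
#   model.fit_generator(gen, steps_per_epoch=10, epochs=1)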
|
[
"konstantinos@riseup.net"
] |
konstantinos@riseup.net
|
1e6ee13bf63328809e46af158476b05bba907c72
|
201356e09fb6dd82d36ed5b93b08a29482b68fb2
|
/Mine/Captcha/Captcha_1,py.py
|
d79c8657b63d784669cc70f9025b13caf436bd60
|
[] |
no_license
|
M45t3rJ4ck/Py-Code
|
5971bad5304ea3d06c1cdbd065941271c33e4254
|
32063d149824eb22163ea462937e4c26917a8b14
|
refs/heads/master
| 2020-04-08T05:03:44.772327
| 2018-11-26T06:41:03
| 2018-11-26T06:41:03
| 159,044,079
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 703
|
py
|
# NOTE: the original file was pasted from an interactive session; the REPL
# banner and '>>> ' prompt have been stripped and the mojibake ('???')
# comments reconstructed in English. The imports below are assumptions:
# the original project presumably defines random_captcha_text() and the
# CAPTCHA_WIDTH / CAPTCHA_HEIGHT constants elsewhere.
import numpy as np
from PIL import Image
from captcha.image import ImageCaptcha

def gen_captcha_text_and_image(width=CAPTCHA_WIDTH, height=CAPTCHA_HEIGHT, save=None):
    '''
    Generate a captcha image and its text.
    :param width: image width in pixels
    :param height: image height in pixels
    :param save: if truthy, also write the image to '<text>.jpg'
    :return: (text, image as a numpy array)
    '''
    image = ImageCaptcha(width=width, height=height)
    # random captcha text
    captcha_text = random_captcha_text()
    captcha = image.generate(captcha_text)
    # save to disk
    if save: image.write(captcha_text, captcha_text + '.jpg')
    captcha_image = Image.open(captcha)
    # convert to a numpy array
    captcha_image = np.array(captcha_image)
    return captcha_text, captcha_image
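# A hedged usage sketch, assuming random_captcha_text() and the CAPTCHA_*
# constants are defined elsewhere in the original project:
#
#   text, img = gen_captcha_text_and_image(save=True)
#   print(text, img.shape)   # e.g. 'aB3x' and (CAPTCHA_HEIGHT, CAPTCHA_WIDTH, 3)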
|
[
"wavoges@gmail.com"
] |
wavoges@gmail.com
|
60777be6c4991ec9c50d572f450f4d3463e966f9
|
294b9a6740c574ccc8132f3b527ae34ceab64182
|
/src/coupons/models.py
|
d3b07edddb1bdf88bd3242a76093ed1044a52011
|
[] |
no_license
|
werdani/E-commerce
|
cc172aa79d0171698d9adbb47681519c3406966d
|
8f0f3ce4dfad47226cc6f27e5bc5c32b93219499
|
refs/heads/main
| 2023-04-02T17:22:56.096635
| 2021-04-19T01:07:59
| 2021-04-19T01:07:59
| 353,048,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
from django.db import models
from django.core.validators import MinValueValidator,MaxValueValidator
class Coupon(models.Model):
code = models.CharField(max_length=50,unique=True)
valid_from = models.DateTimeField()
valid_to = models.DateTimeField()
discount = models.IntegerField(validators=[MinValueValidator(0),MaxValueValidator(100)])
active = models.BooleanField()
def __str__(self):
return self.code
|
[
"ammaryasser554zz@gmail.com"
] |
ammaryasser554zz@gmail.com
|
2669ee460c1a20c17cadb93ee23f61342d7b186e
|
b93e0e0c09e224c570d01d97f9ddbef184c9d57b
|
/listings/views.py
|
56b982554031a2453ca53128e99ebc7c936e4654
|
[] |
no_license
|
tj-26/Django_real_estate_project
|
835e4a53ee24276baeded5fc8ddf688557e36836
|
1ef005595b2b9448dc37bde7d560b0eab912b65f
|
refs/heads/master
| 2023-02-28T02:50:34.328707
| 2021-01-12T08:16:00
| 2021-01-12T08:16:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
from django.shortcuts import render
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from .models import Listing
# Create your views here.
def index(request):
listings = Listing.objects.order_by('-list_date').filter(is_published=True)
paginator = Paginator(listings, 6)
page = request.GET.get('page')
paged_listings = paginator.get_page(page)
context = {
'listings':paged_listings
}
return render(request, 'listings/listings.html', context)
def listing(request, listing_id):
return render(request, 'listings/listing.html')
def search(request):
return render(request, 'listings/search.html')
|
[
"yashikakhuranayashika@gmail.com"
] |
yashikakhuranayashika@gmail.com
|
eff00c0673ffecede2b7c7014027465b9394798d
|
94be2850eb6fc17a081221434326bcad47193419
|
/resolwe/flow/views/descriptor.py
|
73160e668b2397a104e82bc2eef3f6ef143398ab
|
[
"Apache-2.0"
] |
permissive
|
lukaw3d/resolwe
|
6412aeabaa62d3b6f59adb1e50781cbab6238e74
|
7e5af6dab31e22d598563f71c96d637125c83c9e
|
refs/heads/master
| 2020-04-26T19:50:33.039022
| 2018-12-20T13:17:08
| 2019-02-27T10:11:16
| 173,788,495
| 0
| 0
| null | 2019-03-04T17:16:28
| 2019-03-04T17:16:28
| null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
"""Descriptor schema viewset."""
from rest_framework import mixins, viewsets
from resolwe.flow.filters import DescriptorSchemaFilter
from resolwe.flow.models import DescriptorSchema
from resolwe.flow.serializers import DescriptorSchemaSerializer
from resolwe.permissions.loader import get_permissions_class
from resolwe.permissions.mixins import ResolwePermissionsMixin
class DescriptorSchemaViewSet(mixins.RetrieveModelMixin,
mixins.ListModelMixin,
ResolwePermissionsMixin,
viewsets.GenericViewSet):
"""API view for :class:`DescriptorSchema` objects."""
queryset = DescriptorSchema.objects.all().prefetch_related('contributor')
serializer_class = DescriptorSchemaSerializer
permission_classes = (get_permissions_class(),)
filter_class = DescriptorSchemaFilter
ordering_fields = ('id', 'created', 'modified', 'name', 'version')
ordering = ('id',)
|
[
"domen@blenkus.com"
] |
domen@blenkus.com
|
cc63557a55b54a3140892024becb08eb8143da95
|
666100eef842fde48b2d478193ab5604f620ed0c
|
/env/bin/pycodestyle
|
6b13152da291444913f0968bc5f27021d4bf8c1b
|
[] |
no_license
|
bhavyagoel/ServerStat
|
342d5dc63909fe88fe4ce0fb270c5b595471c789
|
976d5596f07f3a833ba3448aad42fea42ae901cf
|
refs/heads/main
| 2023-09-03T03:32:07.793752
| 2021-11-17T23:33:08
| 2021-11-17T23:33:08
| 429,194,079
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
#!/home/bhavyagoel/dev/GitHubProj/SysProcess-Notifier/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(_main())
|
[
"bgoel4132@gmail.com"
] |
bgoel4132@gmail.com
|
|
194352f4e5b812be2c874173ae472be4067fd1dd
|
49f993f260b5d1cbbe1f3591aa5ed1776a481463
|
/apps.py
|
f1fb04555b6a14994760e5269d06e397e1e33542
|
[] |
no_license
|
naye0ng/Instagram
|
5d35c886960497e5db031079e2f6b66b5b3e8c65
|
f35791ddc7c0606296837bc2ad9f597c626baa35
|
refs/heads/master
| 2020-05-05T13:50:15.158715
| 2019-04-18T03:14:32
| 2019-04-18T03:14:32
| 180,095,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
from django.apps import AppConfig
class SxhoolConfig(AppConfig):
name = 'sxhool'
|
[
"nayeong_e@naver.com"
] |
nayeong_e@naver.com
|
134e39484a570636be4d26a1a2a9021b143f6ef6
|
c0cef05d528d33cb7e7f49ec2758dcd138e49577
|
/venv/bin/wheel
|
9ef8a1b9c59837bb64c138e33ec2556e9b0ca4f5
|
[] |
no_license
|
MaryamBisadi/Email_Author_Identification
|
26b13e350ea55fe0ee432b483844797cd3d57e92
|
e5d1e40507a49fd7177ff781bf8ba7345aae4054
|
refs/heads/master
| 2020-03-18T13:42:43.734711
| 2018-06-25T05:09:48
| 2018-06-25T05:09:48
| 134,802,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
#!/Users/marybisadi/PycharmProjects/Email_Author_Detection/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"marybisadi@Marys-iMac.local"
] |
marybisadi@Marys-iMac.local
|
|
a39f77e46e9e1fb8b14b0aaf41e10dcd7f1e3c29
|
c459f4dd7b198ec8d8db8379726a5b2650be6636
|
/scripts/import_adjustment_majors.py
|
df5e77c03b7f00dd483c429bb63eb0f40bc23aef
|
[] |
no_license
|
jittat/admapp
|
4c712182cd06e82efab6c2513fb865e5d00feae8
|
38bf299015ae423b4551f6b1206742ee176b8b77
|
refs/heads/master
| 2023-06-10T03:23:41.174264
| 2023-06-09T19:41:03
| 2023-06-09T19:41:03
| 101,953,724
| 10
| 4
| null | 2023-04-21T22:48:55
| 2017-08-31T03:12:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
from django_bootstrap import bootstrap
bootstrap()
import sys
import csv
from appl.models import Faculty
from backoffice.models import AdjustmentMajor
def main():
filename = sys.argv[1]
counter = 0
with open(filename) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
first = True
for items in reader:
if first:
first = False
continue
facid = items[0]
full_code = items[1].strip()
title = items[3]
faculty_title = items[2]
print(faculty_title)
faculty = Faculty.objects.get(title=faculty_title)
old_adj_majors = AdjustmentMajor.objects.filter(full_code=full_code).all()
if len(old_adj_majors)!=0:
adj_major = old_adj_majors[0]
else:
adj_major = AdjustmentMajor()
adj_major.full_code = full_code
adj_major.title = title
adj_major.faculty = faculty
adj_major.major_code = full_code
adj_major.study_type_code = items[4]
adj_major.save()
print(adj_major, faculty, adj_major.major_code, adj_major.study_type_code)
counter += 1
print('Imported',counter,'majors')
if __name__ == '__main__':
main()
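# Hedged note on the expected CSV layout, inferred from the indices above
# (the file name is hypothetical): column 0 = faculty id (read but unused),
# 1 = full major code, 2 = faculty title, 3 = major title, 4 = study type
# code, with one header row that is skipped.
#
#   python import_adjustment_majors.py majors.csv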
|
[
"jittat@gmail.com"
] |
jittat@gmail.com
|
aa1cf0e2c7f2b64c5aec026a5acd76c4686f7dc5
|
ab1dd7005b4a4273da3771665250c0b3fcf9d0c1
|
/scripts/test_TvsR_2.py
|
1389d4e348d2a80f1affcd4c9dfb71c98ab049d8
|
[] |
no_license
|
jordan-stone/Disks
|
5388eb29e1bc38382ddb2cd25b921e31decba9bc
|
8083c5e1c17e22356a396dfd7c08ed3df5fae605
|
refs/heads/master
| 2021-01-17T04:43:33.635093
| 2016-07-01T21:55:33
| 2016-07-01T21:55:33
| 61,930,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from Disks.TvsR import *
from Disks.Baraffe import read_baraffe
import matplotlib.pyplot as mpl
import numpy as np
#6000,1500,600,300,50,10,2
#even as few as 2 sampled ts seems to result in the same curve...
d=read_baraffe(0.1)
a=np.linspace(0.1,100,1000)
tr,mdotr=active_and_irradiated_combined_opacity(a,0.1,d['r'][0],d['Teff'][0],sampled_ts=np.linspace(10,3000,10))
mpl.plot(tr,'r-')
tr0,mdotr0=active_and_irradiated_combined_opacity(a,0.1,d['r'][0],d['Teff'][0],sampled_ts=np.linspace(10,3000,2))
mpl.plot(tr0,'b-')
|
[
"jstone@as.arizona.edu"
] |
jstone@as.arizona.edu
|
30e52b2ab9a3bacba9f7f2714ab66d90cf8b7afc
|
65ed1f3570075d4bd07602d088559a4b27c26cc9
|
/pyAudioAnalysis/audioFeatureExtraction.py
|
400ba3b15daa08ff79f12d96514a8580e0a046be
|
[
"Apache-2.0"
] |
permissive
|
trinhkhoi/MIR
|
c97f7b31c94f8ddfc0686c515c26696bccad6519
|
fe264abedbd8bc9142b045a1225bd2f497c15a51
|
refs/heads/master
| 2020-03-15T19:34:38.825605
| 2018-05-06T06:30:55
| 2018-05-06T06:30:55
| 132,307,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,467
|
py
|
import sys
import time
import os
import glob
import numpy
import pickle as cPickle
import aifc
import math
from numpy import NaN, Inf, arange, isscalar, array
from scipy.fftpack import rfft
from scipy.fftpack import fft
from scipy.fftpack.realtransforms import dct
from scipy.signal import fftconvolve
# from matplotlib.mlab import find  # removed in matplotlib >= 3.1 and not used below
import matplotlib.pyplot as plt
from scipy import linalg as la
#import Test.pyAudioAnalysis.audioTrainTest as aT
import pyAudioAnalysis.audioBasicIO as audioBasicIO
import pyAudioAnalysis.utilities as utilities
from scipy.signal import lfilter, hamming
#from scikits.talkbox.linpred.levinson_lpc import lpc
import importlib as imp
#imp.reload(sys)
#sys.setdefaultencoding('utf8')
eps = 0.00000001
""" Time-domain audio features """
def stZCR(frame):
"""Computes zero crossing rate of frame"""
count = len(frame)
countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2
return (numpy.float64(countZ) / numpy.float64(count-1.0))
def stEnergy(frame):
"""Computes signal energy of frame"""
return numpy.sum(frame ** 2) / numpy.float64(len(frame))
def stEnergyEntropy(frame, numOfShortBlocks=10):
"""Computes entropy of energy"""
Eol = numpy.sum(frame ** 2) # total frame energy
L = len(frame)
subWinLength = int(numpy.floor(L / numOfShortBlocks))
if L != subWinLength * numOfShortBlocks:
frame = frame[0:subWinLength * numOfShortBlocks]
    # subWindows is of size [subWinLength x numOfShortBlocks]
subWindows = frame.reshape(subWinLength, numOfShortBlocks, order='F').copy()
# Compute normalized sub-frame energies:
s = numpy.sum(subWindows ** 2, axis=0) / (Eol + eps)
# Compute entropy of the normalized sub-frame energies:
Entropy = -numpy.sum(s * numpy.log2(s + eps))
return Entropy
""" Frequency-domain audio features """
def stSpectralCentroidAndSpread(X, fs):
"""Computes spectral centroid of frame (given abs(FFT))"""
ind = (numpy.arange(1, len(X) + 1)) * (fs/(2.0 * len(X)))
Xt = X.copy()
Xt = Xt / Xt.max()
NUM = numpy.sum(ind * Xt)
DEN = numpy.sum(Xt) + eps
# Centroid:
C = (NUM / DEN)
# Spread:
S = numpy.sqrt(numpy.sum(((ind - C) ** 2) * Xt) / DEN)
# Normalize:
C = C / (fs / 2.0)
S = S / (fs / 2.0)
return (C, S)
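# In formula terms, with f_k the bin frequencies and X_k the (max-normalized)
# magnitudes used above:
#   C = sum(f_k * X_k) / sum(X_k)
#   S = sqrt(sum((f_k - C)^2 * X_k) / sum(X_k))
# both then divided by fs/2 so the returned values lie in [0, 1].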
def stSpectralEntropy(X, numOfShortBlocks=10):
"""Computes the spectral entropy"""
L = len(X) # number of frame samples
Eol = numpy.sum(X ** 2) # total spectral energy
subWinLength = int(numpy.floor(L / numOfShortBlocks)) # length of sub-frame
if L != subWinLength * numOfShortBlocks:
X = X[0:subWinLength * numOfShortBlocks]
subWindows = X.reshape(subWinLength, numOfShortBlocks, order='F').copy() # define sub-frames (using matrix reshape)
s = numpy.sum(subWindows ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies
En = -numpy.sum(s*numpy.log2(s + eps)) # compute spectral entropy
return En
def stSpectralFlux(X, Xprev):
"""
Computes the spectral flux feature of the current frame
ARGUMENTS:
X: the abs(fft) of the current frame
        Xprev: the abs(fft) of the previous frame
"""
# compute the spectral flux as the sum of square distances:
sumX = numpy.sum(X + eps)
sumPrevX = numpy.sum(Xprev + eps)
F = numpy.sum((X / sumX - Xprev/sumPrevX) ** 2)
return F
def stSpectralRollOff(X, c, fs):
"""Computes spectral roll-off"""
totalEnergy = numpy.sum(X ** 2)
fftLength = len(X)
Thres = c*totalEnergy
    # Find the spectral rolloff as the frequency position where the respective spectral energy is equal to c*totalEnergy
CumSum = numpy.cumsum(X ** 2) + eps
[a, ] = numpy.nonzero(CumSum > Thres)
if len(a) > 0:
mC = numpy.float64(a[0]) / (float(fftLength))
else:
mC = 0.0
return (mC)
def stHarmonic(frame, fs):
"""
Computes harmonic ratio and pitch
"""
    M = int(numpy.round(0.016 * fs)) - 1  # integer so it can size and slice arrays below
R = numpy.correlate(frame, frame, mode='full')
g = R[len(frame)-1]
R = R[len(frame):-1]
# estimate m0 (as the first zero crossing of R)
[a, ] = numpy.nonzero(numpy.diff(numpy.sign(R)))
if len(a) == 0:
m0 = len(R)-1
else:
m0 = a[0]
if M > len(R):
M = len(R) - 1
Gamma = numpy.zeros((M), dtype=numpy.float64)
CSum = numpy.cumsum(frame ** 2)
Gamma[m0:M] = R[m0:M] / (numpy.sqrt((g * CSum[M:m0:-1])) + eps)
ZCR = stZCR(Gamma)
if ZCR > 0.15:
HR = 0.0
f0 = 0.0
else:
if len(Gamma) == 0:
HR = 1.0
blag = 0.0
Gamma = numpy.zeros((M), dtype=numpy.float64)
else:
HR = numpy.max(Gamma)
blag = numpy.argmax(Gamma)
# Get fundamental frequency:
f0 = fs / (blag + eps)
if f0 > 5000:
f0 = 0.0
if HR < 0.1:
f0 = 0.0
return (HR, f0)
def mfccInitFilterBanks(fs, nfft):
"""
Computes the triangular filterbank for MFCC computation (used in the stFeatureExtraction function before the stMFCC function call)
This function is taken from the scikits.talkbox library (MIT Licence):
https://pypi.python.org/pypi/scikits.talkbox
"""
# filter bank params:
lowfreq = 133.33
linsc = 200/3.
logsc = 1.0711703
numLinFiltTotal = 13
numLogFilt = 27
    if fs < 8000:
        numLogFilt = 5  # use fewer log-spaced filters at low sampling rates
# Total number of filters
nFiltTotal = numLinFiltTotal + numLogFilt
# Compute frequency points of the triangle:
freqs = numpy.zeros(nFiltTotal+2)
freqs[:numLinFiltTotal] = lowfreq + numpy.arange(numLinFiltTotal) * linsc
freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** numpy.arange(1, numLogFilt + 3)
heights = 2./(freqs[2:] - freqs[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = numpy.zeros((nFiltTotal, nfft))
nfreqs = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nFiltTotal):
lowTrFreq = freqs[i]
cenTrFreq = freqs[i+1]
highTrFreq = freqs[i+2]
        lid = numpy.arange(numpy.floor(lowTrFreq * nfft / fs) + 1, numpy.floor(cenTrFreq * nfft / fs) + 1, dtype=int)
        lslope = heights[i] / (cenTrFreq - lowTrFreq)
        rid = numpy.arange(numpy.floor(cenTrFreq * nfft / fs) + 1, numpy.floor(highTrFreq * nfft / fs) + 1, dtype=int)
rslope = heights[i] / (highTrFreq - cenTrFreq)
fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
return fbank, freqs
def stMFCC(X, fbank, nceps):
"""
Computes the MFCCs of a frame, given the fft mag
ARGUMENTS:
X: fft magnitude abs(FFT)
fbank: filter bank (see mfccInitFilterBanks)
RETURN
ceps: MFCCs (13 element vector)
Note: MFCC calculation is, in general, taken from the scikits.talkbox library (MIT Licence),
# with a small number of modifications to make it more compact and suitable for the pyAudioAnalysis Lib
"""
mspec = numpy.log10(numpy.dot(X, fbank.T)+eps)
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:nceps]
return ceps
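# A minimal usage sketch on one synthetic frame (the window length 2 * nfft
# below is an arbitrary choice for illustration):
#
#   fs, nfft = 16000, 1024
#   fbank, freqs = mfccInitFilterBanks(fs, nfft)
#   X = abs(fft(numpy.random.randn(2 * nfft)))[0:nfft] / nfft
#   ceps = stMFCC(X, fbank, nceps=13)   # 13-element MFCC vector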
def stChromaFeaturesInit(nfft, fs):
"""
This function initializes the chroma matrices used in the calculation of the chroma features
"""
freqs = numpy.array([((f + 1) * fs) / (2 * nfft) for f in range(nfft)])
Cp = 27.50
nChroma = numpy.round(12.0 * numpy.log2(freqs / Cp)).astype(int)
nFreqsPerChroma = numpy.zeros((nChroma.shape[0], ))
uChroma = numpy.unique(nChroma)
for u in uChroma:
idx = numpy.nonzero(nChroma == u)
nFreqsPerChroma[idx] = idx[0].shape
return nChroma, nFreqsPerChroma
def stChromaFeatures(X, fs, nChroma, nFreqsPerChroma):
#TODO: 1 complexity
#TODO: 2 bug with large windows
chromaNames = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']
spec = X**2
if nChroma.max()<nChroma.shape[0]:
C = numpy.zeros((nChroma.shape[0],))
C[nChroma] = spec
C /= nFreqsPerChroma[nChroma]
else:
I = numpy.nonzero(nChroma>nChroma.shape[0])[0][0]
C = numpy.zeros((nChroma.shape[0],))
C[nChroma[0:I-1]] = spec
C /= nFreqsPerChroma
finalC = numpy.zeros((12, 1))
newD = int(numpy.ceil(C.shape[0] / 12.0) * 12)
C2 = numpy.zeros((newD, ))
C2[0:C.shape[0]] = C
C2 = C2.reshape(int(C2.shape[0]/12), 12)
#for i in range(12):
# finalC[i] = numpy.sum(C[i:C.shape[0]:12])
finalC = numpy.matrix(numpy.sum(C2, axis=0)).T
finalC /= spec.sum()
# ax = plt.gca()
# plt.hold(False)
# plt.plot(finalC)
# ax.set_xticks(range(len(chromaNames)))
# ax.set_xticklabels(chromaNames)
# xaxis = numpy.arange(0, 0.02, 0.01);
# ax.set_yticks(range(len(xaxis)))
# ax.set_yticklabels(xaxis)
# plt.show(block=False)
# plt.draw()
return chromaNames, finalC
def stChromagram(signal, Fs, Win, Step, PLOT=False):
"""
    Short-term FFT mag for spectrogram estimation:
Returns:
a numpy array (nFFT x numOfShortTermWindows)
ARGUMENTS:
signal: the input signal samples
Fs: the sampling freq (in Hz)
Win: the short-term window size (in samples)
Step: the short-term window step (in samples)
        PLOT: flag, 1 if results are to be plotted
RETURNS:
"""
Win = int(Win)
Step = int(Step)
signal = numpy.double(signal)
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (numpy.abs(signal)).max()
signal = (signal - DC) / (MAX - DC)
N = len(signal) # total number of signals
curPos = 0
countFrames = 0
nfft = int(Win / 2)
nChroma, nFreqsPerChroma = stChromaFeaturesInit(nfft, Fs)
chromaGram = numpy.array([], dtype=numpy.float64)
while (curPos + Win - 1 < N):
countFrames += 1
x = signal[curPos:curPos + Win]
curPos = curPos + Step
X = abs(fft(x))
X = X[0:nfft]
X = X / len(X)
chromaNames, C = stChromaFeatures(X, Fs, nChroma, nFreqsPerChroma)
C = C[:, 0]
if countFrames == 1:
chromaGram = C.T
else:
chromaGram = numpy.vstack((chromaGram, C.T))
FreqAxis = chromaNames
TimeAxis = [(t * Step) / Fs for t in range(chromaGram.shape[0])]
if (PLOT):
fig, ax = plt.subplots()
chromaGramToPlot = chromaGram.transpose()[::-1, :]
        Ratio = int(chromaGramToPlot.shape[1] / (3 * chromaGramToPlot.shape[0]))
if Ratio < 1:
Ratio = 1
chromaGramToPlot = numpy.repeat(chromaGramToPlot, Ratio, axis=0)
imgplot = plt.imshow(chromaGramToPlot)
Fstep = int(nfft / 5.0)
# FreqTicks = range(0, int(nfft) + Fstep, Fstep)
# FreqTicksLabels = [str(Fs/2-int((f*Fs) / (2*nfft))) for f in FreqTicks]
        ax.set_yticks(range(Ratio // 2, len(FreqAxis) * Ratio, Ratio))
        ax.set_yticklabels(FreqAxis[::-1])
        TStep = countFrames // 3  # integer step so range() accepts it on Python 3
        TimeTicks = range(0, countFrames, TStep)
TimeTicksLabels = ['%.2f' % (float(t * Step) / Fs) for t in TimeTicks]
ax.set_xticks(TimeTicks)
ax.set_xticklabels(TimeTicksLabels)
ax.set_xlabel('time (secs)')
imgplot.set_cmap('jet')
plt.colorbar()
plt.show()
return (chromaGram, TimeAxis, FreqAxis)
def phormants(x, Fs):
N = len(x)
w = numpy.hamming(N)
# Apply window and high pass filter.
x1 = x * w
x1 = lfilter([1], [1., 0.63], x1)
# Get LPC.
    ncoeff = 2 + int(Fs / 1000)
    A, e, k = lpc(x1, ncoeff)  # requires lpc (the scikits.talkbox import is commented out above)
#A, e, k = lpc(x1, 8)
# Get roots.
rts = numpy.roots(A)
rts = [r for r in rts if numpy.imag(r) >= 0]
# Get angles.
angz = numpy.arctan2(numpy.imag(rts), numpy.real(rts))
# Get frequencies.
frqs = sorted(angz * (Fs / (2 * math.pi)))
return frqs
def beatExtraction(stFeatures, winSize, PLOT=False):
"""
This function extracts an estimate of the beat rate for a musical signal.
ARGUMENTS:
- stFeatures: a numpy array (numOfFeatures x numOfShortTermWindows)
- winSize: window size in seconds
RETURNS:
- BPM: estimates of beats per minute
- Ratio: a confidence measure
"""
# Features that are related to the beat tracking task:
toWatch = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
maxBeatTime = int(round(2.0 / winSize))
HistAll = numpy.zeros((maxBeatTime,))
for ii, i in enumerate(toWatch): # for each feature
DifThres = 2.0 * (numpy.abs(stFeatures[i, 0:-1] - stFeatures[i, 1::])).mean() # dif threshold (3 x Mean of Difs)
if DifThres<=0:
DifThres = 0.0000000000000001
[pos1, _] = utilities.peakdet(stFeatures[i, :], DifThres) # detect local maxima
posDifs = [] # compute histograms of local maxima changes
for j in range(len(pos1)-1):
posDifs.append(pos1[j+1]-pos1[j])
[HistTimes, HistEdges] = numpy.histogram(posDifs, numpy.arange(0.5, maxBeatTime + 1.5))
HistCenters = (HistEdges[0:-1] + HistEdges[1::]) / 2.0
HistTimes = HistTimes.astype(float) / stFeatures.shape[1]
HistAll += HistTimes
if PLOT:
plt.subplot(9, 2, ii + 1)
plt.plot(stFeatures[i, :], 'k')
for k in pos1:
plt.plot(k, stFeatures[i, k], 'k*')
f1 = plt.gca()
f1.axes.get_xaxis().set_ticks([])
f1.axes.get_yaxis().set_ticks([])
if PLOT:
plt.show(block=False)
plt.figure()
# Get beat as the argmax of the agregated histogram:
I = numpy.argmax(HistAll)
BPMs = 60 / (HistCenters * winSize)
BPM = BPMs[I]
# ... and the beat ratio:
Ratio = HistAll[I] / HistAll.sum()
if PLOT:
# filter out >500 beats from plotting:
HistAll = HistAll[BPMs < 500]
BPMs = BPMs[BPMs < 500]
plt.plot(BPMs, HistAll, 'k')
plt.xlabel('Beats per minute')
plt.ylabel('Freq Count')
plt.show(block=True)
return BPM, Ratio
def stSpectogram(signal, Fs, Win, Step, PLOT=False):
"""
    Short-term FFT mag for spectrogram estimation:
Returns:
a numpy array (nFFT x numOfShortTermWindows)
ARGUMENTS:
signal: the input signal samples
Fs: the sampling freq (in Hz)
Win: the short-term window size (in samples)
Step: the short-term window step (in samples)
        PLOT: flag, 1 if results are to be plotted
RETURNS:
"""
Win = int(Win)
Step = int(Step)
signal = numpy.double(signal)
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (numpy.abs(signal)).max()
signal = (signal - DC) / (MAX - DC)
N = len(signal) # total number of signals
curPos = 0
countFrames = 0
nfft = int(Win / 2)
specgram = numpy.array([], dtype=numpy.float64)
while (curPos + Win - 1 < N):
countFrames += 1
x = signal[curPos:curPos+Win]
curPos = curPos + Step
X = abs(fft(x))
X = X[0:nfft]
X = X / len(X)
if countFrames == 1:
specgram = X ** 2
else:
specgram = numpy.vstack((specgram, X))
FreqAxis = [((f + 1) * Fs) / (2 * nfft) for f in range(specgram.shape[1])]
TimeAxis = [(t * Step) / Fs for t in range(specgram.shape[0])]
if (PLOT):
fig, ax = plt.subplots()
imgplot = plt.imshow(specgram.transpose()[::-1, :])
Fstep = int(nfft / 5.0)
FreqTicks = range(0, int(nfft) + Fstep, Fstep)
FreqTicksLabels = [str(Fs / 2 - int((f * Fs) / (2 * nfft))) for f in FreqTicks]
ax.set_yticks(FreqTicks)
ax.set_yticklabels(FreqTicksLabels)
        TStep = countFrames // 3
TimeTicks = range(0, countFrames, TStep)
TimeTicksLabels = ['%.2f' % (float(t * Step) / Fs) for t in TimeTicks]
ax.set_xticks(TimeTicks)
ax.set_xticklabels(TimeTicksLabels)
ax.set_xlabel('time (secs)')
ax.set_ylabel('freq (Hz)')
imgplot.set_cmap('jet')
plt.colorbar()
plt.show()
return (specgram, TimeAxis, FreqAxis)
""" Windowing and feature extraction """
def stFeatureExtraction(signal, Fs, Win, Step):
"""
    This function implements the short-term windowing process. For each short-term window a set of features is extracted.
    This results in a sequence of feature vectors, stored in a numpy matrix.
ARGUMENTS
signal: the input signal samples
Fs: the sampling freq (in Hz)
Win: the short-term window size (in samples)
Step: the short-term window step (in samples)
RETURNS
stFeatures: a numpy array (numOfFeatures x numOfShortTermWindows)
"""
Win = int(Win)
Step = int(Step)
# Signal normalization
signal = numpy.double(signal)
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (numpy.abs(signal)).max()
signal = (signal - DC) / (MAX + 0.0000000001)
N = len(signal) # total number of samples
curPos = 0
countFrames = 0
nFFT = int(Win / 2)
[fbank, freqs] = mfccInitFilterBanks(Fs, nFFT) # compute the triangular filter banks used in the mfcc calculation
nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, Fs)
numOfTimeSpectralFeatures = 8
numOfHarmonicFeatures = 0
nceps = 13
numOfChromaFeatures = 13
totalNumOfFeatures = numOfTimeSpectralFeatures + nceps + numOfHarmonicFeatures + numOfChromaFeatures
# totalNumOfFeatures = numOfTimeSpectralFeatures + nceps + numOfHarmonicFeatures
stFeatures = []
while (curPos + Win - 1 < N): # for each short-term window until the end of signal
countFrames += 1
x = signal[curPos:curPos+Win] # get current window
curPos = curPos + Step # update window position
X = abs(fft(x)) # get fft magnitude
X = X[0:nFFT] # normalize fft
X = X / len(X)
if countFrames == 1:
Xprev = X.copy() # keep previous fft mag (used in spectral flux)
curFV = numpy.zeros((totalNumOfFeatures, 1))
curFV[0] = stZCR(x) # zero crossing rate
curFV[1] = stEnergy(x) # short-term energy
curFV[2] = stEnergyEntropy(x) # short-term entropy of energy
[curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, Fs) # spectral centroid and spread
curFV[5] = stSpectralEntropy(X) # spectral entropy
curFV[6] = stSpectralFlux(X, Xprev) # spectral flux
curFV[7] = stSpectralRollOff(X, 0.90, Fs) # spectral rolloff
curFV[numOfTimeSpectralFeatures:numOfTimeSpectralFeatures+nceps, 0] = stMFCC(X, fbank, nceps).copy() # MFCCs
chromaNames, chromaF = stChromaFeatures(X, Fs, nChroma, nFreqsPerChroma)
curFV[numOfTimeSpectralFeatures + nceps: numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF
curFV[numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF.std()
stFeatures.append(curFV)
# delta features
'''
if countFrames>1:
delta = curFV - prevFV
curFVFinal = numpy.concatenate((curFV, delta))
else:
curFVFinal = numpy.concatenate((curFV, curFV))
prevFV = curFV
stFeatures.append(curFVFinal)
'''
# end of delta
Xprev = X.copy()
stFeatures = numpy.concatenate(stFeatures, 1)
return stFeatures
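# Hedged sketch of the resulting shape (the window sizes are illustrative):
# with Fs = 16000 and Win = Step = 800 samples (50 ms), a 10 s signal yields
# about 200 frames, so stFeatures has shape (34, ~200): 8 time/spectral
# features + 13 MFCCs + 13 chroma features per column.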
def mtFeatureExtraction(signal, Fs, mtWin, mtStep, stWin, stStep):
"""
Mid-term feature extraction
"""
mtWinRatio = int(round(mtWin / stStep))
mtStepRatio = int(round(mtStep / stStep))
stFeatures = stFeatureExtraction(signal, Fs, stWin, stStep)
numOfFeatures = len(stFeatures)
numOfStatistics = 2
mtFeatures = []
#for i in range(numOfStatistics * numOfFeatures + 1):
for i in range(numOfStatistics * numOfFeatures):
mtFeatures.append([])
for i in range(numOfFeatures): # for each of the short-term features:
curPos = 0
N = len(stFeatures[i])
while (curPos < N):
N1 = curPos
N2 = curPos + mtWinRatio
if N2 > N:
N2 = N
curStFeatures = stFeatures[i][N1:N2]
mtFeatures[i].append(numpy.mean(curStFeatures))
mtFeatures[i+numOfFeatures].append(numpy.std(curStFeatures))
#mtFeatures[i+2*numOfFeatures].append(numpy.std(curStFeatures) / (numpy.mean(curStFeatures)+0.00000010))
curPos += mtStepRatio
return numpy.array(mtFeatures), stFeatures
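# Example of the window-ratio arithmetic above (values are illustrative):
# with stWin = stStep = 0.050 s and mtWin = mtStep = 1.0 s, each mid-term
# window aggregates round(1.0 / 0.050) = 20 short-term frames, and the
# returned matrix stacks the 34 means first, then the 34 standard deviations.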
# TODO
def stFeatureSpeed(signal, Fs, Win, Step):
signal = numpy.double(signal)
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (numpy.abs(signal)).max()
signal = (signal - DC) / MAX
# print (numpy.abs(signal)).max()
N = len(signal) # total number of signals
curPos = 0
countFrames = 0
lowfreq = 133.33
linsc = 200/3.
logsc = 1.0711703
nlinfil = 13
nlogfil = 27
nceps = 13
nfil = nlinfil + nlogfil
    nfft = int(Win / 2)    # must be an int: used as a slice bound below
if Fs < 8000:
nlogfil = 5
nfil = nlinfil + nlogfil
        nfft = int(Win / 2)
# compute filter banks for mfcc:
[fbank, freqs] = mfccInitFilterBanks(Fs, nfft, lowfreq, linsc, logsc, nlinfil, nlogfil)
numOfTimeSpectralFeatures = 8
numOfHarmonicFeatures = 1
totalNumOfFeatures = numOfTimeSpectralFeatures + nceps + numOfHarmonicFeatures
#stFeatures = numpy.array([], dtype=numpy.float64)
stFeatures = []
while (curPos + Win - 1 < N):
countFrames += 1
x = signal[curPos:curPos + Win]
curPos = curPos + Step
X = abs(fft(x))
X = X[0:nfft]
X = X / len(X)
Ex = 0.0
El = 0.0
X[0:4] = 0
# M = numpy.round(0.016 * fs) - 1
# R = numpy.correlate(frame, frame, mode='full')
stFeatures.append(stHarmonic(x, Fs))
# for i in range(len(X)):
#if (i < (len(X) / 8)) and (i > (len(X)/40)):
# Ex += X[i]*X[i]
#El += X[i]*X[i]
# stFeatures.append(Ex / El)
# stFeatures.append(numpy.argmax(X))
# if curFV[numOfTimeSpectralFeatures+nceps+1]>0:
# print curFV[numOfTimeSpectralFeatures+nceps], curFV[numOfTimeSpectralFeatures+nceps+1]
return numpy.array(stFeatures)
""" Feature Extraction Wrappers
- The first two feature extraction wrappers are used to extract long-term averaged
audio features for a list of WAV files stored in a given category.
    Note that a single feature vector is extracted per WAV file (not the whole sequence of feature vectors).
"""
def dirWavFeatureExtraction(dirName, mtWin, mtStep, stWin, stStep, computeBEAT=False):
"""
This function extracts the mid-term features of the WAVE files of a particular folder.
The resulting feature vector is extracted by long-term averaging the mid-term features.
Therefore ONE FEATURE VECTOR is extracted for each WAV file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mtWin, mtStep: mid-term window and step (in seconds)
- stWin, stStep: short-term window and step (in seconds)
"""
allMtFeatures = numpy.array([])
processingTimes = []
types = ('*.wav', '*.aif', '*.aiff', '*.mp3','*.au')
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(dirName, files)))
wavFilesList = sorted(wavFilesList)
wavFilesList2 = []
for i, wavFile in enumerate(wavFilesList):
print("Analyzing file %s of %s: %s" %(i+1, len(wavFilesList), wavFile.encode('utf-8')))
if os.stat(wavFile).st_size == 0:
print(" (EMPTY FILE -- SKIPPING)")
continue
[Fs, x] = audioBasicIO.readAudioFile(wavFile) # read file
if isinstance(x, int):
continue
        t1 = time.time()
x = audioBasicIO.stereo2mono(x) # convert stereo to mono
if x.shape[0]<float(Fs)/10:
print(" (AUDIO FILE TOO SMALL - SKIPPING)")
continue
wavFilesList2.append(wavFile)
if computeBEAT: # mid-term feature extraction for current file
[MidTermFeatures, stFeatures] = mtFeatureExtraction(x, Fs, round(mtWin * Fs), round(mtStep * Fs), round(Fs * stWin), round(Fs * stStep))
[beat, beatConf] = beatExtraction(stFeatures, stStep)
else:
[MidTermFeatures, _] = mtFeatureExtraction(x, Fs, round(mtWin * Fs), round(mtStep * Fs), round(Fs * stWin), round(Fs * stStep))
MidTermFeatures = numpy.transpose(MidTermFeatures)
MidTermFeatures = MidTermFeatures.mean(axis=0) # long term averaging of mid-term statistics
if (not numpy.isnan(MidTermFeatures).any()) and (not numpy.isinf(MidTermFeatures).any()):
if computeBEAT:
MidTermFeatures = numpy.append(MidTermFeatures, beat)
MidTermFeatures = numpy.append(MidTermFeatures, beatConf)
if len(allMtFeatures) == 0: # append feature vector
allMtFeatures = MidTermFeatures
else:
allMtFeatures = numpy.vstack((allMtFeatures, MidTermFeatures))
            t2 = time.time()
duration = float(len(x)) / Fs
processingTimes.append((t2 - t1) / duration)
if len(processingTimes) > 0:
print("Feature extraction complexity ratio: {0:.1f} x realtime".format((1.0 / numpy.mean(numpy.array(processingTimes)))))
return (allMtFeatures, wavFilesList2)
def dirsWavFeatureExtraction(dirNames, mtWin, mtStep, stWin, stStep, computeBEAT=False):
'''
Same as dirWavFeatureExtraction, but instead of a single dir it takes a list of paths as input and returns a list of feature matrices.
EXAMPLE:
[features, classNames] =
a.dirsWavFeatureExtraction(['audioData/classSegmentsRec/noise','audioData/classSegmentsRec/speech',
'audioData/classSegmentsRec/brush-teeth','audioData/classSegmentsRec/shower'], 1, 1, 0.02, 0.02);
    It can be used during the training process of a classification model,
    in order to get feature matrices from various audio classes (each stored in a separate path).
'''
# feature extraction for each class:
features = []
classNames = []
fileNames = []
for i, d in enumerate(dirNames):
[f, fn] = dirWavFeatureExtraction(d, mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT)
if f.shape[0] > 0: # if at least one audio file has been found in the provided folder:
features.append(f)
fileNames.append(fn)
if d[-1] == "/":
classNames.append(d.split(os.sep)[-2])
else:
classNames.append(d.split(os.sep)[-1])
return features, classNames, fileNames
def dirWavFeatureExtractionNoAveraging(dirName, mtWin, mtStep, stWin, stStep):
"""
This function extracts the mid-term features of the WAVE files of a particular folder without averaging each file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mtWin, mtStep: mid-term window and step (in seconds)
- stWin, stStep: short-term window and step (in seconds)
RETURNS:
- X: A feature matrix
- Y: A matrix of file labels
- filenames:
"""
allMtFeatures = numpy.array([])
signalIndices = numpy.array([])
processingTimes = []
types = ('*.wav', '*.aif', '*.aiff')
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(dirName, files)))
wavFilesList = sorted(wavFilesList)
for i, wavFile in enumerate(wavFilesList):
[Fs, x] = audioBasicIO.readAudioFile(wavFile) # read file
if isinstance(x, int):
continue
x = audioBasicIO.stereo2mono(x) # convert stereo to mono
[MidTermFeatures, _] = mtFeatureExtraction(x, Fs, round(mtWin * Fs), round(mtStep * Fs), round(Fs * stWin), round(Fs * stStep)) # mid-term feature
MidTermFeatures = numpy.transpose(MidTermFeatures)
# MidTermFeatures = MidTermFeatures.mean(axis=0) # long term averaging of mid-term statistics
if len(allMtFeatures) == 0: # append feature vector
allMtFeatures = MidTermFeatures
signalIndices = numpy.zeros((MidTermFeatures.shape[0], ))
else:
allMtFeatures = numpy.vstack((allMtFeatures, MidTermFeatures))
signalIndices = numpy.append(signalIndices, i * numpy.ones((MidTermFeatures.shape[0], )))
return (allMtFeatures, signalIndices, wavFilesList)
# The following two feature extraction wrappers extract features for given audio files; however,
# NO LONG-TERM AVERAGING is performed, so the output for each audio file is NOT A SINGLE FEATURE VECTOR
# but a whole feature matrix.
#
# In other words, the WAV files in these functions are not treated as uniform samples to be averaged, but as sequences.
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile,
storeStFeatures=False, storeToCSV=False, PLOT=False):
"""
This function is used as a wrapper to:
a) read the content of a WAV file
b) perform mid-term feature extraction on that signal
c) write the mid-term feature sequences to a numpy file
"""
[Fs, x] = audioBasicIO.readAudioFile(fileName) # read the wav file
x = audioBasicIO.stereo2mono(x) # convert to MONO if required
if storeStFeatures:
[mtF, stF] = mtFeatureExtraction(x, Fs, round(Fs * midTermSize), round(Fs * midTermStep), round(Fs * shortTermSize), round(Fs * shortTermStep))
else:
[mtF, _] = mtFeatureExtraction(x, Fs, round(Fs*midTermSize), round(Fs * midTermStep), round(Fs * shortTermSize), round(Fs * shortTermStep))
numpy.save(outPutFile, mtF) # save mt features to numpy file
if PLOT:
print("Mid-term numpy file: " + outPutFile + ".npy saved")
if storeToCSV:
numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",")
if PLOT:
print("Mid-term CSV file: " + outPutFile + ".csv saved")
if storeStFeatures:
numpy.save(outPutFile+"_st", stF) # save st features to numpy file
if PLOT:
print("Short-term numpy file: " + outPutFile + "_st.npy saved")
if storeToCSV:
numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",") # store st features to CSV file
if PLOT:
print("Short-term CSV file: " + outPutFile + "_st.csv saved")
def mtFeatureExtractionToFileDir(dirName, midTermSize, midTermStep, shortTermSize, shortTermStep, storeStFeatures=False, storeToCSV=False, PLOT=False):
types = (dirName + os.sep + '*.wav', )
filesToProcess = []
for files in types:
filesToProcess.extend(glob.glob(files))
for f in filesToProcess:
outPath = f
mtFeatureExtractionToFile(f, midTermSize, midTermStep, shortTermSize, shortTermStep, outPath, storeStFeatures, storeToCSV, PLOT)
|
[
"dinhkhoi1@gmail.com"
] |
dinhkhoi1@gmail.com
|
930f4299bfc22dbbfcef08432ba583f341185fe9
|
00bd7fcd18b67742a906d6215ea7644efbde1bb2
|
/kernighan_lin.py
|
abf56501722deab329d6560b6e8fe6ce992e3ada
|
[] |
no_license
|
steliosrousoglou/244-final
|
29218001edfc86d0d509e9d97ec68bf4e3b051fa
|
439b8ff046009952d703ffa01ea0d35dbd12837c
|
refs/heads/master
| 2022-10-13T19:17:46.135059
| 2020-06-12T13:44:35
| 2020-06-12T13:44:35
| 271,252,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,321
|
py
|
# -*- coding: utf-8 -*-
#
# kernighan_lin.py - Kernighan–Lin bipartition algorithm
#
# Copyright 2011 Ben Edwards <bedwards@cs.unm.edu>.
# Copyright 2011 Aric Hagberg <hagberg@lanl.gov>.
# Copyright 2015 NetworkX developers.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""Functions for computing the Kernighan–Lin bipartition algorithm."""
from collections import defaultdict
from itertools import islice
from operator import itemgetter
import networkx as nx
from networkx.utils import not_implemented_for
import numpy as np
__all__ = ['kernighan_lin_bisection']
def is_partition(G, communities):
"""Returns *True* if `communities` is a partition of the nodes of `G`.
A partition of a universe set is a family of pairwise disjoint sets
whose union is the entire universe set.
Parameters
----------
G : NetworkX graph.
communities : list or iterable of sets of nodes
If not a list, the iterable is converted internally to a list.
If it is an iterator it is exhausted.
"""
# Alternate implementation:
# return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G)
if not isinstance(communities, list):
communities = list(communities)
nodes = set(n for c in communities for n in c if n in G)
return len(G) == len(nodes) == sum(len(c) for c in communities)
def _compute_delta(G, A, B, weight):
# helper to compute initial swap deltas for a pass
delta = defaultdict(float)
for u, v, d in G.edges(data=True):
w = d.get(weight, 1)
if u in A:
if v in A:
delta[u] -= w
delta[v] -= w
elif v in B:
delta[u] += w
delta[v] += w
elif u in B:
if v in A:
delta[u] += w
delta[v] += w
elif v in B:
delta[u] -= w
delta[v] -= w
return delta
def _update_delta(delta, G, A, B, u, v, weight):
# helper to update swap deltas during single pass
for _, nbr, d in G.edges(u, data=True):
w = d.get(weight, 1)
if nbr in A:
delta[nbr] += 2 * w
if nbr in B:
delta[nbr] -= 2 * w
for _, nbr, d in G.edges(v, data=True):
w = d.get(weight, 1)
if nbr in A:
delta[nbr] -= 2 * w
if nbr in B:
delta[nbr] += 2 * w
return delta
def _kernighan_lin_pass(G, A, B, weight):
# do a single iteration of Kernighan–Lin algorithm
# returns list of (g_i,u_i,v_i) for i node pairs u_i,v_i
multigraph = G.is_multigraph()
delta = _compute_delta(G, A, B, weight)
swapped = set()
gains = []
while len(swapped) < len(G):
gain = []
for u in A - swapped:
for v in B - swapped:
try:
if multigraph:
w = sum(d.get(weight, 1) for d in G[u][v].values())
else:
w = G[u][v].get(weight, 1)
except KeyError:
w = 0
gain.append((delta[u] + delta[v] - 2 * w, u, v))
if len(gain) == 0:
break
maxg, u, v = max(gain, key=itemgetter(0))
swapped |= {u, v}
gains.append((maxg, u, v))
delta = _update_delta(delta, G, A - swapped, B - swapped, u, v, weight)
return gains
@not_implemented_for('directed')
def kernighan_lin_bisection(G, partition=None, max_iter=10, weight='weight',
seed=None):
"""Partition a graph into two blocks using the Kernighan–Lin
algorithm.
    This algorithm partitions a network into two sets by iteratively
swapping pairs of nodes to reduce the edge cut between the two sets.
Parameters
----------
G : graph
partition : tuple
Pair of iterables containing an initial partition. If not
specified, a random balanced partition is used.
max_iter : int
Maximum number of times to attempt swaps to find an
        improvement before giving up.
weight : key
Edge data key to use as weight. If None, the weights are all
set to one.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Only used if partition is None
Returns
-------
partition : tuple
A pair of sets of nodes representing the bipartition.
Raises
-------
NetworkXError
If partition is not a valid partition of the nodes of the graph.
References
----------
.. [1] Kernighan, B. W.; Lin, Shen (1970).
"An efficient heuristic procedure for partitioning graphs."
*Bell Systems Technical Journal* 49: 291--307.
Oxford University Press 2011.
"""
# If no partition is provided, split the nodes randomly into a
# balanced partition.
    if partition is None:
        # Shuffle all nodes and split them in half; the previous version only
        # sampled half the nodes, which could never form a valid partition.
        nodes = list(G.nodes())
        np.random.shuffle(nodes)
        h = len(nodes) // 2
        partition = (nodes[:h], nodes[h:])
# Make a copy of the partition as a pair of sets.
try:
A, B = set(partition[0]), set(partition[1])
    except (TypeError, IndexError):
        raise ValueError('partition must be two sets')
if not is_partition(G, (A, B)):
raise nx.NetworkXError('partition invalid')
for i in range(max_iter):
# `gains` is a list of triples of the form (g, u, v) for each
# node pair (u, v), where `g` is the gain of that node pair.
gains = _kernighan_lin_pass(G, A, B, weight)
csum = list(nx.utils.accumulate(g for g, u, v in gains))
max_cgain = max(csum)
if max_cgain <= 0:
break
# Get the node pairs up to the index of the maximum cumulative
# gain, and collect each `u` into `anodes` and each `v` into
# `bnodes`, for each pair `(u, v)`.
index = csum.index(max_cgain)
nodesets = islice(zip(*gains[:index + 1]), 1, 3)
anodes, bnodes = (set(s) for s in nodesets)
A |= bnodes
A -= anodes
B |= anodes
B -= bnodes
return A, B
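# A minimal usage sketch (hedged: the barbell graph is an illustrative test
# case, not part of the original module; note that nx.utils.accumulate above
# requires an older networkx release):
if __name__ == '__main__':
    G = nx.barbell_graph(5, 0)  # two 5-cliques joined by a single edge
    A, B = kernighan_lin_bisection(G)
    print('Partition:', sorted(A), sorted(B))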
|
[
"steliosr@stanford.edu"
] |
steliosr@stanford.edu
|
eed035e496f5c2c366b9ef2dc9cd71733fb93472
|
000cfccad7e367d91a1d9a7961b3072bf2624a58
|
/test/test3.py
|
ad4257ddcb4e2f5a5e8cabaeb3b9e9d959463658
|
[] |
no_license
|
dachuant/print-pdf
|
4f9bb68a67079dd838785dc8da3836a2909773d2
|
07c791c298dc9237b49b56724f026ca88dc1713b
|
refs/heads/master
| 2020-07-29T20:42:27.719153
| 2019-09-21T08:41:54
| 2019-09-21T08:41:54
| 209,951,848
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
from pdf2image import convert_from_path
import os
print(os.getcwd())
os.environ["PATH"] += os.pathsep + 'D:/project/python/print/poppler-0.68.0/bin/'
print(os.environ["PATH"])
images = convert_from_path('D:\\project\\python\\print\\2.pdf')
for index, img in enumerate(images):
img.save('D:\\project\\python\\print\\out\\%s.png' % (index))
|
[
"dachuant@163.com"
] |
dachuant@163.com
|
1ecf8dccf480745015841d6e35cbca8ce0076814
|
c61145e8771724575f67ae5738dd6cbb9626a706
|
/blog/permissions.py
|
5495131200db82f79188dabaa9b8e47477738327
|
[] |
no_license
|
Seredyak1/test_task
|
1399dd082f4281ca6f72d036f4df4c1c6945dafe
|
a5d433b827df46ffa95dd6dd91245b204884674f
|
refs/heads/master
| 2020-04-16T08:03:04.521740
| 2019-01-16T09:33:47
| 2019-01-16T09:33:47
| 165,409,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
from rest_framework import permissions
class IsPostOwner(permissions.BasePermission):
"""
Object-level permission to only allow updating his own profile
PUT and DELETE methods just for the user, who is owner of Post
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.user == request.user
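# A hedged sketch of attaching this permission to a view (the Post model and
# serializer names below are assumptions for illustration, not part of this module):
#
#   from rest_framework import viewsets
#
#   class PostViewSet(viewsets.ModelViewSet):
#       queryset = Post.objects.all()
#       serializer_class = PostSerializer
#       permission_classes = (IsPostOwner,)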
|
[
"sanya.seredyak@gmail.com"
] |
sanya.seredyak@gmail.com
|
6391f4b397b94798859a4bd942cda0713ae46dea
|
2de7c6584090daa7a11d07464a9c60eea36f1512
|
/datasets/sample_clean_class.py
|
56522e049dc3af43302bbad575815a25c8a0fc4f
|
[] |
no_license
|
bill86416/trojan_attack
|
26bb2e7050dbfce80e99f9127d957aa0f5900be8
|
818892221e4b4c556642016f28a1ab863c55ac7f
|
refs/heads/master
| 2022-11-07T01:27:34.125098
| 2020-06-29T22:14:38
| 2020-06-29T22:14:38
| 275,916,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,533
|
py
|
import numpy as np
import cv2
import pickle
import matplotlib.pyplot as plt
import random
import os
from tqdm import tqdm
def unpickle(file):
with open(file, 'rb') as fo:
dict1 = pickle.load(fo, encoding='latin1')
return dict1
def save_normal_img(data, index, pth):
R = data[0:1024].reshape(32,32)/255.0
G = data[1024:2048].reshape(32,32)/255.0
B = data[2048:].reshape(32,32)/255.0
img = np.dstack((R,G,B))
plt.imsave(pth + '/' + str(index) + '.png',img)
trn_x = None
trn_y = None
for i in range(1,6):
tmp_x = np.asarray(unpickle('./raw_data/cifar-10-batches-py/data_batch_'+str(i))['data']).astype(np.float64)
trn_x = tmp_x if trn_x is None else np.concatenate((trn_x, tmp_x), axis=0)
tmp_y = unpickle('./raw_data/cifar-10-batches-py/data_batch_'+str(i))['labels']
trn_y = tmp_y if trn_y is None else np.concatenate((trn_y, tmp_y), axis=0)
tst_x = np.asarray(unpickle('./raw_data/cifar-10-batches-py/test_batch')['data']).astype(np.float64)
tst_y = unpickle('./raw_data/cifar-10-batches-py/test_batch')['labels']
labels = unpickle('./raw_data/cifar-10-batches-py/batches.meta')['label_names']
selected_classes = ['airplane','automobile','frog','cat','ship']
selected_normal_datset = {}
normal_pth = './selected_clean_dataset'
if not os.path.exists(normal_pth):
os.mkdir(normal_pth)
# training
if not os.path.exists(normal_pth + '/train'):
os.mkdir(normal_pth + '/train')
for i in selected_classes:
if not os.path.exists(normal_pth + '/train' + '/' + i):
os.mkdir(normal_pth + '/train' + '/' + i)
for i in tqdm(range (trn_x.shape[0])):
cls = labels[trn_y[i]]
if cls in selected_classes:
if cls not in selected_normal_datset:
selected_normal_datset[cls] = 0
else:
selected_normal_datset[cls] += 1
save_normal_img(trn_x[i], selected_normal_datset[cls], normal_pth + '/train' + '/' + cls)
# testing
selected_normal_datset = {}
if not os.path.exists(normal_pth + '/test'):
os.mkdir(normal_pth + '/test')
for i in selected_classes:
if not os.path.exists(normal_pth + '/test' + '/' + i):
os.mkdir(normal_pth + '/test' + '/' + i)
for i in tqdm(range (tst_x.shape[0])):
cls = labels[tst_y[i]]
if cls in selected_classes:
if cls not in selected_normal_datset:
selected_normal_datset[cls] = 0
else:
selected_normal_datset[cls] += 1
save_normal_img(tst_x[i], selected_normal_datset[cls], normal_pth + '/test' + '/' + cls)
|
[
"bill86416@gmail.com"
] |
bill86416@gmail.com
|
5b7e0a0263f28b6e26fad36cbcca0e42ad259e96
|
23bbe9c7872180396684806ac4a2a0da498de029
|
/contactapp/migrations/0003_assitance_createsucce_social.py
|
72320ff8279b943bc889ce07073c9fa346cb5c6d
|
[] |
no_license
|
Zizou897/purzzle
|
12a13a144677df096855d3133e8bed3857ed6bca
|
fd8a8757f01734c84cf01668b478358b31c054f7
|
refs/heads/main
| 2023-04-29T04:59:42.190315
| 2021-05-25T16:47:56
| 2021-05-25T16:47:56
| 370,630,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,438
|
py
|
# Generated by Django 3.2.3 on 2021-05-22 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contactapp', '0002_newsletter'),
]
operations = [
migrations.CreateModel(
name='Assitance',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField()),
('title', models.CharField(max_length=250)),
('name', models.CharField(max_length=250)),
('phone', models.CharField(max_length=250)),
('icon', models.CharField(max_length=250)),
],
options={
'verbose_name': 'Assitance',
'verbose_name_plural': 'Assitances',
},
),
migrations.CreateModel(
name='CreateSucce',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField()),
('title', models.CharField(max_length=50)),
('sous_title', models.CharField(max_length=250)),
('description', models.TextField()),
],
options={
'verbose_name': 'CreateSucce',
'verbose_name_plural': 'CreateSucces',
},
),
migrations.CreateModel(
name='Social',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_update', models.DateTimeField(auto_now=True)),
('status', models.BooleanField()),
('name', models.CharField(max_length=250)),
('icon', models.CharField(max_length=250)),
],
options={
'verbose_name': 'CreateSucce',
'verbose_name_plural': 'CreateSucces',
},
),
]
|
[
"azeridwan10@gmail.com"
] |
azeridwan10@gmail.com
|
f7bb9c2c7637546d17ab231eaff8b8e9de225f3e
|
287984049908b76587a0d0acf8f129875ed4b99a
|
/navbar.py
|
08f302885117b26214b48f5b106cab12b82acd18
|
[] |
no_license
|
leonardtang/stonks
|
172235c80205a967d9e5a948e2c776a64fd7f18c
|
4f9a540ba5f19dd8dd7247239e9a511d08fd61de
|
refs/heads/master
| 2023-06-02T15:44:43.707253
| 2021-06-22T11:02:15
| 2021-06-22T11:02:15
| 359,015,232
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
import dash_bootstrap_components as dbc
def Navbar():
navbar = dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink("Stocks", href="/stocks")),
dbc.NavItem(dbc.NavLink("Crypto", href="/crypto")),
dbc.NavItem(dbc.NavLink("Volatility", href="/volatility")),
dbc.NavItem(dbc.NavLink("Pulse Check", href="/sentiment"))
],
brand="Stonks: A Leonard Tang Production",
brand_href="https://leonardtang.me",
sticky="top",
fluid=True
)
return navbar
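# Hedged usage sketch: mounting the navbar in a Dash layout (the app setup
# below is illustrative, not part of this module):
#
#   import dash
#   import dash_html_components as html
#
#   app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
#   app.layout = html.Div([Navbar()])
#   app.run_server()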
|
[
"leonardgentwintang@gmail.com"
] |
leonardgentwintang@gmail.com
|
275cb2dc84ed3aebe31d43dcfb67ea7ee46bb73a
|
ddcebdfe77e095ff39e33ad44a39fd2c85b42701
|
/lab03_03.py
|
0423d2d2e20c7abda28b6d8fe955b54cc9fa5024
|
[] |
no_license
|
soizensun/python-lab-solution
|
08b793e4bce3f996f033ab1a9624e43ce69b6d6e
|
897f5e5051753f13e5448bb8edd2b09d3a017291
|
refs/heads/master
| 2021-08-28T14:44:28.867992
| 2017-12-12T13:30:32
| 2017-12-12T13:30:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
h = int(input('Enter number of hours: '))
m = int(input('Enter number of minutes: '))
if(h < 0 or m < 0 or m > 59):
print("Input Error")
else:
baht = 0
if(h == 0 and m <= 15):
print('No charge, thanks.')
else:
if(m > 0):
h = h + 1
if(h <= 2):
baht = baht + 10
print('Total amount due is %d Bahts.'%baht)
elif(h > 2):
baht = baht + 10 + (10*(h-2))
print('Total amount due is %d Bahts.'%baht)
elif(m == 0):
if(h <= 2):
baht = baht + 10
print('Total amount due is %d Bahts.'%baht)
elif(h > 2):
baht = baht + 10 + (10*(h-2))
print('Total amount due is %d Bahts.'%baht)
|
[
"zozen@gmail.com"
] |
zozen@gmail.com
|
b372e8891c3a17c5a20bd4f2e5c2b6c8fde17955
|
89f47a87b780d0ab08bac724d519c7e35d6f85f9
|
/customerapp/migrations/0007_auto_20181018_1610.py
|
589e2cfe800d5f6c1051422916a58d4b7900e608
|
[] |
no_license
|
Bhavin55/project
|
df572486684d49aa8289df8a66c1cfe1705ee688
|
e94dce8ac350ca3a3cbfa576d5daf8bc9ddd55b9
|
refs/heads/master
| 2020-04-02T13:53:33.393427
| 2018-10-24T12:42:24
| 2018-10-24T12:42:24
| 154,501,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-18 16:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('customerapp', '0006_auto_20181018_1604'),
]
operations = [
migrations.RemoveField(
model_name='vechilemodel',
name='id',
),
migrations.AlterField(
model_name='vechilemodel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
),
]
|
[
"bhavinbharathan55@gmail.com"
] |
bhavinbharathan55@gmail.com
|
5d5b0ee5fecf06912ea0fbbf1e1bc3d9eb06a082
|
cefe901a9b77117ca6450da4d3a3389469d7e097
|
/Speaker.py
|
fdc4270655f03ca82ff16bf03402577819e9d834
|
[] |
no_license
|
saguileran/Scripts
|
ea5a0cbac6ac03e7c32db889db46f45c63a32f18
|
cb1443164bcd26d5ac2b036ed62cf8f6ec2916c1
|
refs/heads/master
| 2020-05-30T18:54:31.401635
| 2019-06-08T04:58:17
| 2019-06-08T04:58:17
| 189,910,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,982
|
py
|
#### This is a tutorial from Alexander Hiam's book, page 45 of the PDF ####
import time
from Adafruit_BBIO import PWM
from Notas import *
led_pin = "P9_14" #Pin donde se coloca un PWM
PWM.start(led_pin, 0, 60) #el segundo termino es the initial duty cycle, el tercero es la frecuencia
dt=0.1 #tiempo entre pulsos
#a=False #Apagar ciclo
a=True #Realiza el sonido
#Generando una onda
def wave(led_pin, frecuency, dt):  # (pin, frequency, duration/2)
    PWM.start(led_pin, 0, frecuency)  # second argument is the initial duty cycle, third is the frequency
for level in range(0, 100):
PWM.set_duty_cycle(led_pin, level)
time.sleep(dt)
for level in range(100, 0, -1): ##
PWM.set_duty_cycle(led_pin, level)
time.sleep(dt)
def nowave(pin):
PWM.set_duty_cycle(led_pin, 0)
return()
# Generate a defined note
melody = [C4, G3, G3, A3, G3, 0, B3, C4]
noteDurations=[4,8,8,4,4,4,4,4]
# iterate over the notes of the melody:
''' # multi-line comment block
for thisNote in range(len(melody)):
# to calculate the note duration, take one second divided by the note type.
#e.g. quarter note = 1000 / 4, eighth note = 1000/8, etc.
noteDuration = int(1000 / noteDurations[thisNote])
wave(led_pin, melody[thisNote], noteDuration)
# to distinguish the notes, set a minimum time between them.
# the note's duration + 30% seems to work well:
pauseBetweenNotes = int(noteDuration * 1.3)
time.sleep(pauseBetweenNotes)
# stop the wave playing:
nowave(led_pin)
'''
while(a):
for level in range(0, 100):
PWM.set_duty_cycle(led_pin, level)
time.sleep(dt)
for level in range(10, 0, -1):
PWM.set_duty_cycle(led_pin, level)
time.sleep(dt)
a=0
    # a = int(input("Continue 1, stop 0: "))  # ask the user for a value
    if a == 1:
        a = True
        b = float(input("New frequency: "))
        PWM.start(led_pin, 0, b)  # second argument is the initial duty cycle, third is the frequency
else: a=False
# except(KeyboardInterrupt):
PWM.cleanup()
|
[
"saguileran@unal.edu.co"
] |
saguileran@unal.edu.co
|
61cf6c2336cb0a0ead3430453c374718f1a68923
|
ae75d25cc4c1d2f77726f1346346ad3a3989696f
|
/code_examples/builtin_types/lists.py
|
b8a0455941ce4b93b64b2b97b48e2e32dd2d0d44
|
[] |
no_license
|
marcin-bakowski-intive/python3-training
|
6c6bda2e2301a563c6fe7f152c5c6478eedb9b43
|
1a150ab6825080d00a1b21560e08f33c219af7ac
|
refs/heads/master
| 2020-04-05T20:51:33.826683
| 2018-12-11T14:20:19
| 2018-12-12T07:31:43
| 157,197,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
#!/usr/bin/env python3
# https://docs.python.org/3/library/stdtypes.html#sequence-types-list-tuple-range
fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
print("apple count: %s" % fruits.count('apple'))
print("tangerine count: %s" % fruits.count('tangerine'))
print("banana count: %s" % fruits.count('banana'))
print("banana index: %s" % fruits.index('banana'))
print("2nd banana index: %s" % fruits.index('banana', 4))
print("Fruit reversed: %s" % fruits.reverse())
fruits.append('grape')
print(fruits)
fruits.sort()
print("sorted fruits: %s" % fruits)
for fruit in fruits:
print("This is a %s" % fruit)
while fruits:
print("Let's eat %s" % fruits.pop())
print(fruits)
if not fruits:
print("There are no fruits")
fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
print("First fruit: %s" % fruits[0])
print("Last fruit: %s" % fruits[-1])
print("There are %d fruits" % len(fruits))
print("Let's take first 2 fruits: %s" % fruits[:2])
print("Let's take last 3 fruits: %s" % fruits[-3:])
print("Let's take every second fruit: %s" % fruits[::2])
print("is plum in fruits: %s" % ("plum" in fruits))
|
[
"marcin.bakowski@intive.com"
] |
marcin.bakowski@intive.com
|
b2e5f4f3b6676dbdc8c2a546cf50b28f0f92ad08
|
b911528809d4b231abd1aab7892d4177dd323072
|
/helper.py
|
5f233862dfa6cad7099331131a8072f81a9c3bdc
|
[] |
no_license
|
welchrj/MAE6286
|
6339763826b1bf49dfd93c108b86244a493a8e01
|
080f02a281e2b2190120456e5a679d94b382e48f
|
refs/heads/master
| 2020-07-12T11:42:14.925631
| 2019-12-12T16:34:04
| 2019-12-12T16:34:04
| 204,810,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,500
|
py
|
"""
Helper functions for lessons of module 5 of Numerical-MOOC.
"""
import numpy
from matplotlib import pyplot, cm
from mpl_toolkits import mplot3d
def ftcs_neumann(u0, sigma, nt):
    '''FTCS scheme with a Neumann (zero-gradient) condition at the right boundary.'''
    u = u0.copy()
    for i in range(nt):
        u[1:-1] = u[1:-1] + sigma * (u[:-2] - 2 * u[1:-1] + u[2:])
        u[-1] = u[-2]
return u
def laplace_solution(x, y, Lx, Ly):
"""
Computes and returns the analytical solution of the Laplace equation
on a given two-dimensional Cartesian grid.
Parameters
----------
x : numpy.ndarray
The gridline locations in the x direction
as a 1D array of floats.
y : numpy.ndarray
The gridline locations in the y direction
as a 1D array of floats.
Lx : float
Length of the domain in the x direction.
Ly : float
Length of the domain in the y direction.
Returns
-------
p : numpy.ndarray
The analytical solution as a 2D array of floats.
"""
X, Y = numpy.meshgrid(x, y)
p = (numpy.sinh(1.5 * numpy.pi * Y / Ly) /
numpy.sinh(1.5 * numpy.pi * Ly / Lx) *
numpy.sin(1.5 * numpy.pi * X / Lx))
return p
def poisson_solution(x, y, Lx, Ly):
"""
Computes and returns the analytical solution of the Poisson equation
on a given two-dimensional Cartesian grid.
Parameters
----------
x : numpy.ndarray
The gridline locations in the x direction
as a 1D array of floats.
y : numpy.ndarray
The gridline locations in the y direction
as a 1D array of floats.
Lx : float
Length of the domain in the x direction.
Ly : float
Length of the domain in the y direction.
Returns
-------
p : numpy.ndarray
The analytical solution as a 2D array of floats.
"""
X, Y = numpy.meshgrid(x, y)
p = numpy.sin(numpy.pi * X / Lx) * numpy.cos(numpy.pi * Y / Ly)
return p
def l2_norm(p, p_ref):
"""
Computes and returns the relative L2-norm of the difference
between a solution p and a reference solution p_ref.
If L2(p_ref) = 0, the function simply returns
the L2-norm of the difference.
Parameters
----------
p : numpy.ndarray
The solution as an array of floats.
p_ref : numpy.ndarray
The reference solution as an array of floats.
Returns
-------
diff : float
The (relative) L2-norm of the difference.
"""
l2_diff = numpy.sqrt(numpy.sum((p - p_ref)**2))
l2_ref = numpy.sqrt(numpy.sum(p_ref**2))
if l2_ref > 1e-12:
return l2_diff / l2_ref
return l2_diff
def poisson_2d_jacobi(p0, b, dx, dy, maxiter=20000, rtol=1e-6):
"""
Solves the 2D Poisson equation for a given forcing term
using Jacobi relaxation method.
The function assumes Dirichlet boundary conditions with value zero.
The exit criterion of the solver is based on the relative L2-norm
of the solution difference between two consecutive iterations.
Parameters
----------
p0 : numpy.ndarray
The initial solution as a 2D array of floats.
b : numpy.ndarray
The forcing term as a 2D array of floats.
dx : float
Grid spacing in the x direction.
dy : float
Grid spacing in the y direction.
maxiter : integer, optional
Maximum number of iterations to perform;
default: 20000.
rtol : float, optional
Relative tolerance for convergence;
default: 1e-6.
Returns
-------
p : numpy.ndarray
The solution after relaxation as a 2D array of floats.
ite : integer
The number of iterations performed.
conv : list
The convergence history as a list of floats.
"""
p = p0.copy()
conv = [] # convergence history
diff = rtol + 1.0 # initial difference
ite = 0 # iteration index
while diff > rtol and ite < maxiter:
pn = p.copy()
p[1:-1, 1:-1] = (((pn[1:-1, :-2] + pn[1:-1, 2:]) * dy**2 +
(pn[:-2, 1:-1] + pn[2:, 1:-1]) * dx**2 -
b[1:-1, 1:-1] * dx**2 * dy**2) /
(2.0 * (dx**2 + dy**2)))
        # Dirichlet boundary conditions are automatically enforced.
# Compute and record the relative L2-norm of the difference.
diff = l2_norm(p, pn)
conv.append(diff)
ite += 1
return p, ite, conv
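# A minimal usage sketch for the Jacobi solver (grid size and forcing term
# below are illustrative assumptions):
#
#   x = numpy.linspace(0.0, 1.0, num=41)
#   y = numpy.linspace(0.0, 1.0, num=41)
#   b = numpy.random.rand(41, 41)   # illustrative forcing term
#   p0 = numpy.zeros((41, 41))
#   p, ite, conv = poisson_2d_jacobi(p0, b, x[1] - x[0], y[1] - y[0])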
def plot_3d(x, y, p, label='$z$', elev=30.0, azim=45.0):
"""
Creates a Matplotlib figure with a 3D surface plot of the scalar field p.
Parameters
----------
x : numpy.ndarray
Gridline locations in the x direction as a 1D array of floats.
y : numpy.ndarray
Gridline locations in the y direction as a 1D array of floats.
p : numpy.ndarray
Scalar field to plot as a 2D array of floats.
label : string, optional
Axis label to use in the third direction;
default: 'z'.
elev : float, optional
Elevation angle in the z plane;
default: 30.0.
azim : float, optional
Azimuth angle in the x,y plane;
default: 45.0.
"""
fig = pyplot.figure(figsize=(8.0, 6.0))
ax = mplot3d.Axes3D(fig)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel(label)
X, Y = numpy.meshgrid(x, y)
ax.plot_surface(X, Y, p, cmap=cm.viridis)
ax.set_xlim(x[0], x[-1])
ax.set_ylim(y[0], y[-1])
ax.view_init(elev=elev, azim=azim)
|
[
"welchrj@gwmail.gwu.edu"
] |
welchrj@gwmail.gwu.edu
|
67342d9963c24dbb9002f5f01da016bfbeed01b0
|
f11a7316d46f1119a261da65abe0d45ffd37bf50
|
/app/addexpense.py
|
e760243220ddcad9a2918f21a13a642fa9ff3276
|
[
"MIT"
] |
permissive
|
327585419/ExpenseTracker
|
728202f904124846b4f39e844acb9532e336e6b9
|
05a96e714564325dbe30c1a368b1bba965d5d0ee
|
refs/heads/master
| 2022-02-26T16:53:58.970500
| 2019-11-03T05:31:11
| 2019-11-03T05:31:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,623
|
py
|
from datetime import datetime, timedelta
from kivy.uix.screenmanager import Screen
from kivy.uix.dropdown import DropDown
from kivy.app import App
from kivymd.pickers import MDDatePicker
from kivytoast import toast
from dtclasses import Items, Expenses
from additem import CustomFlatButton
add_expense = """
#:import MDTextField kivymd.textfields.MDTextField
<AddExpense>
item_name: item_name.__self__
value: value.__self__
submit: submit.__self__
lbl_message: lbl_message.__self__
datelabel : datelabel.__self__
daycalendar: daycalendar.__self__
BoxLayout:
orientation: 'vertical'
BoxLayout:
orientation: 'horizontal'
size_hint_y: .1
canvas.before:
Color:
rgba: app.theme_cls.primary_light
Rectangle:
size: self.size
pos: self.pos
MDIconButton:
icon: 'chevron-left'
size_hint_x: .15
on_press: root.left_arrow()
MDLabel:
id: datelabel
size_hint_x: .55
#on_text: root.refresh_list()
halign : 'center'
theme_text_color: "Primary"
MDIconButton:
icon: 'chevron-right'
size_hint_x: .15
on_press: root.right_arrow()
MDIconButton:
id: daycalendar
icon: 'calendar'
size_hint_x: .15
on_press: root.show_date_picker()
FloatLayout:
# orientation: 'vertical'
size_hint_y : .7
MDTextField:
id: item_name
# pos_hint: {'center_x': .5}
hint_text: 'Category/Sub-Category'
size_hint_x : .75
pos_hint: {'x': .1, 'y': .75}
normal_color: app.theme_cls.accent_light
foreground_color : app.theme_cls.text_color
on_text: root.get_category('field')
elevation: 10
helper_text_mode : 'on_focus'
helper_text : 'Type/Select from Dropdown'
input_filter: root.text_filter
max_text_length: 20
MDIconButton:
icon: 'chevron-down'
size_hint_x: .15
pos_hint: {'x': .85, 'y': .75}
on_release: root.get_category('button')
MDTextField:
id: value
size_hint_x : .75
pos_hint: {'x': .1, 'y': .55}
hint_text: 'Value'
normal_color: app.theme_cls.accent_light
foreground_color : app.theme_cls.text_color
elevation: 10
input_filter : 'float'
helper_text_mode : 'on_focus'
helper_text : 'Enter expense amount'
MDFillRoundFlatButton:
id: submit
pos_hint: {'x': .1, 'y': .3}
size_hint_x: .75
# width: 250
text: 'Submit'
on_release: root.add_expense()
MDLabel:
id: lbl_message
size_hint_x: .8
pos_hint: {'center_x': .5, 'center_y': .2}
halign: 'center'
theme_text_color: "Primary"
BoxLayout:
size_hint_y : .2
"""
class AddExpense(Screen):
expense_date = datetime.today()
cat_dropdown = DropDown()
def __init__(self, **kwargs):
self.name = "Add Expense"
self.app = App.get_running_app()
super(AddExpense, self).__init__()
self.cat_dropdown.bind(
on_select=lambda instance, x: setattr(self.item_name, "text", x)
)
self.cat_dropdown.width = self.item_name.width
def show_date_picker(self):
MDDatePicker(self.pick_date).open()
def pick_date(self, exp_date):
self.lbl_message.text = ""
self.datelabel.text = str(exp_date)
self.expense_date = exp_date
def add_expense(self):
item_name = self.item_name.text
value = self.value.text
exp_date = self.datelabel.text
if (
item_name is None
or item_name == ""
or value is None
or value == ""
or exp_date is None
or exp_date == ""
):
self.lbl_message.text = "Required input missing."
self.lbl_message.theme_text_color = "Error"
return
item_id = Items.get_item(item_name=item_name, item_link=None)
if item_id is None or item_id == 0:
self.lbl_message.text = (
"No Category/Sub-Category by this name. "
"Please create if required from Items screen."
)
self.lbl_message.theme_text_color = "Error"
return
if value == "0":
self.lbl_message.text = "Enter an amount not equal to 0"
self.lbl_message.theme_text_color = "Error"
return
expense_id = Expenses.get_next_exp_id()
kwargs = {
"expense_id": expense_id,
"item_id": item_id,
"value": float(value),
"date": exp_date,
}
Expenses.add_expense(**kwargs)
toast("Expense Added")
self.leave_screen()
def on_enter(self, *args):
self.expense_date = datetime.strptime(self.app.date, "%Y-%m-%d")
self.datelabel.text = self.app.date
self.item_name.text = ""
self.value.text = ""
self.item_name.focus = True
self.lbl_message.text = ""
def left_arrow(self):
self.expense_date = self.expense_date - timedelta(days=1)
self.datelabel.text = self.expense_date.strftime("%Y-%m-%d")
self.lbl_message.text = ""
def right_arrow(self):
self.expense_date = self.expense_date + timedelta(days=1)
self.datelabel.text = self.expense_date.strftime("%Y-%m-%d")
self.lbl_message.text = ""
def get_category(self, *args):
"""function to get items based on the search text entered by user"""
self.cat_dropdown.clear_widgets()
self.lbl_message.text = ""
if self.cat_dropdown.attach_to is not None:
self.cat_dropdown._real_dismiss()
item_name = self.item_name.text
item_dict = {}
if item_name is not None and item_name != "" and args[0] != "button":
item_dict = Items.get_items(item_name=item_name, item_type="all")
if args[0] == "button":
item_dict = Items.get_items(item_name="", item_type="all")
if item_dict != {}:
for key, value in item_dict.items():
self.cat_dropdown.add_widget(
CustomFlatButton(
text=value["item_name"],
on_release=lambda x: self.cat_dropdown.select(x.text),
md_bg_color=self.app.theme_cls.accent_light,
width=self.item_name.width,
)
)
self.cat_dropdown.open(self.item_name)
def text_filter(self, input_text, undo_flag):
if input_text.isalnum():
return input_text
else:
return
def leave_screen(self, *args):
self.app.screens.show_screen("Expenses")
def on_leave(self, *args):
self.cat_dropdown.clear_widgets()
|
[
"adityabhawsingka@gmail.com"
] |
adityabhawsingka@gmail.com
|
6642b80f27554a501506a6ad0f13c7179eccc798
|
2700a95438492d3c99b86ac5ff49806d79d5f3c7
|
/example/serializers.py
|
1f2c59651d71e0f8b15f7e04b73d0ea74f7c39c3
|
[
"BSD-2-Clause"
] |
permissive
|
kaldras/django-rest-framework-json-api
|
ea621a79d911715e383ff8d1d36e18bb452b5582
|
a4ae22ca4e8351e40e9e4a82c6c117f6beac9653
|
refs/heads/develop
| 2021-04-15T18:36:37.088458
| 2015-09-21T22:43:17
| 2015-09-21T22:43:17
| 42,941,620
| 0
| 0
| null | 2015-09-22T15:11:30
| 2015-09-22T15:11:30
| null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
from rest_framework import serializers
from example.models import Blog, Entry, Author
class BlogSerializer(serializers.ModelSerializer):
class Meta:
model = Blog
fields = ('name', )
class EntrySerializer(serializers.ModelSerializer):
class Meta:
model = Entry
fields = ('blog', 'headline', 'body_text', 'pub_date', 'mod_date',
'authors',)
class AuthorSerializer(serializers.ModelSerializer):
class Meta:
model = Author
fields = ('name', 'email',)
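# Hedged shell-usage sketch (the field value is illustrative):
#
#   blog = Blog(name='Example blog')
#   BlogSerializer(blog).data   # -> {'name': 'Example blog'}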
|
[
"mail@unruhdesigns.com"
] |
mail@unruhdesigns.com
|
b14160cc6e4a410f235a915e100cb604c440c372
|
d45e2c58e57b22b48bf1b3c426c5e62ec7651c4c
|
/LECOUEDIC_TORTOSA_OLIVIER_TP1.py
|
66b9c66a8d39c79eeaba96420450c1a9e1b52faf
|
[] |
no_license
|
tlc10/Data-Analysis
|
0c6d5ff9866f9c6fdd4831401c381b568b1acd46
|
07d655d32a9c76eab8954792501fae308f3e482e
|
refs/heads/master
| 2022-09-25T19:26:59.364829
| 2020-06-02T15:10:40
| 2020-06-02T15:10:40
| 268,833,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,234
|
py
|
#LECOUEDIC Thomas
#TORTOSA Hugo
#RAPHAEL OLIVIER
#TP1 DATA ANALYSIS
import statistics
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import stats
from math import sqrt,pi,exp
from collections import Counter
#A
#1)
A = np.random.randint(0,10,1000)
#2)
plt.hist(A)
#3)
#mean
n = len(A)
S = sum(A)
mean = S / n
print(mean)
#median
A.sort()
if n % 2 == 0:
m1 = A[n//2]
    m2 = A[n//2 - 1]
median = (m1 + m2)/2
else:
median = A[n//2]
print(median)
#mode
c = Counter(A)
get_mode = dict(c)
mode = [k for k, v in get_mode.items() if v == max(list(c.values()))]
if len(mode) == n:
get_mode = "No mode found"
else:
get_mode = "Mode is " + ', '.join(map(str, mode))
print(get_mode)
#4)
np.mean(A)
np.median(A)
#6)
#range
range1 = max(A)-min(A)
print(range1)
#variance
var=0
c=0
for k in range(0,n):
c+=(A[k]-mean)**2
var = c/n
print(var)
#standard_deviation
std = sqrt(c/n)
np.ptp(A)
np.var(A)
np.std(A)
#B
#1
dataset = [10,5,12,8,48,9,23,10,24,11,48,12,9,13,7,14,13,16]
ser = pd.Series(dataset[1::2],index=dataset[::2])
ser.plot.bar()
#2
#position
np.mean(ser.index)
max(ser.index)
np.median(ser.index)
min(ser.index)
#dispersion
np.std(ser.index)
np.ptp(ser.index)
np.var(ser.index)
statistics.mode(ser.index)
#C
#1
#2
sampleQI = np.random.normal(100,15,100000)
plt.hist(sampleQI)
#3
np.mean(sampleQI)
np.std(sampleQI)
#4
cnt1 = 0
for i in sampleQI:
if i<60:
cnt1+=1
cnt1/len(sampleQI)*100
#5
cnt2 = 0
for i in sampleQI:
if i>130:
cnt2+=1
cnt2/len(sampleQI)*100
#6
def IC(mean, std):
print('The percentage between', mean -1.96*std,"and",mean +1.96*std)
IC(np.mean(sampleQI),np.std(sampleQI))
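# Note (added for clarity): mean ± 1.96 * std covers roughly 95% of a normal
# distribution, which is why IC() reports those two bounds.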
#D
#1
sample1 = np.random.normal(100,15,10)
sample2 = np.random.normal(100,15,1000)
sample3 = np.random.normal(100,15,100000)
np.mean(sample1)
np.std(sample1)
np.mean(sample2)
np.std(sample2)
np.mean(sample3)
np.std(sample3)
IC(np.mean(sample1),np.std(sample1))
IC(np.mean(sample2),np.std(sample2))
IC(np.mean(sample3),np.std(sample3))
#2
dataset_malnutrition = pd.read_csv("malnutrition.csv")
#3
np.mean(dataset_malnutrition)
np.std(dataset_malnutrition)
IC(np.mean(dataset_malnutrition),np.std(dataset_malnutrition))
|
[
"noreply@github.com"
] |
tlc10.noreply@github.com
|
38c82483fb2383ea015666b29dbae06848e61afe
|
ea05617b5d33a641bb60b735e936e8f0ba6e57a7
|
/unittests/test_splash.py
|
fd230dd1991ad97954cad4ff5e86756b9c47c6f8
|
[] |
no_license
|
bbpatil/Phoenix
|
18716744f5a3f5dbd805520baf3edc14ebde9529
|
4d05434a6c9e9effb2ade8085e2bfa83775575ed
|
refs/heads/master
| 2022-02-23T21:40:34.510672
| 2016-06-12T05:26:06
| 2016-06-12T05:26:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 966
|
py
|
import unittest
import wtc
import wx
import wx.adv
import os
pngFile = os.path.join(os.path.dirname(__file__), 'toucan.png')
#---------------------------------------------------------------------------
class splash_Tests(wtc.WidgetTestCase):
def test_splash1(self):
wx.adv.SPLASH_CENTRE_ON_PARENT
wx.adv.SPLASH_CENTRE_ON_SCREEN
wx.adv.SPLASH_NO_CENTRE
wx.adv.SPLASH_TIMEOUT
wx.adv.SPLASH_NO_TIMEOUT
wx.adv.SPLASH_CENTER_ON_PARENT
wx.adv.SPLASH_CENTER_ON_SCREEN
wx.adv.SPLASH_NO_CENTER
def test_splash2(self):
splash = wx.adv.SplashScreen(wx.Bitmap(pngFile),
wx.adv.SPLASH_TIMEOUT|wx.adv.SPLASH_CENTRE_ON_SCREEN,
250, self.frame)
self.waitFor(300)
#---------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
[
"robin@alldunn.com"
] |
robin@alldunn.com
|
391965ab758a9bc77d76205ceb946adf9d009a2d
|
af24f5e44068ddc91e05ecdbafac2d01755fd2e9
|
/blog/migrations/0028_auto_20200706_1507.py
|
33d3e828815ec09a8e24fb1bd42385e126ce098e
|
[] |
no_license
|
IsraJC/my-first-blog
|
a34b666fe09ae091c54c00e6121a6bd528fd22e1
|
84bc92a6cde1aec527600040cd382cd1853c1574
|
refs/heads/master
| 2023-08-12T18:42:21.745988
| 2021-10-04T19:50:09
| 2021-10-04T19:50:09
| 269,368,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
# Generated by Django 2.2.13 on 2020-07-06 14:07
from django.db import migrations, models
import django_mysql.models
class Migration(migrations.Migration):
dependencies = [
('blog', '0027_cv_skills'),
]
operations = [
migrations.AlterField(
model_name='cv',
name='skills',
field=django_mysql.models.ListCharField(models.CharField(max_length=100), default=[], max_length=1010, size=10),
),
]
|
[
"israchanna@gmail.com"
] |
israchanna@gmail.com
|
364e79d12f465bb9b2bc58ca1814c8753ec31f44
|
8984d05795a32d3ffb646dad2b7a3b1cb3d118ec
|
/OOO processor/code/assembler.py
|
7277841b7a008799eedb7ad7e7b4966f9541cfa1
|
[
"MIT"
] |
permissive
|
Shubhayu-Das/VL803-projects
|
3999782261c1ae1b07f5b418790a101c7d3d6d1c
|
895066e3449538bffd12413d93344515afed08c4
|
refs/heads/main
| 2023-05-06T10:10:05.139534
| 2021-05-21T18:23:34
| 2021-05-21T18:23:34
| 337,995,514
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,375
|
py
|
'''
MIT Licensed by Shubhayu Das, copyright 2021
Developed for Processor Architecture course assignment 1 - Tomasulo Out-Of-Order Machine
This is the script for a basic RISC-V assembler. It only supports the LW, ADD, SUB, MUL and DIV instructions so far.
All are integer instructions, although the program itself also handles floating point numbers (cheating).
'''
import re
import sys
# Function to split the instruction string into opcode and registers(and offset if needed)
# This function is capable of handling comments too
def split_operands(program):
program = [inst.split(";")[0] for inst in program]
program = list(filter(None, program))
program = [re.split(r",|\s", inst.strip()) for inst in program]
program = [[word.upper().replace('X', '') for word in inst if word]
for inst in program]
program = [inst for inst in program if inst]
return program
# Zero pad the binary numbers appropriately
def pad(number, n):
number = number[2:]
while len(number) < n:
number = "0" + number
return number
# The main assembler function, which contains the mapping between the instructions and their
# opcodes, function-7 and function-3 fields
def assembler(filename):
outFile = ".".join([filename.split("/")[-1].split(".")[0], "bin"])
program = []
assembly = []
mapping = {
"ADD": {
"funct7": "0000000",
"funct3": "000",
"opcode": "0110011"
},
"SUB": {
"funct7": "0100000",
"funct3": "000",
"opcode": "0110011"
},
"MUL": {
"funct7": "0000001",
"funct3": "000",
"opcode": "0110011"
},
"DIV": {
"funct7": "0000001",
"funct3": "100",
"opcode": "0110011"
},
"LW": {
"funct3": "010",
"opcode": "1010011"
},
}
# Read the source code
with open(filename) as sourceCode:
program = (sourceCode.readlines())
# Split each instruction into requisite pieces
program = split_operands(program)
# Decode the split chunks into binary
for i, inst in enumerate(program):
if "LW" in inst:
offset, rs1 = inst[2].split('(')
offset = pad(bin(int(offset)), 12)
rs1 = pad(bin(int(rs1.replace(')', ''))), 5)
rd = pad(bin(int(inst[1])), 5)
assembly.append(
offset + rs1 + mapping["LW"]["funct3"] + rd + mapping["LW"]["opcode"])
else:
rd = pad(bin(int(inst[1])), 5)
rs1 = pad(bin(int(inst[2])), 5)
rs2 = pad(bin(int(inst[3])), 5)
assembly.append(mapping[inst[0]]["funct7"] + rs2 + rs1 +
mapping[inst[0]]["funct3"] + rd + mapping[inst[0]]["opcode"])
# Write the assembled binary into an output bin file
with open(f"build/{outFile}", 'w') as destFile:
for idx, inst in enumerate(assembly):
destFile.write(inst)
if idx < len(assembly) - 1:
destFile.write("\n")
return f"build/{outFile}"
# Check if a program was fed in; otherwise use a default
if len(sys.argv) < 2:
print(f"Output generated to: {assembler('src/riscv_program.asm')}")
else:
print(f"Output generated to: {assembler(sys.argv[1])}")
|
[
"Shubhayu-Das@users.noreply.github.com"
] |
Shubhayu-Das@users.noreply.github.com
|
cb4e8da828ef302f7370bd82751ec3397ad3203c
|
d91bbff79ad7b13b7ddf0e3468c08a378c4d40e8
|
/flaskblog/__init__.py
|
98ef8cd44d17c742425d1c89ecb10e808ffa88c1
|
[] |
no_license
|
bugtijamal/flaskblog
|
07927f68f9766c127da6ee638501bf55fab0b467
|
46583df1e2861f82772edcc37ddf53f89c3e2acc
|
refs/heads/gh-pages
| 2022-11-07T22:43:10.846688
| 2019-05-08T11:16:16
| 2019-05-08T11:16:16
| 185,075,433
| 7
| 3
| null | 2022-10-06T01:38:36
| 2019-05-05T19:49:04
|
HTML
|
UTF-8
|
Python
| false
| false
| 579
|
py
|
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_msearch import Search
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.config['SECRET_KEY']='thisisatopsecretsforme'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=True
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
search = Search()
search.init_app(app)
login_manager = LoginManager(app)
login_manager.login_view = "login"
login_manager.login_message_category = "info"
from flaskblog import routes
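# Hedged usage sketch (a typical run.py entry point; the file name is an
# assumption, not part of this package):
#
#   from flaskblog import app
#
#   if __name__ == '__main__':
#       app.run(debug=True)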
|
[
"bugtijamal@gmail.com"
] |
bugtijamal@gmail.com
|
7c24f6c2100d2d03c1dcef8554a3eb2f465a6538
|
79b4b594208fa79f33fdf2ea9b2296a8ddf54087
|
/Social Media Ontology/flickr_api-0.4/flickr_api/keys.py
|
2d76be5e115034a94a1c300377b8ec20790e1e83
|
[] |
no_license
|
sheetalsh456/Crime-Ontology-Enrichment-Using-News-and-Social-Media
|
67b9121f9beb1d6ecb8e5539a032a9437875ffb1
|
5f6125a3c85867c598cadbe2baa84cc20e35a8fe
|
refs/heads/master
| 2020-03-20T21:10:59.932532
| 2018-06-18T08:33:50
| 2018-06-18T08:33:50
| 137,727,551
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
API_KEY = "c0fbe33d0663c2a99981d03c9d21d9a1"
API_SECRET = "8c76a2ad387c546e"
try:
import flickr_keys
API_KEY = flickr_keys.API_KEY
API_SECRET = flickr_keys.API_SECRET
except ImportError:
pass
def set_keys(api_key, api_secret):
global API_KEY, API_SECRET
API_KEY = api_key
API_SECRET = api_secret
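# Hedged usage sketch: overriding the bundled keys at runtime (the key values
# below are placeholders, not real credentials):
#
#   import flickr_api
#   flickr_api.set_keys(api_key='YOUR_API_KEY', api_secret='YOUR_API_SECRET')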
|
[
"sheetalsh456@gmail.com"
] |
sheetalsh456@gmail.com
|
da01ce9e8217ad95f045cc6cdc61d65ad111b646
|
2d0eda15d99d945b9dfa25ccfa701fbdac70bf69
|
/invoicemanager/wsgi.py
|
ab0d00532585d25b976d24045f9ebdc998fa9506
|
[] |
no_license
|
Williano/Invoice-Management-System
|
9df1267f59bb1faaacb7b1864dbcf3face5cf549
|
7ad761973d1db913a166a9006a27042441c14f10
|
refs/heads/master
| 2022-10-27T19:44:47.177462
| 2022-09-26T03:42:56
| 2022-09-26T03:42:56
| 149,183,182
| 33
| 10
| null | 2022-04-26T02:08:52
| 2018-09-17T20:21:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
WSGI config for invoicemanager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "invoicemanager.settings")
application = get_wsgi_application()
|
[
"paawilly17@gmai.com"
] |
paawilly17@gmai.com
|
843e5f8cf59b3b2b9f8dd6abd7edf6e1dc8c0467
|
b888de0906d9055246e8a8f334f3de914cddb64b
|
/otp.py
|
a4a00b9cabe1abba727043163b8961764563c0e6
|
[] |
no_license
|
Tavrovskiy/7_Symmetric_ciphers
|
c45f3082ace845a20307b03b7a69bb302c635f3c
|
8509578cb767ce3a6ad866f570c08eca151876f9
|
refs/heads/master
| 2020-11-30T04:04:22.265193
| 2019-12-26T16:58:51
| 2019-12-26T16:58:51
| 230,296,650
| 0
| 0
| null | 2019-12-26T16:44:55
| 2019-12-26T16:44:55
| null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
def vernam(k, m):
res = ''
for ind, s in enumerate(m):
cur_k = ord(k[ind % len(k)])
res += chr(ord(s) ^ cur_k)
return res
s = 'oh hi mark'
print("Исходная строка: " + s)
example1 = vernam('qwe', s)
print("\n Шифр:" + example1)
hack = vernam('qwe', example1)
print("\n Взлом шифра: " + hack)
|
[
"Alex_Tav@icloud.com"
] |
Alex_Tav@icloud.com
|
bf8279023591a8e93a533f29dfd93aeb05bed3d2
|
77e7bf85a0a39031de0ce11a095303a775700a91
|
/gis/parse_raw.py
|
f834c3d3839a5d3bb1286588703700118773eb2f
|
[] |
no_license
|
simrayyyy/uk-districts
|
ea8f86210b7d02f83c45d9116c12785b8d6557eb
|
fe18edcc4aa7d6dd8364ec9348c17b4e356ea9d9
|
refs/heads/master
| 2022-11-12T09:31:51.842471
| 2020-06-17T08:30:21
| 2020-06-17T08:30:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
import sys
import fiona
from shapely.geometry import Polygon
from shapely.geometry import MultiPolygon
import shapely.geometry
fn=sys.argv[1]
with fiona.open(fn) as uk_gis:
    # print CSV header
    print("objectid,name,area,centroid_x,centroid_y")
for shape in uk_gis:
#objectid = shape["objectid"]
properties = shape["properties"]
objectid = properties["objectid"]
area_name = properties["lad17nm"]
geom = shape["geometry"]
if geom["type"] == 'MultiPolygon':
            mp = MultiPolygon(shapely.geometry.shape(geom))
            areas = []
            for p in mp.geoms:  # .geoms works on shapely 1.x and is required in 2.x
                areas.append(p.area)
            max_area = max(areas)
            max_polygon = list(mp.geoms)[areas.index(max_area)]
areas.remove(max_area)
for a in areas:
if a >= max_area/50:
sys.stderr.write("Warning multi-polygon for objectid " \
+ str(objectid) + "\n")
centroid = max_polygon.centroid
print(str(objectid) + "," + "\"" + str(area_name) + "\"" + "," + \
str(max_polygon.area) + "," + \
str(centroid.x) + "," + str(centroid.y))
elif geom["type"] == 'Polygon':
p = Polygon(shapely.geometry.shape(geom))
centroid = p.centroid
print(str(objectid) + "," + "\"" + str(area_name) + "\"" + "," + \
str(p.area) + "," + \
str(centroid.x) + "," + str(centroid.y))
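# Usage sketch (the shapefile name is illustrative; any shapefile with
# `objectid` and `lad17nm` properties should work):
#
#     python parse_raw.py Local_Authority_Districts.shp > district_centroids.csv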
|
[
"pieter.libin@vub.ac.be"
] |
pieter.libin@vub.ac.be
|
6ed5ed6002d8c19eb9118c1f57f356dd2c2be4b8
|
e983ff3137b38a9a984c54466e76c530bf1e95de
|
/HomeControlWeb/common/templatetags/navbar.py
|
8af58895f6622e329c395ac3771cbe6595230440
|
[
"Apache-2.0"
] |
permissive
|
itamaro/home-control-web
|
43c75d312c06e746c78b4c13657675571c868e6c
|
de8b9020548e1634d3312a992e1a29351a780d2d
|
refs/heads/master
| 2016-09-06T04:47:47.107092
| 2013-10-30T11:31:40
| 2013-10-30T11:31:40
| 12,058,092
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
from django import template
from django.core.urlresolvers import reverse  # moved to django.urls in Django >= 2.0
register = template.Library()
@register.simple_tag(takes_context=True)
def nav_item_active(context, lookup_view):
    "Return 'active' if the `lookup_view` matches the active view"
    return 'active' if context['request'].path.startswith(reverse(lookup_view)) else ''
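# Usage sketch in a template (the view name 'home' is illustrative):
#
#     {% load navbar %}
#     <li class="{% nav_item_active 'home' %}">
#         <a href="{% url 'home' %}">Home</a>
#     </li>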
|
[
"itamarost@gmail.com"
] |
itamarost@gmail.com
|
cf3291941427d53935ae71d23e4d8c8974616f9a
|
2c60ab1041d60f2c4d0a92cb35f074a1f0a18cc9
|
/term2/particle_filters_python/5_important_weight.py
|
4f62368078b9c19053808428548cc7f50be781d5
|
[] |
no_license
|
haopo2005/SelfDrivingCar_Udacity
|
12f964fa28d68326da6e851fe54ac40b9a6c2eaa
|
70f8b7f3aba5128e40774773977af019476cbcb9
|
refs/heads/master
| 2020-03-28T08:54:17.842482
| 2018-12-29T05:49:30
| 2018-12-29T05:49:30
| 147,997,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,123
|
py
|
# Now we want to give weight to our
# particles. This program will print a
# list of 1000 particle weights.
#
# Don't modify the code below. Please enter
# your code at the bottom.
from math import *
import random
landmarks = [[20.0, 20.0], [80.0, 80.0], [20.0, 80.0], [80.0, 20.0]]
world_size = 100.0
class robot:
def __init__(self):
self.x = random.random() * world_size
self.y = random.random() * world_size
self.orientation = random.random() * 2.0 * pi
        self.forward_noise = 0.0
        self.turn_noise = 0.0
        self.sense_noise = 0.0
def set(self, new_x, new_y, new_orientation):
if new_x < 0 or new_x >= world_size:
            raise ValueError('X coordinate out of bound')
if new_y < 0 or new_y >= world_size:
            raise ValueError('Y coordinate out of bound')
if new_orientation < 0 or new_orientation >= 2 * pi:
            raise ValueError('Orientation must be in [0..2pi]')
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation)
def set_noise(self, new_f_noise, new_t_noise, new_s_noise):
# makes it possible to change the noise parameters
# this is often useful in particle filters
        self.forward_noise = float(new_f_noise)
        self.turn_noise = float(new_t_noise)
        self.sense_noise = float(new_s_noise)
def sense(self):
Z = []
for i in range(len(landmarks)):
dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
dist += random.gauss(0.0, self.sense_noise)
Z.append(dist)
return Z
def move(self, turn, forward):
if forward < 0:
            raise ValueError('Robot cannot move backwards')
# turn, and add randomness to the turning command
orientation = self.orientation + float(turn) + random.gauss(0.0, self.turn_noise)
orientation %= 2 * pi
# move, and add randomness to the motion command
dist = float(forward) + random.gauss(0.0, self.forward_noise)
x = self.x + (cos(orientation) * dist)
y = self.y + (sin(orientation) * dist)
x %= world_size # cyclic truncate
y %= world_size
# set particle
res = robot()
res.set(x, y, orientation)
res.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
return res
def Gaussian(self, mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
return exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / sqrt(2.0 * pi * (sigma ** 2))
def measurement_prob(self, measurement):
# calculates how likely a measurement should be
        prob = 1.0
for i in range(len(landmarks)):
dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
prob *= self.Gaussian(dist, self.sense_noise, measurement[i])
return prob
def __repr__(self):
return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
#myrobot = robot()
#myrobot.set_noise(5.0, 0.1, 5.0)
#myrobot.set(30.0, 50.0, pi/2)
#myrobot = myrobot.move(-pi/2, 15.0)
#print(myrobot.sense())
#myrobot = myrobot.move(-pi/2, 10.0)
#print(myrobot.sense())
#### DON'T MODIFY ANYTHING ABOVE HERE! ENTER CODE BELOW ####
myrobot = robot()
myrobot = myrobot.move(0.1, 5.0)
Z = myrobot.sense()
N = 1000
p = []
for i in range(N):
x = robot()
x.set_noise(0.05, 0.05, 5.0)
p.append(x)
p2 = []
for i in range(N):
p2.append(p[i].move(0.1, 5.0))
p = p2
w = []
#insert code here!
for i in range(N):
w.append(p[i].measurement_prob(Z))
print(w)  # Please print w for grading purposes.
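# For the resampling step that follows in a particle filter, these raw
# likelihoods are normalized into a probability distribution:
#
#     total = sum(w)
#     w_norm = [wi / total for wi in w]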
|
[
"haopo_2005@sina.com"
] |
haopo_2005@sina.com
|
ca186d71a8fd535aeb0e69a2140ae94c1919bccc
|
e886db017985b853b220ce75fec51b80943116c5
|
/pSp/criteria/w_norm.py
|
1a8c092d0d985b31860e6b5ec348a771c8f7605d
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pobbyleesh/TransStyleGAN
|
74b385a519e724f1e4bfa0052e9df72636eebe15
|
a1e6ca2be734c915ed27837726c0698d3f06f11b
|
refs/heads/main
| 2023-05-12T23:58:30.696052
| 2021-06-04T06:29:41
| 2021-06-04T06:29:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
import torch
from torch import nn
class WNormLoss(nn.Module):
def __init__(self, start_from_latent_avg=True):
super(WNormLoss, self).__init__()
self.start_from_latent_avg = start_from_latent_avg
def forward(self, latent, latent_avg=None):
if self.start_from_latent_avg:
latent = latent - latent_avg
return torch.sum(latent.norm(2, dim=(1, 2))) / latent.shape[0]
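# A usage sketch (shapes are illustrative: a batch of 4 W+ latents with 18
# style vectors of dimension 512):
#
#     loss_fn = WNormLoss(start_from_latent_avg=True)
#     latent = torch.randn(4, 18, 512)
#     latent_avg = torch.zeros(18, 512)   # broadcasts over the batch
#     loss = loss_fn(latent, latent_avg)  # scalar: mean per-sample L2 norm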
|
[
"noreply@github.com"
] |
pobbyleesh.noreply@github.com
|
4c9ead13981caf01a0870ccf697e8582ce3d52f7
|
64d9641602a2f9a32c8152267244a82622eeaa31
|
/plugins/logger.py
|
680eb77c74aff6b3818e9e91b0d970a40b872b89
|
[] |
no_license
|
rbistolfi/Lalita-plugins
|
b9134f15e8dca0fb7d56c387e4ed816d14b0b589
|
05a70ea1df1b1b491b38ab4501655b239c330e41
|
refs/heads/master
| 2021-01-25T10:00:03.104780
| 2010-02-05T00:01:03
| 2010-02-05T00:01:03
| 451,140
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,773
|
py
|
# -*- coding: utf8 -*-
u'''A channel logger for the Lalita IRC bot.'''
from __future__ import with_statement
__author__ = 'rbistolfi'
__license__ = 'GPLv3'
import re
from lalita import Plugin
from time import gmtime
from twisted.internet import task
TRANSLATION_TABLE = {}
class Logger(Plugin):
"""A IRC channel Logguer for Lalita.
Log what is said in a channel and upload the thing to github.
"""
exclude = re.compile(r'^#')
def init(self, config):
"""Plugin intitalization."""
self.register_translation(self, TRANSLATION_TABLE)
self.register.events.PUBLIC_MESSAGE(self.push)
self.register.events.COMMAND(self.log, ['log'])
        self.messages = {}  # maps channel -> list of (date, user, message)
# dispatch for subcommands
self.dispatch = {
'start': self.start,
'stop': self.stop,
'commit': self.commit,
}
# config
self.base_dir = self.config.get('base_dir', ".")
time_gap = self.config.get('time_gap', 3600.0)
# schedule
schedule = task.LoopingCall(self.commit)
schedule.start(time_gap, now=False) # call every X seconds
## Methods implementing the user interface
def log(self, user, channel, command, *args):
u"""Upload the channel log to github. Usage: @log [start, stop, commit]"""
usage = u'@log [start, stop, commit]'
if args[0] in self.dispatch:
self.dispatch[args[0]](user, command, channel, *args)
else:
self.say(channel, u'%s: Usage: %s', user, usage)
def start(self, user, channel, command, *args):
"""Start logging the channel."""
pass
def stop(self, user, channel, command, *args):
"""Stop logging a channel."""
pass
def commit(self, user, channel, command, *args):
"""Force a commit to github right now. A user is able to save the log
even at non scheduled time."""
pass
## Methods implementing string handling
def push(self, user, channel, message):
"""Push a message to the buffer."""
date = "GMT %r-%r-%r %r:%r:%r" % gmtime()[:6]
self.messages.get('channel', []).append((date, user, message))
def format(self, message):
"""Gives format to a message."""
return "[%s] %s: %s" % message
## Methods implementing git backend
def git_init_repository(self):
"""Initializes a git repository. Checks if a .git directory exists in
        the configured location and creates a new repository if it doesn't."""
pass
def git_commit(self):
"""Executes git commit command."""
pass
def git_push(self):
"""Executes git push command."""
pass
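    # The three git methods above are stubs. A minimal sketch of how they
    # might shell out to git (the remote and branch names are assumptions):
    #
    #     import subprocess
    #
    #     def git_commit(self):
    #         subprocess.check_call(['git', 'add', '-A'], cwd=self.base_dir)
    #         subprocess.check_call(['git', 'commit', '-m', 'log update'],
    #                               cwd=self.base_dir)
    #
    #     def git_push(self):
    #         subprocess.check_call(['git', 'push', 'origin', 'master'],
    #                               cwd=self.base_dir)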
|
[
"rbistolfi@gmail.com"
] |
rbistolfi@gmail.com
|
d79c566868e846226e47d3a05785c4b99b181782
|
5c5e600c36f485ed53f4830e8c2aae0905a85a9d
|
/zuker/stu/env_scrapy_python3.5.4/bin/ckeygen
|
9b918a5d3771de4f10104f1d4e81c5a96914a6b2
|
[] |
no_license
|
njxshr/codes
|
f60041451407396c3f529c993af8c7c13e6a3518
|
9621a496eaa2fdec43e646840ed2e889dc9ea2e8
|
refs/heads/master
| 2022-11-09T14:29:06.700324
| 2018-06-09T12:41:46
| 2018-06-09T12:41:46
| 106,446,382
| 3
| 1
| null | 2022-10-18T17:54:37
| 2017-10-10T17:03:28
|
Python
|
UTF-8
|
Python
| false
| false
| 263
|
#!/Users/lee/.virtualenvs/article_spider/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twisted.conch.scripts.ckeygen import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"lizhao2@chanjet.com"
] |
lizhao2@chanjet.com
|
|
c8a65b947246bab93ba35b55e2700ac66f889490
|
13fe0f02f829062cb5d3870d1f207eb8df1de8c0
|
/DataReader.py
|
8ad5fef3b49b57caf3192633291c1e6c54283bbf
|
[] |
no_license
|
CillianWang/ENN
|
d00c6712f066201eb151eb80381aa1b00cba3af5
|
2efdd739d12ec0280216745706a9d5f956d6d3fe
|
refs/heads/main
| 2023-08-07T19:37:56.939146
| 2021-10-08T08:52:01
| 2021-10-08T08:52:01
| 396,851,931
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,761
|
py
|
import mne
import os
folder = "haaglanden-medisch-centrum-sleep-staging-database-1.0.0\haaglanden-medisch-centrum-sleep-staging-database-1.0.0/recordings"
filenames = os.listdir(folder)
data_list = []
recording_list = []
index = 0
# read data.edf and recording.txt, as recording.edf contains strange floats
for file in filenames:
if index%3 == 0:
data_list.append(file)
if index%3 == 2:
recording_list.append(file)
index += 1
if len(data_list) != len(recording_list):
print("Data and recording not aligned")
else:
print("Length of data:"+str(len(data_list))+", and length of recordings:"+str(len(recording_list)))
def read_data(filename):
data = mne.io.read_raw_edf(filename)
raw_data = data.get_data()
return raw_data
def read_recordings(filename):
    with open(filename) as f:
        rec = [line.strip().split(',') for line in f]
    return rec
# save data(cut)
a = read_data(folder+"\\"+data_list[0])
b = read_recordings(folder+"\\"+recording_list[0])
import numpy as np
np.save('a', a)
c = np.load('a.npy')
def data_cut(data, recording, index):
path = "data_cut\\"+str(index)
if os.path.isdir(path)==False:
os.makedirs(path)
n = len(recording)
for i in range(n):
        section = data[:, i*30*256:(i+1)*30*256]  # one 30 s scored epoch at 256 Hz
if os.path.isdir(path+"\\data")==False:
os.makedirs(path+"\\data")
np.save(path+"\\data\\"+"data_"+str(i), section)
truth = recording[i]
if os.path.isdir(path+"\\truth")==False:
os.makedirs(path+"\\truth")
np.save(path+"\\truth\\"+"truth_"+str(i), truth)
for i in range(154):
a = read_data(folder+"\\"+data_list[i])
b = read_recordings(folder+"\\"+recording_list[i])
data_cut(a,b,i)
|
[
"x.wang3@student.tue.nl"
] |
x.wang3@student.tue.nl
|
ca787f26d532cdb9922b41df3344d8a755d4d59a
|
2410c26369f097c7725af747f64ab4d429103578
|
/aplicacion/aplicacion/settings.py
|
eb9f51bba47e1d8f20826bd565c8461ebe8ea996
|
[] |
no_license
|
miguelUGR/TFG
|
937d69a9aa1f9625b8e65d254c994f7bbe8b4d27
|
2a5fc78f03c7bc739e26896a990bc9d354cacbb0
|
refs/heads/master
| 2022-12-15T19:44:21.104448
| 2020-07-03T15:43:46
| 2020-07-03T15:43:46
| 215,865,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,239
|
py
|
"""
Django settings for aplicacion project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'zjfv$ys!&--!wds!$#2ub*yb0yhvc+ke9eic^1jtq=0sf!r$k0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # if set to True it shows the default error page; it is not advisable to reveal the available urls to anyone
#ALLOWED_HOSTS = []
ALLOWED_HOSTS = ['*']  # we add '*' so it can be served from anywhere
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites', # new p6
    'allauth',  # new p6
    'allauth.account',  # new p6: ADDS Email Addresses and login.html (account_signup...etc); these are DJANGO's own urls
    'allauth.socialaccount',  # new p6s
    'desarrollo',  # new
    'django_cleanup.apps.CleanupConfig',  # for AUTOMATIC DELETION of the Observaciones image,
'datetimewidget',
]
SITE_ID = 1  # VERY IMPORTANT: needed to access /admin
AUTH_USER_MODEL = 'desarrollo.Usuario'  # lets us use the user model built from django's own User
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'aplicacion.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],  # new
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
#------------------------P6----------------------------------
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
LOGIN_REDIRECT_URL = "base"  # on successful login, the user is sent wherever you specify
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'  # so it doesn't crash when an email is entered; in production mode a verification email is sent
ACCOUNT_EMAIL_REQUIRED = True  # so the email is not optional on sign-up
ACCOUNT_FORMS = {'signup': 'desarrollo.forms.MiSignupForm'}  # so it picks up the class and adds the fields we want for signing up from the web
# LOGIN_URL = '/account/login/'
#-------------------------------------------------------------
#----Folder for the image (ImageField)------------------------
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#-------------------------------------------------------------
WSGI_APPLICATION = 'aplicacion.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
#------------------------------------THE FOLLOWING IS COMMENTED OUT-----------------------------
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'  # set to English because of decimal-comma issues (40,0239); the models need dot decimals (40.0239)
# LANGUAGE_CODE = 'es-es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]  # look for static files in static/
|
[
"breva75@hotmail.com"
] |
breva75@hotmail.com
|
91f6eb8a75fe82fa8ce346dc0e1f140bb08367a3
|
259bf9a65e399156148e140d3ce8d15adf9d4b88
|
/business.py
|
ecfce8ca96b0aaeec562fb0a0e19a41d0640002b
|
[] |
no_license
|
NOORMOHIDEEN/Salmon-quantity-and-its-export
|
d20726ed405b180471ec2da6cca9e2b29f5ab439
|
eab156b85f35e8923da480b9a34a74c69b5c9a7d
|
refs/heads/main
| 2023-04-18T09:59:09.438174
| 2021-05-03T14:02:53
| 2021-05-03T14:02:53
| 363,947,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
import pandas as pd
def get_data():
df = pd.read_csv('data.csv')
print(df['Year'].tolist())
year = df['Year'].tolist()
SalmonQuantity_data = df['SalmonQuantity'].tolist()
Expo_data = df['Expo'].tolist()
# print(df['quebec'].tolist())
result_dict = {
'year' : year,
'SalmonQuantity' : SalmonQuantity_data,
'Expo' : Expo_data
}
# print(result_dict)
return result_dict
def add_row(year, SalmonQuantity, Expo):
    df = pd.read_csv('data.csv')
    new_row = {
        'Year': year,
        'SalmonQuantity': SalmonQuantity,
        'Expo': Expo
    }
    print(df)
    # DataFrame.append was removed in pandas 2.0; concatenate a one-row frame instead
    df = pd.concat([df, pd.DataFrame([new_row])], ignore_index=True)
    print(df)
    df.to_csv('data.csv', index=False)  # index=False avoids writing a duplicate index column
if __name__ == "__main__":
get_data()
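# Expected layout of data.csv (column names come from the code above; the
# values are illustrative):
#
#     Year,SalmonQuantity,Expo
#     2018,1200,350
#     2019,1350,410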
|
[
"noreply@github.com"
] |
NOORMOHIDEEN.noreply@github.com
|
3f5227799880ede2f6a93b4a8d18f1d01d2ab6ca
|
64c07601b745c0be2c89deb43d543f2ccf1420d0
|
/learning/models.py
|
23d5093d0400544bebba52bae484dc2a358b14bf
|
[] |
no_license
|
shisz/UWB_ML
|
2c9617d35673279f41e2f28680a0ee0b145ed79d
|
edd3eeafb8240d7acb95c2992cf6546007d2d5f5
|
refs/heads/master
| 2022-04-02T12:03:20.012832
| 2020-01-17T15:20:58
| 2020-01-17T15:20:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,069
|
py
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import *
from torch.autograd import Variable
from learning.npn import NPNLinear
from learning.npn import NPNRelu
from learning.npn import NPNSigmoid
from learning.npn import NPNDropout
from learning.npn import KL_loss
from learning.npn import L2_loss
from learning.deconv_block import *
class BottleNeck1d_3(nn.Module):
"""
ResNet 3 conv residual block
batchnorm + preactivation
dropout used when net is wide
"""
def __init__(self, in_channels, hidden_channels, out_channels, stride, kernel_size, group_num=1, use_bn=True):
super(BottleNeck1d_3, self).__init__()
self.stride = stride
self.in_channels = in_channels
self.out_channels = out_channels
self.use_bn = use_bn
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0)
self.conv1 = nn.Conv1d(in_channels, hidden_channels, kernel_size=1, stride=1,
padding=0)
self.bn1 = nn.BatchNorm1d(hidden_channels)
self.conv2 = nn.Conv1d(hidden_channels, hidden_channels, kernel_size=kernel_size, stride=stride,
padding=(kernel_size - 1) // 2, groups=group_num)
self.bn2 = nn.BatchNorm1d(hidden_channels)
self.conv3 = nn.Conv1d(hidden_channels, out_channels, kernel_size=1, stride=1,
padding=0)
self.bn3 = nn.BatchNorm1d(out_channels)
def forward(self, x):
if self.stride != 1 or self.in_channels != self.out_channels:
y = self.shortcut(x)
else:
y = x
if self.use_bn:
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
else:
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = self.conv3(x)
x = F.relu(x + y)
return x
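# A quick shape check for the block above (sizes are illustrative): with
# stride=2 the temporal length halves and channels go in_channels -> out_channels.
#
#     block = BottleNeck1d_3(in_channels=8, hidden_channels=4, out_channels=16,
#                            stride=2, kernel_size=3, group_num=2)
#     x = torch.randn(2, 8, 64)   # (batch, channels, length)
#     y = block(x)                # -> torch.Size([2, 16, 32])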
class Enc(nn.Module):
def __init__(self, args):
super(Enc, self).__init__()
self.type = args.enc_type
self.fc_drop = 0.5
if self.type == 'mlp':
width = 64
self.fc1 = nn.Linear(INPUT_DIM, width)
self.fc2 = nn.Linear(width, width)
self.fc3 = nn.Linear(width, 1)
# self.dropout1 = nn.Dropout(p=0.2)
# self.dropout2 = nn.Dropout(p=0.2)
self.bn1 = nn.BatchNorm1d(width)
self.bn2 = nn.BatchNorm1d(width)
elif self.type == 'npn':
width = 128
self.fc1 = NPNLinear(INPUT_DIM, width, dual_input=False, first_layer_assign=True)
self.nonlinear1 = NPNRelu()
# self.dropout1 = NPNDropout(self.fc_drop)
self.fc2 = NPNLinear(width, 1)
# self.nonlinear2 = NPNSigmoid()
elif self.type == 'cnn':
width = 8
use_bn=False
self.conv0 = nn.Conv1d(1, width, kernel_size=3, stride=2, padding=5)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width//2,
out_channels=width * 2, stride=2, kernel_size=3, group_num=width//4, use_bn=use_bn)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width//2,
out_channels=width * 2, stride=2, kernel_size=3, group_num=width//4, use_bn=use_bn)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2, use_bn=use_bn)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2, use_bn=use_bn)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2, use_bn=use_bn)
# 16 left
self.pooling = nn.AvgPool1d(kernel_size=4, stride=4)
# 4 left
self.fc1 = nn.Linear(width * 4 * 4, width * 4 * 4)
self.dropout1 = nn.Dropout(p=0.2)
self.dropout2 = nn.Dropout(p=0.2)
self.fc2 = nn.Linear(width * 4 * 4, width * 4 * 4)
self.fc3 = nn.Linear(width * 4 * 4, 1)
elif self.type == 'cnn1':
width = args.cnn_width
self.conv0 = nn.Conv1d(1, width, kernel_size=3, stride=2, padding=4)
self.block0 = BottleNeck1d_3(in_channels=width, hidden_channels=width // 4,
out_channels=width, stride=2, kernel_size=3, group_num=width // 8)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width//2,
out_channels=width * 2, stride=2, kernel_size=3, group_num=width//4)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width//2,
out_channels=width * 2, stride=2, kernel_size=3, group_num=width//4)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2)
# 16 left
self.pooling = nn.AvgPool1d(kernel_size=8, stride=8)
self.fc1 = nn.Linear(width * 4, 1)
elif self.type == 'cnn2':
width = 8
use_bn = False
self.conv0 = nn.Conv1d(1, width, kernel_size=3, stride=2, padding=5)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width,
out_channels=width * 2, stride=2, kernel_size=3, group_num=width // 4,
use_bn=use_bn)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 2, stride=2, kernel_size=3, group_num=width // 4,
use_bn=use_bn)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width*2,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width // 2,
use_bn=use_bn)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width*2,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width // 2,
use_bn=use_bn)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width*4,
out_channels=width * 8, stride=2, kernel_size=3, group_num=width,
use_bn=use_bn)
self.block6 = BottleNeck1d_3(in_channels=width * 8, hidden_channels=width * 4,
out_channels=width * 8, stride=2, kernel_size=3, group_num=width,
use_bn=use_bn)
# 8 left
self.pooling = nn.AvgPool1d(kernel_size=8, stride=8)
# 4 left
self.fc1 = nn.Linear(width * 8, width * 8)
self.dropout1 = nn.Dropout(p=0.2)
self.dropout2 = nn.Dropout(p=0.2)
self.fc2 = nn.Linear(width * 8, width * 8)
self.fc3 = nn.Linear(width * 8, 1)
elif self.type == 'combined':
width = 16
self.conv0 = nn.Conv1d(1, width, kernel_size=5, stride=2, padding=2)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width//2,
out_channels=width * 2, stride=2, kernel_size=3, group_num=width//4)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width//2,
out_channels=width * 2, stride=2, kernel_size=3, group_num=width//4)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=3, group_num=width//2)
# 16 left
self.pooling = nn.AvgPool1d(kernel_size=8, stride=8)
self.fc1 = NPNLinear(width * 8, width * 16, dual_input=False, first_layer_assign=False)
self.nonlinear1 = NPNRelu()
# self.dropout1 = NPNDropout(self.fc_drop)
self.fc2 = NPNLinear(width * 16, 1)
elif self.type == 'combined_dis':
width = 16
kernel_size = 3
self.conv0 = nn.Conv1d(1, width, kernel_size=5, stride=2, padding=2)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width//2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width//4)
# self.block1_1 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width // 2,
# out_channels=width * 2, stride=1, kernel_size=kernel_size,
# group_num=width // 4)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width//2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width//4)
# self.block2_1 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width // 2,
# out_channels=width * 2, stride=1, kernel_size=kernel_size,
# group_num=width // 4)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width//2)
# self.block3_1 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
# out_channels=width * 4, stride=1, kernel_size=kernel_size,
# group_num=width // 2)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width//2)
# self.block4_1 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
# out_channels=width * 4, stride=1, kernel_size=kernel_size,
# group_num=width // 2)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width//2)
# 16 left
self.pooling = nn.AvgPool1d(kernel_size=8, stride=8)
self.fc1 = NPNLinear(width * 4 * 2 + 1, width * 4 * 4, dual_input=False, first_layer_assign=False)
self.nonlinear1 = NPNRelu()
# self.dropout1 = NPNDropout(self.fc_drop)
self.fc2 = NPNLinear(width * 4 * 4, 1)
def forward(self, x):
if self.type == 'mlp':
x = F.relu(self.fc1(x))
# x = self.dropout1(x)
x = F.relu(self.fc2(x))
# x = self.dropout2(x)
x = self.fc3(x)
return x
elif self.type == 'npn':
x = self.nonlinear1(self.fc1(x))
# x = self.dropout1(x)
x = self.fc2(x)
# x, s = self.nonlinear2(x)
a_m, a_s = x
return a_m, a_s
elif self.type == 'cnn':
x = x.unsqueeze(1)
x = self.conv0(x)
x = self.block1.forward(x)
x = self.block2.forward(x)
x = self.block3.forward(x)
x = self.block4.forward(x)
x = self.block5.forward(x)
x = self.pooling(x)
x_size = x.size()
# x = x.squeeze(2)
x = x.view(x_size[0], x_size[1] * x_size[2])
x = F.relu(self.fc1(x))
x = self.dropout1(x)
x = F.relu(self.fc2(x))
x = self.dropout2(x)
x = self.fc3(x)
return x
elif self.type == 'cnn1':
x = x.unsqueeze(1)
x = self.conv0(x)
x = self.block0.forward(x)
x = self.block1.forward(x)
x = self.block2.forward(x)
x = self.block3.forward(x)
x = self.block4.forward(x)
x = self.block5.forward(x)
x = self.pooling(x)
x = x.squeeze(2)
x = self.fc1(x)
return x
elif self.type == 'cnn2':
x = x.unsqueeze(1)
x = self.conv0(x)
# x = self.block0.forward(x)
x = self.block1.forward(x)
x = self.block2.forward(x)
x = self.block3.forward(x)
x = self.block4.forward(x)
x = self.block5.forward(x)
x = self.block6.forward(x)
x = self.pooling(x)
x = x.squeeze(2)
x = F.relu(self.fc1(x))
x = self.fc3(x)
return x
elif self.type == 'combined':
x = x.unsqueeze(1)
x = self.conv0(x)
x = self.block1.forward(x)
x = self.block2.forward(x)
x = self.block3.forward(x)
x = self.block4.forward(x)
x = self.block5.forward(x)
x = self.pooling(x)
x = x.squeeze(2)
x = self.nonlinear1(self.fc1(x))
# x = self.dropout1(x)
x = self.fc2(x)
a_m, a_s = x
return a_m, a_s
elif self.type == 'combined_dis':
wave, dis = x
x = wave.unsqueeze(1)
dis = dis.unsqueeze(1)
x = self.conv0(x)
x = self.block1.forward(x)
# x = self.block1_1.forward(x)
x = self.block2.forward(x)
# x = self.block2_1.forward(x)
x = self.block3.forward(x)
# x = self.block3_1.forward(x)
x = self.block4.forward(x)
# x = self.block4_1.forward(x)
x = self.block5.forward(x)
x = self.pooling(x)
# x = x.squeeze(2)
x_size = x.size()
x = x.view(x_size[0], x_size[1] * x_size[2])
x = torch.cat((dis, x), dim=1)
x = self.nonlinear1(self.fc1(x))
# x = self.dropout1(x)
x = self.fc2(x)
a_m, a_s = x
return a_m, a_s
class VaeEnc(nn.Module):
def __init__(self, args):
super(VaeEnc, self).__init__()
self.type = args.enc_type
if self.type == 'vae':
self.width = 32
width = self.width
kernel_size = 5
self.conv0 = nn.Conv1d(1, width, kernel_size=kernel_size, stride=2, padding=4+2)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
# 16 left
self.pooling = nn.AvgPool1d(kernel_size=2, stride=2)
self.conv_fc = nn.Conv1d(width * 4, width * 4, kernel_size=1, stride=1, padding=0)
self.fc1 = NPNLinear(width * 2 * 8 + 1, width * 4)
self.nonlinear1 = NPNRelu()
# self.dropout1 = NPNDropout(self.fc_drop)
self.fc2 = NPNLinear(width * 4, 1)
elif self.type == 'vae_1':
self.width = 32
width = self.width
kernel_size = 5
self.conv0 = nn.Conv1d(1, width, kernel_size=kernel_size, stride=2, padding=4+2)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block6 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width * 2,
out_channels=width * 8, stride=2, kernel_size=kernel_size,
group_num=width)
self.block7 = BottleNeck1d_3(in_channels=width * 8, hidden_channels=width * 2,
out_channels=width * 8, stride=2, kernel_size=kernel_size,
group_num=width)
# 16 left
self.pooling = nn.AvgPool1d(kernel_size=4, stride=4)
self.conv_fc = nn.Conv1d(width * 8, width * 8, kernel_size=1, stride=1, padding=0)
self.fc1 = NPNLinear(width * 4 + 1, width * 4)
self.nonlinear1 = NPNRelu()
# self.dropout1 = NPNDropout(self.fc_drop)
self.fc2 = NPNLinear(width * 4, 1)
def forward(self, x):
if self.type == 'vae':
wave, dis = x
x = wave.unsqueeze(1)
dis = dis.unsqueeze(1)
x = self.conv0(x)
x = self.block1.forward(x)
x = self.block2.forward(x)
x = self.block3.forward(x)
x = self.block4.forward(x)
x = self.block5.forward(x)
x = self.pooling(x)
x = self.conv_fc(x)
mean = x[:, :self.width * 2, :]
mean = mean.contiguous()
stddev = F.softplus(x[:, self.width * 2:, :])
stddev = stddev.contiguous()
mean = mean.view(mean.size(0), mean.size(1) * mean.size(2))
stddev = stddev.view(stddev.size(0), stddev.size(1) * stddev.size(2))
# normal_array = Variable(torch.normal(means=torch.zeros(mean.size()), std=1.0).cuda())
            normal_array = Variable(stddev.data.new(stddev.size()).normal_())
            z = normal_array.mul(stddev).add_(mean)  # reparameterization trick: z = mean + stddev * eps
# x = torch.cat((dis, z), dim=1) # this is one solution
x_m = torch.cat((dis, mean), dim=1)
x_s = torch.cat((Variable(torch.zeros((x_m.size(0), 1)).cuda()), stddev), dim=1)
x = x_m, x_s
x = self.nonlinear1(self.fc1(x))
# x = self.dropout1(x)
x = self.fc2(x)
a_m, a_s = x
return a_m, a_s, mean, stddev, z
elif self.type == 'vae_1':
wave, dis = x
x = wave.unsqueeze(1)
dis = dis.unsqueeze(1)
x = self.conv0(x)
x = self.block1.forward(x)
x = self.block2.forward(x)
x = self.block3.forward(x)
x = self.block4.forward(x)
x = self.block5.forward(x)
x = self.block6.forward(x)
x = self.block7.forward(x)
x = self.pooling(x)
x = self.conv_fc(x)
mean = x[:, :self.width * 4, :]
mean = mean.contiguous()
stddev = F.softplus(x[:, self.width * 4:, :])
stddev = stddev.contiguous()
mean = mean.view(mean.size(0), mean.size(1) * mean.size(2))
stddev = stddev.view(stddev.size(0), stddev.size(1) * stddev.size(2))
# print('stddev shape', stddev.size(), self.width, x.size())
# normal_array = Variable(torch.normal(means=torch.zeros(mean.size()), std=1.0).cuda())
normal_array = Variable(stddev.data.new(stddev.size()).normal_())
z = normal_array.mul(stddev).add_(mean)
# print('z shape', z.size())
# x = torch.cat((dis, z), dim=1) # this is one solution
x_m = torch.cat((dis, mean), dim=1)
x_s = torch.cat((Variable(torch.zeros((x_m.size(0), 1)).cuda()), stddev), dim=1)
x = x_m, x_s
x = self.nonlinear1(self.fc1(x))
# x = self.dropout1(x)
x = self.fc2(x)
a_m, a_s = x
return a_m, a_s, mean, stddev, z
class AEEnc(nn.Module):
def __init__(self, args):
super(AEEnc, self).__init__()
self.type = args.enc_type
if self.type == 'AE':
self.width = 32
width = self.width
kernel_size = 3
self.conv0 = nn.Conv1d(1, width, kernel_size=kernel_size, stride=2, padding=4)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block6 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width * 2,
out_channels=width * 8, stride=2, kernel_size=kernel_size, group_num=width //2 )
self.block7 = BottleNeck1d_3(in_channels=width * 8, hidden_channels=width * 2,
out_channels=width * 8, stride=2, kernel_size=kernel_size, group_num=width // 2)
# 4 left
self.pooling = nn.AvgPool1d(kernel_size=2, stride=2)
self.fc1 = NPNLinear(width * 8 * 2 + 1, width * 4 * 4, dual_input=False)
self.nonlinear1 = NPNRelu()
# self.dropout1 = NPNDropout(self.fc_drop)
self.fc2 = NPNLinear(width * 4 * 4, 1)
def forward(self, x):
if self.type == 'AE':
wave, dis = x
x = wave.unsqueeze(1)
dis = dis.unsqueeze(1)
x = self.conv0(x)
x = self.block1.forward(x)
x = self.block2.forward(x)
x = self.block3.forward(x)
x = self.block4.forward(x)
x = self.block5.forward(x)
x = self.block6.forward(x)
x = self.block7.forward(x)
z = self.pooling(x)
zz = z.view(z.size(0), z.size(1) * z.size(2))
zz = torch.cat((dis, zz), dim=1)
x = self.nonlinear1(self.fc1(zz))
# x = self.dropout1(x)
x = self.fc2(x)
a_m, a_s = x
# print('size z', z.size())
return a_m, a_s, z
class VaeDec(nn.Module):
def __init__(self, args):
super(VaeDec, self).__init__()
self.type = args.enc_type
if self.type == 'vae' or self.type == 'vaemlp':
width = 16
kernel_size = 5
self.upsample_layer = nn.Upsample(scale_factor=2, mode='nearest')
self.de_block1 = DeBottleNeck1d_3G(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.de_block2 = DeBottleNeck1d_3G(in_channels=width * 4, hidden_channels=width,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.de_block3 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.de_block4 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.de_block5 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.deconv = nn.ConvTranspose1d(width, 1, kernel_size=5, stride=2, padding=2+4, output_padding=1)
elif self.type == 'vae_1':
width = 16
kernel_size = 5
self.upsample_layer = nn.Upsample(scale_factor=4, mode='nearest')
self.de_block0 = DeBottleNeck1d_3G(in_channels=width * 8, hidden_channels=width * 2,
out_channels=width * 8, stride=2, kernel_size=kernel_size,
group_num=width)
self.de_block01 = DeBottleNeck1d_3G(in_channels=width * 8, hidden_channels=width * 2,
out_channels=width * 4, stride=2, kernel_size=kernel_size,
group_num=width)
self.de_block1 = DeBottleNeck1d_3G(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.de_block2 = DeBottleNeck1d_3G(in_channels=width * 4, hidden_channels=width,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.de_block3 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.de_block4 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.de_block5 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.deconv = nn.ConvTranspose1d(width, 1, kernel_size=5, stride=2, padding=2+4, output_padding=1)
def forward(self, x):
if self.type == 'vae' or self.type == 'vaemlp':
x = x.view(x.size(0), x.size(1) // 8, 8)
x = self.upsample_layer(x)
x = self.de_block1.forward(x)
x = self.de_block2.forward(x)
x = self.de_block3.forward(x) #96
x = self.de_block4.forward(x) # 192
x = self.de_block5.forward(x) # 384
x = self.deconv(x)
x = x.squeeze(1)
return x
if self.type == 'vae_1':
x = x.view(x.size(0), x.size(1), 1)
x = self.upsample_layer(x)
x = self.de_block0.forward(x)
x = self.de_block01.forward(x)
x = self.de_block1.forward(x)
x = self.de_block2.forward(x)
x = self.de_block3.forward(x) #96
x = self.de_block4.forward(x) # 192
x = self.de_block5.forward(x) # 384
x = self.deconv(x)
x = x.squeeze(1)
return x
class AEDec(nn.Module):
def __init__(self, args):
super(AEDec, self).__init__()
self.type = args.enc_type
if self.type == 'AE':
width = 32
kernel_size = 5
self.upsample_layer = nn.Upsample(scale_factor=2, mode='nearest')
self.de_block0 = DeBottleNeck1d_3G(in_channels=width * 8, hidden_channels=width,
out_channels=width * 8, stride=2, kernel_size=kernel_size,
group_num=width // 2)
self.de_block01 = DeBottleNeck1d_3G(in_channels=width * 8, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size,
group_num=width // 2)
self.de_block1 = DeBottleNeck1d_3G(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.de_block2 = DeBottleNeck1d_3G(in_channels=width * 4, hidden_channels=width,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.de_block3 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.de_block4 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.de_block5 = DeBottleNeck1d_3G(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.deconv = nn.ConvTranspose1d(width, 1, kernel_size=3, stride=2, padding=1, output_padding=1)
def forward(self, x):
if self.type == 'AE':
x = self.upsample_layer(x)
x = self.de_block0.forward(x)
x = self.de_block01.forward(x)
x = self.de_block1.forward(x)
x = self.de_block2.forward(x)
x = self.de_block3.forward(x) #96
x = self.de_block4.forward(x) # 192
x = self.de_block5.forward(x) # 384
x = self.deconv(x)
x = x.squeeze(1)
x = x[:, 4:-4]
return x
class VaeMlpEnc(nn.Module):
def __init__(self, args):
super(VaeMlpEnc, self).__init__()
self.type = args.enc_type
if self.type == 'vaemlp':
self.width = 64
width = self.width
kernel_size = 5
self.conv0 = nn.Conv1d(1, width, kernel_size=kernel_size, stride=2, padding=4)
self.block1 = BottleNeck1d_3(in_channels=width, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.block2 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width // 2,
out_channels=width * 2, stride=2, kernel_size=kernel_size, group_num=width // 4)
self.block3 = BottleNeck1d_3(in_channels=width * 2, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block4 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
self.block5 = BottleNeck1d_3(in_channels=width * 4, hidden_channels=width,
out_channels=width * 4, stride=2, kernel_size=kernel_size, group_num=width // 2)
# 16 left
self.pooling = nn.AvgPool1d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(width * 2 * 8 + 1, width * 2)
self.fc2 = nn.Linear(width * 2, 1)
def forward(self, x):
if self.type == 'vaemlp':
wave, dis = x
x = wave.unsqueeze(1)
dis = dis.unsqueeze(1)
x = self.conv0(x)
x = self.block1.forward(x)
x = self.block2.forward(x)
x = self.block3.forward(x)
x = self.block4.forward(x)
x = self.block5.forward(x)
x = self.pooling(x)
mean = x[:, :self.width * 2, :]
mean = mean.contiguous()
stddev = F.softplus(x[:, self.width * 2:, :])
stddev = stddev.contiguous()
mean = mean.view(mean.size(0), mean.size(1) * mean.size(2))
stddev = stddev.view(stddev.size(0), stddev.size(1) * stddev.size(2))
            normal_array = Variable(stddev.data.new(stddev.size()).normal_())  # device-safe; torch.normal(means=...) is the pre-0.4 API
            z = mean + stddev * normal_array  # reparameterization trick
# x = torch.cat((dis, z), dim=1) # this is one solution
x_m = torch.cat((dis, mean), dim=1)
# x_s = torch.cat((Variable(torch.zeros((x_m.size(0), 1)).cuda()), stddev), dim=1)
# x = x_m, x_s
x = F.relu(self.fc1(x_m))
# x = self.dropout1(x)
x = self.fc2(x)
return x, mean, stddev, z
|
[
"mcz13@mails.tsinghua.edu.cn"
] |
mcz13@mails.tsinghua.edu.cn
|
9c0186283e28e88a4fca848a40bc56217bd5258c
|
52bb670ddf48830f7ee1fe1343c48b94631f6a6a
|
/app/levels/SecondLevel.py
|
8349a3b1a324fc8ee63c8a946942a5ec1529d668
|
[] |
no_license
|
DmitryNeposidjaka/QuiQuaerit
|
dfbcf2991f9d2cf8cf90fb7aa1904734fa0de286
|
bf3654806fc2f7191e72892bce4d05cccc4286aa
|
refs/heads/master
| 2020-05-27T15:29:21.195780
| 2019-10-15T14:53:51
| 2019-10-15T14:53:51
| 188,681,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,185
|
py
|
import random
class SecondLevel:
number = 0
player_health = 10
enemy_health = 10
enemy_last_health = 10
enemy_numbers = []
enemy_last_number = 0
def __init__(self):
self.__gen_number()
self.player_health = 100
self.enemy_health = 100
self.enemy_last_health = 100
self.enemy_last_number = 0
self.enemy_numbers = {'win': [], 'lose': []}
def __gen_number(self):
self.number = random.randint(1, 100)
def __get_player_number(self):
inpt = input('Make try: ')
try:
return abs(int(inpt))
except:
print('Set numbers Only')
return self.__get_player_number()
def __get_enemy_number(self):
if self.enemy_last_number != 0:
if self.enemy_last_health > self.enemy_health:
self.enemy_numbers['lose'].append(self.enemy_last_number)
else:
self.enemy_numbers['win'].append(self.enemy_last_number)
        if len(self.enemy_numbers['win']) == 0 or len(self.enemy_numbers['lose']) == 0:
            guess = random.randint(1, 100)
        else:
            # the win/lose history is collected but not yet used to bias the guess
            guess = random.randint(1, 100)
self.enemy_last_number = guess
self.enemy_last_health = self.enemy_health
return guess
def __process_numbers(self, player_number, enemy_number):
player_res = abs(player_number - self.number)
ai_res = abs(enemy_number - self.number)
if player_res == 0:
self.enemy_health = 0
elif ai_res == 0:
self.player_health = 0
elif player_res > ai_res:
self.player_health -= 1
else:
self.enemy_health -= 1
def play(self):
while self.player_health > 0 and self.enemy_health > 0:
player_guess = random.randint(1, 100)
enemy_guess = self.__get_enemy_number()
self.__process_numbers(player_guess, enemy_guess)
print('You: {}\nEnemy: {}'.format(self.player_health, self.enemy_health))
if self.player_health <= 0:
            print('You lose!')
else:
print('You won')
f = open('./test.txt', 'a+')
# f.write('{:-^10}\n{win} | {lose}\nwin middle: {WM}\nlose middle: {LM}\n'.format(self.number,
# win=self.enemy_numbers['win'],
# lose=self.enemy_numbers['lose'],
# WM=round(sum(self.enemy_numbers['win']) / (len(self.enemy_numbers['win']) +1)),
# LM=round(sum(self.enemy_numbers['lose']) / (len(self.enemy_numbers['lose']) +1))
# )
# )
        f.write('win: {win}\nlose: {lose}\n'.format(win=self.enemy_numbers['win'],
                                                    lose=self.enemy_numbers['lose']))
f.close()
#print(self.enemy_numbers)
#print('{:-^10}'.format(self.number))
|
[
"dmitry.neposidyaka@uadevelopers.com"
] |
dmitry.neposidyaka@uadevelopers.com
|
5eb64c0f409d8b1a0cd06882e2362c70de3f2844
|
87e3d4b60f39417535afb41acfe07fb706504ead
|
/locallibrary/urls.py
|
a1124247039ffb9f54e8c78ce464c769a230a830
|
[] |
no_license
|
antonyuhnovets/Django_proj_test
|
5f1a81ff3643586935da13dd778b1623e9f0e75e
|
69e7642c9e24b5a997d2640dea311b3b974f3fc3
|
refs/heads/main
| 2023-09-01T13:44:17.273659
| 2021-10-22T09:33:05
| 2021-10-22T09:33:05
| 418,429,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
"""locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
from django.conf.urls import url
from django.views.generic import RedirectView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
]
urlpatterns += [
path('catalog/', include('catalog.urls')),
]
urlpatterns += [
url('accounts/', include('django.contrib.auth.urls')),
]
|
[
"Vaker1990@gmail.com"
] |
Vaker1990@gmail.com
|
449b82d8a2e6748e6f7a1f6ac56f6405019e55eb
|
acb81af03cbbf126b4ff7ae88c56454ef45b35d3
|
/accounts/migrations/0001_initial.py
|
2bb8a5055298c09e50882c7fb20de9e5d3687772
|
[] |
no_license
|
Vaibhav3009/ealter-heroku
|
6b0ebf56b74ed545f495696b2d3aa5e37893c8f1
|
8298b34f25e7f8c7fd7d67463e5155c5e002af83
|
refs/heads/master
| 2022-10-25T19:55:58.754233
| 2020-06-15T22:47:35
| 2020-06-15T22:47:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
# Generated by Django 3.0.6 on 2020-06-09 11:09
import accounts.models
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('full_name', models.CharField(blank=True, max_length=130, verbose_name='full name')),
('is_staff', models.BooleanField(default=False, verbose_name='is_staff')),
('is_active', models.BooleanField(default=True, verbose_name='is_active')),
('date_joined', models.DateField(default=datetime.date.today, verbose_name='date_joined')),
('phone_number', models.IntegerField(unique=True)),
('country_code', models.IntegerField()),
('password', models.CharField(blank=True, max_length=100)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', accounts.models.UserManager()),
],
),
]
|
[
"bansaljatin2810@gmail.com"
] |
bansaljatin2810@gmail.com
|
67cc65aade6945b85f11d8b9f0344585a2212bbc
|
6b40931daafbf9dae280579a41e8dd754fa91f9c
|
/python/skScriptedNoiseDeformer.py
|
7191cd2a9670b7ee1040ac83e503b2be09b09a01
|
[
"MIT"
] |
permissive
|
skeelogy/maya-skNoiseDeformer
|
07aaa94101a23acae8cc1c9b14bed7ea7ac78f63
|
e853aee238be695685181d93c0817a21296dab9f
|
refs/heads/master
| 2016-08-03T05:17:54.624740
| 2014-10-27T16:18:20
| 2014-10-27T16:18:20
| 20,463,496
| 10
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,902
|
py
|
"""
@author: Skeel Lee
@contact: skeel@skeelogy.com
@since: 30 May 2014
A noise deformer plugin for Maya. It deforms meshes using fBm (fractional
Brownian motion) which adds up multiple layers of Simplex noises.
---------Usage-------------
1) Load the plugin, either using the Plug-in Manager or using the following MEL
command:
loadPlugin "skScriptedNoiseDeformer.py"
2) Select a mesh
3) Attach a new noise deformer to the mesh by executing the following MEL
command:
deformer -type skScriptedNoiseDeformer
4) Adjust the noise attributes (e.g. amplitude, frequency, octaves, lacunarity)
in the channel box accordingly
5) Move/rotate/scale the accessory locator to transform the noise space, as
desired
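An equivalent Python call for steps 2 and 3 (assuming the plugin is loaded
and a mesh is selected) is:

    import maya.cmds as cmds
    cmds.deformer(type='skScriptedNoiseDeformer')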
---------Notes-------------
In order to get the fastest speed out of this Python plugin, I would recommend
compiling/installing the noise library into mayapy.
1) Download the noise library from Casey Duncan at
   https://github.com/caseman/noise. This includes some C files that need to
   be compiled into Python modules.
2) You will need a Python.h header file. If you do not already have that,
execute this command in a terminal (or the equivalent in other Linux
distros):
> sudo apt-get install python-dev
3) Execute this command in a terminal to compile and install the Python modules
into mayapy:
> sudo `which mayapy` setup.py install
4) To verify that the installation has worked, try doing this in a shell:
> mayapy
[mayapy shell loads...]
>>> import noise
>>> noise.snoise3(2, 8, 3)
-0.6522196531295776
Note that this Python plugin will still work if you are unable to perform the
steps above. The plugin will fall back to a pure-Python perlin.py module from
Casey Duncan if it cannot find the compiled noise module above. The speed is
much slower though and I would strongly recommend getting the above steps to
work if you are keen to use this Python plugin.
---------Credits-------------
This plugin uses the noise library from Casey Duncan:
https://github.com/caseman/noise
---------License-------------
Released under The MIT License (MIT) Copyright (c) 2014 Skeel Lee
(http://cg.skeelogy.com)
"""
try:
#import the faster C-based noise module
#if user has compiled/installed it to mayapy
import noise
except:
#otherwise just import the slower pure-python perlin module
#because it works out-of-the-box without installation
import libnoise.perlin
noise = libnoise.perlin.SimplexNoise()
import sys
import maya.OpenMaya as om
import maya.OpenMayaMPx as omMPx
nodeType = 'skScriptedNoiseDeformer'
nodeVersion = '1.0'
nodeId = om.MTypeId(0x001212C1) #unique id obtained from ADN
EPSILON = 0.0000001
class SkScriptedNoiseDeformer(omMPx.MPxDeformerNode):
amp = om.MObject()
freq = om.MObject()
offset = om.MObject()
octaves = om.MObject()
lacunarity = om.MObject()
persistence = om.MObject()
locatorWorldSpace = om.MObject()
def __init__(self):
super(SkScriptedNoiseDeformer, self).__init__()
def deform(self, dataBlock, geomIter, localToWorldMat, multiIndex):
#get envelope value, return if sufficiently near to 0
envDataHandle = dataBlock.inputValue(self.envelope)
envFloat = envDataHandle.asFloat()
if envFloat <= EPSILON:
return
#get attribute values
ampDataHandle = dataBlock.inputValue(self.amp)
ampFloats = ampDataHandle.asFloat3()
freqDataHandle = dataBlock.inputValue(self.freq)
freqFloats = freqDataHandle.asFloat3()
offsetDataHandle = dataBlock.inputValue(self.offset)
offsetFloats = offsetDataHandle.asFloat3()
octavesDataHandle = dataBlock.inputValue(self.octaves)
octavesInt = octavesDataHandle.asInt()
lacunarityDataHandle = dataBlock.inputValue(self.lacunarity)
lacunarityFloat = lacunarityDataHandle.asFloat()
persistenceDataHandle = dataBlock.inputValue(self.persistence)
persistenceFloat = persistenceDataHandle.asFloat()
locatorWorldSpaceDataHandle = dataBlock.inputValue(self.locatorWorldSpace)
locatorWorldSpaceMat = locatorWorldSpaceDataHandle.asMatrix()
#precompute some transformation matrices
localToLocatorSpaceMat = localToWorldMat * locatorWorldSpaceMat.inverse()
locatorToLocalSpaceMat = locatorWorldSpaceMat * localToWorldMat.inverse()
#iterate through all the points
while not geomIter.isDone():
#get weight value for this point, continue if sufficiently near to 0
weightFloat = self.weightValue(dataBlock, multiIndex, geomIter.index())
if weightFloat <= EPSILON:
#advance the iterator before skipping, otherwise the loop never terminates
geomIter.next()
continue
#get locator space position
pos = geomIter.position()
pos *= localToLocatorSpaceMat
#precompute some values
noiseInputX = freqFloats[0] * pos.x - offsetFloats[0]
noiseInputY = freqFloats[1] * pos.y - offsetFloats[1]
noiseInputZ = freqFloats[2] * pos.z - offsetFloats[2]
envTimesWeight = envFloat * weightFloat
#calculate new position
pos.x += ampFloats[0] * noise.snoise3(
x = noiseInputX, y = noiseInputY, z = noiseInputZ,
octaves = octavesInt,
lacunarity = lacunarityFloat,
persistence = persistenceFloat
) * envTimesWeight
pos.y += ampFloats[1] * noise.snoise3(
x = noiseInputX + 123, y = noiseInputY + 456, z = noiseInputZ + 789,
octaves = octavesInt,
lacunarity = lacunarityFloat,
persistence = persistenceFloat
) * envTimesWeight
pos.z += ampFloats[2] * noise.snoise3(
x = noiseInputX + 234, y = noiseInputY + 567, z = noiseInputZ + 890,
octaves = octavesInt,
lacunarity = lacunarityFloat,
persistence = persistenceFloat
) * envTimesWeight
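#note: the constant offsets added above (123/456/789 and 234/567/890) shift
#the noise inputs so that x, y and z sample three decorrelated noise fields;
#the exact offset values are arbitrary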
#convert back to local space
pos *= locatorToLocalSpaceMat
#set new position
geomIter.setPosition(pos)
geomIter.next()
def accessoryNodeSetup(self, dagMod):
thisObj = self.thisMObject()
#get current object name
thisFn = om.MFnDependencyNode(thisObj)
thisObjName = thisFn.name()
#create an accessory locator for user to manipulate a local deformation space
locObj = dagMod.createNode('locator')
dagMod.doIt()
#rename transform and shape nodes
dagMod.renameNode(locObj, thisObjName + '_loc')
locDagPath = om.MDagPath()
locDagFn = om.MFnDagNode(locObj)
locDagFn.getPath(locDagPath)
locDagPath.extendToShape()
locShapeObj = locDagPath.node()
dagMod.renameNode(locShapeObj, thisObjName + '_locShape')
#connect locator's worldMatrix to locatorWorldSpace
locFn = om.MFnDependencyNode(locObj)
worldMatrixAttr = locFn.attribute('worldMatrix')
dagMod.connect(locObj, worldMatrixAttr, thisObj, self.locatorWorldSpace)
def accessoryAttribute(self):
return self.locatorWorldSpace
#creator function
def nodeCreator():
return omMPx.asMPxPtr(SkScriptedNoiseDeformer())
#init function
def nodeInitializer():
outputGeom = omMPx.cvar.MPxDeformerNode_outputGeom
#amplitude attr
nAttr = om.MFnNumericAttribute()
SkScriptedNoiseDeformer.amp = nAttr.createPoint('amplitude', 'amp')
nAttr.setDefault(1.0, 1.0, 1.0)
nAttr.setKeyable(True)
SkScriptedNoiseDeformer.addAttribute(SkScriptedNoiseDeformer.amp)
SkScriptedNoiseDeformer.attributeAffects(SkScriptedNoiseDeformer.amp, outputGeom)
#frequency attr
nAttr = om.MFnNumericAttribute()
SkScriptedNoiseDeformer.freq = nAttr.createPoint('frequency', 'freq')
nAttr.setDefault(1.0, 1.0, 1.0)
nAttr.setKeyable(True)
SkScriptedNoiseDeformer.addAttribute(SkScriptedNoiseDeformer.freq)
SkScriptedNoiseDeformer.attributeAffects(SkScriptedNoiseDeformer.freq, outputGeom)
#offset attr
nAttr = om.MFnNumericAttribute()
SkScriptedNoiseDeformer.offset = nAttr.createPoint('offset', 'off')
nAttr.setDefault(0.0, 0.0, 0.0)
nAttr.setKeyable(True)
SkScriptedNoiseDeformer.addAttribute(SkScriptedNoiseDeformer.offset)
SkScriptedNoiseDeformer.attributeAffects(SkScriptedNoiseDeformer.offset, outputGeom)
#octaves attr
nAttr = om.MFnNumericAttribute()
SkScriptedNoiseDeformer.octaves = nAttr.create('octaves', 'oct', om.MFnNumericData.kInt, 1)
nAttr.setMin(1)
nAttr.setKeyable(True)
SkScriptedNoiseDeformer.addAttribute(SkScriptedNoiseDeformer.octaves)
SkScriptedNoiseDeformer.attributeAffects(SkScriptedNoiseDeformer.octaves, outputGeom)
#lacunarity attr
nAttr = om.MFnNumericAttribute()
SkScriptedNoiseDeformer.lacunarity = nAttr.create('lacunarity', 'lac', om.MFnNumericData.kFloat, 2.0)
nAttr.setKeyable(True)
SkScriptedNoiseDeformer.addAttribute(SkScriptedNoiseDeformer.lacunarity)
SkScriptedNoiseDeformer.attributeAffects(SkScriptedNoiseDeformer.lacunarity, outputGeom)
#persistence attr
nAttr = om.MFnNumericAttribute()
SkScriptedNoiseDeformer.persistence = nAttr.create('persistence', 'per', om.MFnNumericData.kFloat, 0.5)
nAttr.setKeyable(True)
SkScriptedNoiseDeformer.addAttribute(SkScriptedNoiseDeformer.persistence)
SkScriptedNoiseDeformer.attributeAffects(SkScriptedNoiseDeformer.persistence, outputGeom)
#locatorWorldSpace attr
mAttr = om.MFnMatrixAttribute()
SkScriptedNoiseDeformer.locatorWorldSpace = mAttr.create('locatorWorldSpace', 'locsp')
mAttr.setStorable(False)
mAttr.setHidden(True)
SkScriptedNoiseDeformer.addAttribute(SkScriptedNoiseDeformer.locatorWorldSpace)
SkScriptedNoiseDeformer.attributeAffects(SkScriptedNoiseDeformer.locatorWorldSpace, outputGeom)
#init plugin
def initializePlugin(mObject):
mPlugin = omMPx.MFnPlugin(mObject, "Skeel Lee", nodeVersion, "Any")
try:
mPlugin.registerNode(nodeType, nodeId, nodeCreator, nodeInitializer, omMPx.MPxNode.kDeformerNode)
except:
sys.stderr.write('Failed to register deformer node: %s\n' % (nodeType))
raise
#uninit plugin
def uninitializePlugin(mObject):
mPlugin = omMPx.MFnPlugin(mObject)
try:
mPlugin.deregisterNode(nodeId)
except:
sys.stderr.write('Failed to deregister deformer node: %s\n' % (nodeType))
raise
|
[
"skeel@skeelogy.com"
] |
skeel@skeelogy.com
|
a0f0e296b65c24b3f22b2d9f9128df08da47cd87
|
8d50ada4abfd790d407340e218c18b4f04ba570a
|
/4-7.py
|
d04bab371372f8037035ddd110df0c92a8be439f
|
[] |
no_license
|
prasanna1695/python-code
|
b3ca875178f4645d332f974588117994ad6c3e01
|
e62d700da292dc4e6cedebbe54dcd3f25b936ed5
|
refs/heads/master
| 2021-05-27T21:11:57.328421
| 2014-05-14T04:20:26
| 2014-05-14T04:20:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,508
|
py
|
#You have two very large binary trees: T1, with millions of nodes, and T2, with hundreds of nodes.
#Create an algorithm to decide if T2 is a subtree of T1
class Tree(object):
def __init__(self, sorted_array):
if sorted_array == []:
self.data = None
#an empty tree still needs left/right attributes so the traversal code below can inspect them
self.left = None
self.right = None
else:
self.data = sorted_array[len(sorted_array)/2]
if sorted_array[:len(sorted_array)/2] != []:
self.left = Tree(sorted_array[:len(sorted_array)/2])
else:
self.left = None
if sorted_array[(len(sorted_array)/2)+1:] != []:
self.right = Tree(sorted_array[(len(sorted_array)/2)+1:])
else:
self.right = None
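#A quick sketch of the recursion (values purely illustrative): Tree([1,2,3])
#picks the middle element 2 as the root, builds Tree([1]) as its left child
#and Tree([3]) as its right child, i.e. a balanced BST from a sorted array.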
def isASubtree(T1, T2):
if T2 == None or T2.data == None:
return True
else:
return traverseBigTree(T1,T2)
def traverseBigTree(T1,T2):
if T1 == None:
return False
if T1.data == T2.data:
if checkChildren(T1,T2):
return True
return isASubtree(T1.right, T2) or isASubtree(T1.left, T2)
def checkChildren(T1,T2):
#a missing subtree (None) and an empty tree (data == None) are equivalent
if (T1 == None or T1.data == None) and (T2 == None or T2.data == None):
return True
elif T1 == None or T2 == None or T1.data == None or T2.data == None:
return False
elif T1.data != T2.data:
return False
else:
return checkChildren(T1.right,T2.right) and checkChildren(T1.left, T2.left)
T1 = Tree([])
T2 = Tree([0])
print "Test1: T1 = [], T2 = [0]"
print isASubtree(T1,T2) == False
T1 = Tree([])
T2 = Tree([])
print "Test2: T1 = [], T2 = []"
print isASubtree(T1,T2) == True
T1 = Tree([0])
T2 = Tree([])
print "Test3: T1 = [0], T2 = []"
print isASubtree(T1,T2) == True
T1 = Tree([1])
T2 = Tree([1])
print "Test4: T1 = [1], T2 = [1]"
print isASubtree(T1,T2) == True
T1 = Tree([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
T2 = Tree([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
print "Test5: T1 = T2 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]"
print isASubtree(T1,T2) == True
T1 = Tree([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
T2 = Tree([5,6,7])
print "Test6: T1 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], T2 = [5,6,7]"
print isASubtree(T1,T2) == True
T1 = Tree([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
T2 = Tree([2,4,6])
print "Test7: T1 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], T2 = [2,4,6]"
print isASubtree(T1,T2) == False
T1 = Tree([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
T2 = Tree([1,2,3,4,5,6,7])
print "Test8: T1 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], T2 = [1,2,3,4,5,6,7]"
print isASubtree(T1,T2) == True
T1 = Tree([1,2,3,4,5,6,7,8,9,10])
T2 = Tree([])
T2.data = 8
T2.left = Tree([])
T2.left.data = 7
print "Test9: T1 = [1,2,3,4,5,6,7,8,9,10], T2 = [7,8,_]"
print isASubtree(T1,T2) == True
|
[
"paulnogas@gmail.com"
] |
paulnogas@gmail.com
|
eab442983dcf502997761d076ae343e011c0730d
|
94da14ff366651bd58bbd53abd3b1816a2292fb8
|
/server/server.wsgi
|
6ddeb5dc5d6d456d679c40d52c461edb6427f9af
|
[] |
no_license
|
KaranPhadnisNaik/waketfup
|
6d4fc3fafaafb8c4cda0b2e84365b4b346740fd8
|
742f0ba5ce6d4af4141ca073991205b1746612f8
|
refs/heads/master
| 2021-01-18T18:59:13.772111
| 2017-04-28T23:02:47
| 2017-04-28T23:02:47
| 86,879,488
| 0
| 5
| null | 2017-04-28T23:02:48
| 2017-04-01T03:12:16
|
Python
|
UTF-8
|
Python
| false
| false
| 507
|
wsgi
|
import sys
from flask import Flask
from server import api
from extensions import mysql
app = Flask(__name__)
app.config.from_object(__name__)
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = ''
app.config['MYSQL_DATABASE_DB'] = 'wakeup'
app.config['MYSQL_DATABASE_HOST'] = '127.0.0.1'
mysql.init_app(app)
app.register_blueprint(api, url_prefix='/wakeup/api')
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, debug=True)
#app.run(host='0.0.0.0', port=80)
|
[
"Karanphadnis1@gmail.com"
] |
Karanphadnis1@gmail.com
|
ca706a17599985c33d41dd458171dc437fd326a0
|
60abb1b8aa61764ae66488755fecbfc8baeba47e
|
/tools/pydocgen/pydocgen/__init__.py
|
121fa0c78708c3628f24c9eab932eaec660b91e1
|
[] |
no_license
|
bishoywagih/docs
|
4789c3860fc423e77bec9f58574d0fa02c8089e3
|
80989180bcd53ae28e94a03f67ff97159efa1884
|
refs/heads/master
| 2020-04-20T01:36:16.496092
| 2019-01-30T23:57:55
| 2019-01-30T23:57:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,501
|
py
|
"""
(A) Pulumi Python documentation generator, at your service!
This module provides a mechanism for producing HTML documentation for Pulumi packages directly from their source, in a
format that is amenable for inclusion in the Pulumi Docs repo. It accomplishes this using a three-stage transformation:
1. This script walks all providers that it intends to generate documentation for and generates an input to Sphinx, the
documentation generator. The output of this stage is a directory full of reStructuredText files (.rst) that
Sphinx will interpret.
2. The script invokes Sphinx directly. Sphinx walks the packages that we intend to document and generates a lot of
documentation for them. We use the "json" builder for Sphinx, which is a target that splats a large amount of
HTML into a JSON document for each input RST file that we gave it.
3. The script processes the JSON output of Sphinx and produces a series of folders and Markdown documents that our
Jekyll front-end is aware of and can render in a reasonable fashion in the context of our docs website.
This is a little crazy. I will understand if you hate me. However, this script is very effective at what it does, mostly
because Sphinx is an incredibly powerful tool that is well-suited for this purpose. The "correct" way to accomplish this
task is likely to create a custom Sphinx theme that outputs HTML directly in the format that our site expects, but this
is "hard" (read: time-consuming for the author).
"""
import glob
import json
from os import path, mkdir
import shutil
from subprocess import check_call
import sys
import tempfile
from typing import NamedTuple, List
from jinja2 import Environment, PackageLoader, select_autoescape
class Project(NamedTuple):
"""
A Project is a collection of metadata about the current project that we'll feed to Sphinx.
"""
name: str
copyright: str
author: str
version: str
release: str
class Provider(NamedTuple):
"""
A provider is a tuple of "name" (a human-readable name) and "package_name" (the actual Python package name).
"""
name: str
package_name: str
class Input(NamedTuple):
"""
Input is the schema of the JSON document loaded as an input to the documentation generator. It contains metadata
about the current project (see Project) and a list of providers that we intend to document.
"""
project: Project
providers: List[Provider]
class Context(NamedTuple):
"""
The context is some state kept around during the transformation process.
"""
template_env: Environment
tempdir: str
outdir: str
mdoutdir: str
input: Input
def read_input(input_file: str) -> Input:
"""
read_input produces an Input from an input file with the given filename.
:param str input_file: Filename of a JSON file to read inputs from.
:returns str: An Input representing the current run of the tool.
"""
with open(input_file) as f:
input_dict = json.load(f)
project = Project(**input_dict["project"])
providers = []
for provider in input_dict.get("providers") or []:
providers.append(Provider(**provider))
return Input(project=project, providers=providers)
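# A sketch of the expected input JSON shape, inferred from the NamedTuples
# above (all field values below are illustrative assumptions):
#
# {
#     "project": {"name": "Pulumi", "copyright": "2019", "author": "Pulumi",
#                 "version": "1.0", "release": "1.0"},
#     "providers": [{"name": "AWS", "package_name": "pulumi_aws"}]
# }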
def render_template_to(ctx: Context, dest: str, template_name: str, **kwargs):
"""
Helper function for rendering templates to the context's temporary directory.
:param Context ctx: The current context.
:param str dest: The destination path relative to the root of the output directory.
:param str template_name: The name of the template to render.
:param **kwargs: Passed verbatim to the template.
"""
template_instance = ctx.template_env.get_template(template_name)
out_path = path.join(ctx.tempdir, dest)
with open(out_path, "w") as f:
rendered = template_instance.render(**kwargs)
f.write(rendered)
def generate_sphinx_files(ctx: Context):
"""
Generates Sphinx input from the list of packages given to this tool. The Sphinx input is saved in the temporary
directory created by the context (ctx.tempdir).
"""
# Sphinx expects a conf.py file at the root of the folder - render it.
render_template_to(ctx, "conf.py", "conf.py", input=ctx.input)
# We're also shipping a Sphinx plugin to hack our docstrings.
render_template_to(ctx, "markdown_docstring.py", "markdown_docstring.py")
# Sphinx begins at index.rst and walks it recursively to discover all files to render. Although we're not using the
# output of index.rst in any way, we must still render it to refer to all of the provider pages that we intend to
# document so that Sphinx knows to recurse into them.
render_template_to(ctx, "index.rst", "index.rst", input=ctx.input)
create_dir(ctx.tempdir, "providers")
create_dir(ctx.tempdir, "_static") # Sphinx complains if this isn't there.
# Templates that we intend to use.
without_module_template = path.join("providers", "provider_without_module.rst")
with_module_template = path.join("providers", "provider_with_module.rst")
module_template = path.join("providers", "module.rst")
for provider in ctx.input.providers:
doc_path = path.join("providers", f"{provider.package_name}.rst")
# __import__ is Python magic - it literally imports the package that we're about to document. For this reason
# (and because Sphinx does something similar), the packages that we are documenting MUST be installed in the
# current environment.
module = __import__(provider.package_name)
# The reason we're importing the module is to inspect its `__all__` member - so we can discover any submodules
# that this module has.
#
# TFGen explicitly populates this array.
if not hasattr(module, "__all__"):
# No submodules? Render the without_module_template and be done.
render_template_to(ctx, doc_path, without_module_template, provider=provider)
else:
# If there are submodules, run through each one and render module templates for each one.
all_modules = getattr(module, "__all__")
render_template_to(ctx, doc_path, with_module_template, provider=provider, submodules=all_modules)
create_dir(ctx.tempdir, "providers", provider.package_name)
for module in all_modules:
dest = path.join("providers", provider.package_name, f"{module}.rst")
module_meta = {"name": module, "full_name": f"{provider.package_name}.{module}"}
render_template_to(ctx, dest, module_template, module=module_meta)
def build_sphinx(ctx: Context):
"""
build_sphinx invokes Sphinx on the inputs that we generated in `generate_sphinx_files`.
:param Context ctx: The current context.
"""
check_call(["sphinx-build", "-j", "auto", "-b", "json", ctx.tempdir, ctx.outdir])
def transform_sphinx_output_to_markdown(ctx: Context):
"""
Transforms the Sphinx output in `ctx.outdir` to markdown by post-processing the JSON output by Sphinx. The directory
structure written by this function mirrors the `reference/pkg` directory in the docs repo, so that `reference/pkg`
can serve as an output directory of this script.
:param Context ctx: The current context.
"""
out_base = create_dir(ctx.mdoutdir, "python")
base_json = path.join(ctx.outdir, "providers")
for provider in ctx.input.providers:
provider_path = create_dir(out_base, provider.package_name)
provider_sphinx_output = path.join(base_json, provider.package_name)
# If this thing has submodules, provider_sphinx_output is a directory and it exists.
if path.exists(provider_sphinx_output):
create_markdown_file(f"{provider_sphinx_output}.fjson", path.join(provider_path, "index.md"))
# Recurse through all submodules (all fjson files in this directory) and produce folders with an index.md
# in them.
for file in glob.iglob(path.join(provider_sphinx_output, "*.fjson")):
module_name = path.splitext(path.basename(file))[0]
module_path = create_dir(provider_path, module_name)
create_markdown_file(file, path.join(module_path, "index.md"))
else:
# Otherwise, just drop an index.md in the provider directory.
create_markdown_file(f"{provider_sphinx_output}.fjson", path.join(provider_path, "index.md"))
def create_dir(*args):
full_path = path.join(*args)
if not path.exists(full_path):
mkdir(full_path)
return full_path
def create_markdown_file(file: str, out_file: str):
"""
Derives a Markdown file from the Sphinx output file `file` and saves the result to `out_file`.
:param str file: Sphinx output file, to be used as the source of data to derive a Markdown file. It is technically
JSON but in reality it's a JSON object with a "body" property that's filled with HTML.
:param str out_file: The name of the Markdown file to output.
"""
with open(file) as f:
contents = json.load(f)
with open(out_file, "w") as f:
# The "body" property of Sphinx's JSON is basically the rendered HTML of the documentation on this page. We're
# going to slam it verbatim into a file and call it Markdown, because we're professionals.
f.write(contents["body"])
def main():
if len(sys.argv) != 2:
print("usage: python -m pydocgen <output_dir>")
exit(1)
output_directory = sys.argv[1]
input = read_input("pulumi-docs.json")
env = Environment(
loader=PackageLoader('pydocgen', 'templates'),
autoescape=select_autoescape(['html', 'xml']))
tempdir = tempfile.mkdtemp()
outdir = tempfile.mkdtemp()
mdoutdir = output_directory
ctx = Context(template_env=env, input=input, tempdir=tempdir, outdir=outdir, mdoutdir=mdoutdir)
try:
print("Generating Sphinx input...")
generate_sphinx_files(ctx)
print("Running Sphinx...")
build_sphinx(ctx)
print("Transforming Sphinx output into Markdown...")
transform_sphinx_output_to_markdown(ctx)
print("Done!")
finally:
if path.exists(tempdir):
pass
#shutil.rmtree(tempdir)
if path.exists(outdir):
pass
#shutil.rmtree(outdir)
|
[
"noreply@github.com"
] |
bishoywagih.noreply@github.com
|
97853189dfe18bc6b81575e4fc52c8f159a94321
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02725/s504683276.py
|
b99ec7dcd7bc0b16982ffe576e9e58a195be308d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import math
def resolve():
import sys
input = sys.stdin.readline
# row = [int(x) for x in input().rstrip().split(" ")]
kn = [int(x) for x in input().rstrip().split(" ")]
k = kn[0]
a = [int(x) for x in input().rstrip().split(" ")]
max_dist = max([a[i+1] - a[i] for i in range(len(a)-1)])
max_dist = max_dist if max_dist > a[0] + k - a[len(a)-1] else a[0] + k - a[len(a)-1]
print(k - max_dist)
if __name__ == "__main__":
resolve()
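#Worked example (assumed input): pond circumference K=20, houses at 5, 10, 20.
#Consecutive gaps are 5 and 10, and the wrap-around gap is 5+20-20=5; skipping
#the largest gap (10) gives a minimum walking distance of 20-10=10.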
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
36fc95272f6918a61acd36ed1e05a25ca3d4799a
|
c2af656559d4330d744b68e1f6f044c0903ca6e4
|
/main/main_b.py
|
f6bb0159711eb15b54d08d476432e790cb58fdf4
|
[] |
no_license
|
mvattiku/insightProject
|
666d32276d9cd850dea8df5d153a062c0cd977bb
|
d7a487bbaef4cf9bd422c13382f761e5f6166302
|
refs/heads/master
| 2020-04-17T09:05:59.089546
| 2019-03-05T01:05:18
| 2019-03-05T01:05:18
| 166,444,673
| 0
| 0
| null | 2019-01-26T02:08:56
| 2019-01-18T17:06:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,873
|
py
|
"""
Sample on how to use the batch_util.
To Run: "python main_b.py BatchConfig.yml AwsConfig.ini"
You need to provide two command-line arguments:
Argument 1 = yaml file path with batch parameters defined
Argument 2 = aws config(.ini) file path with aws account info (such as aws_key, region, ...)
"""
#_____________________________________________________________________________________________________
import sys
import os
import configparser
import boto3
from batch_service.batch_util import Batch
if __name__ == '__main__':
try:
yaml_file_path = sys.argv[1] #batch yaml file
config_file_path = sys.argv[2] #aws config file
aws_config = configparser.ConfigParser()
aws_config.read(config_file_path)
except Exception as e:
e.args += ("Need to provide a parameters file", )
raise
#aws
region=aws_config.get('aws', 'region')
aws_access_key_id = aws_config.get('aws', 'aws_access_key_id')
aws_secret_access_key = aws_config.get('aws', 'aws_secret_access_key')
aws_id = aws_config.get('aws', 'aws_id')
#aws session and batch client
session = boto3.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
client = session.client('batch', region) #use the region from the aws config instead of hardcoding it
#Batch Util Obj
batch = Batch(client=client, config_file=yaml_file_path) #must provide client to be able to use other Batch methods
#create compute environment
batch.create_compute_environment()
#create job queue
batch.create_job_queue()
#create job definition
batch.create_job_definition()
#create job
batch.create_job()
#get latest job definition version
latest_version = batch.latest_version_job_definition(job_def_name="test") #if the parameter is not provided here, the job_def_name from the yaml file will be used
|
[
"monisha.aishwarya.vatikuti@macys.com"
] |
monisha.aishwarya.vatikuti@macys.com
|
e7c51f8dbe86330a891a6069eb59a25c0cf63908
|
211ba663bb1086047b9b5c5689f0abf64038e7b1
|
/STORE/product/admin.py
|
40dbdce5ab80ceaf135b7561516e4fb8004f5045
|
[] |
no_license
|
form-merch-llc/store
|
7a587530fcf94796375b1120363a49935fd73e04
|
1821077214001fdcf5dd30388831dcce07fd0d79
|
refs/heads/master
| 2022-11-14T16:12:58.186641
| 2020-07-14T08:54:05
| 2020-07-14T08:54:05
| 277,132,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
from django.contrib import admin
from .models import (
Attribute,
Image,
Product,
Type,
Variant,
Value,
)
class AttributeInline(admin.StackedInline):
model = Attribute
class ImageInline(admin.StackedInline):
model = Image
class VariantInline(admin.StackedInline):
model = Variant
class ValueInline(admin.StackedInline):
model = Value
@admin.register(Attribute)
class AttributeAdmin(admin.ModelAdmin):
list_display = ["pk", "name"]
inlines = [ValueInline]
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = ["pk", "alt"]
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ["pk", "name"]
inlines = [VariantInline]
@admin.register(Type)
class TypeAdmin(admin.ModelAdmin):
list_display = ["pk", "name"]
@admin.register(Variant)
class VariantAdmin(admin.ModelAdmin):
list_display = ["pk", "name"]
inlines = [ImageInline]
@admin.register(Value)
class ValueAdmin(admin.ModelAdmin):
list_display = ["pk", "name"]
|
[
"khasbilegt.ts@gmail.com"
] |
khasbilegt.ts@gmail.com
|
bca52650f8f2c99776a40778147b90707b570634
|
03ab708d3725b5ed52217effc4f8e8ca5c889632
|
/boletin/migrations/0061_gruposcoutfalse_apellidojefe.py
|
bff288f7b4349959cbc813b1a937f1e629bb7ea7
|
[] |
no_license
|
esiemprelisto/webapp
|
443b8f89b088c3062b1fcf7dab24387fb34a673a
|
5e7651ca4ff0103711220a0b750b7ac483504741
|
refs/heads/master
| 2020-03-22T10:40:13.369146
| 2018-07-06T02:08:20
| 2018-07-06T02:08:20
| 139,918,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-04 22:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boletin', '0060_gruposcoutfalse'),
]
operations = [
migrations.AddField(
model_name='gruposcoutfalse',
name='apellidoJefe',
field=models.CharField(default='1', max_length=100),
preserve_default=False,
),
]
|
[
"alejandro9980@gmail.com"
] |
alejandro9980@gmail.com
|
bb26f478e15b476158222993e3f5de711d68404f
|
5d5560e10938830d2ee5adaabc2ebe723a3fe9c8
|
/hermes_cms/tests/views/utils/mocks.py
|
046ced7e16dde3c781cb91e4db7aa60e4df5de57
|
[] |
no_license
|
pmcilwaine/hermes
|
3c652c27f99c06baa851708e8461ada58d0772db
|
e9d6f5aeeb12824e68b326dcb0c346c50c5b6f38
|
refs/heads/develop
| 2021-01-13T01:54:44.656366
| 2015-10-12T09:39:42
| 2015-10-12T09:39:42
| 33,728,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from mock import MagicMock
def mock_modules():
sys.modules['hermes_cms.core.log'] = MagicMock()
|
[
"paul.mcilwaine@gmail.com"
] |
paul.mcilwaine@gmail.com
|
e89e32ee06554604411d2af3dece7826dfc3c2f0
|
d841fd397bde4f0ac2444606ac13af3e8e27542c
|
/21_merge-two-sorted-lists.py
|
6f120782df0e11af887fd10a7f91c4c7ca98e15e
|
[] |
no_license
|
excelsky/Leet1337Code
|
5de777f5263ea3f7bbf05a6c77aa893c0871fb63
|
804b4018cfc8858563e3e166640845f58ff973c5
|
refs/heads/master
| 2023-05-07T15:54:03.824463
| 2021-05-28T08:30:11
| 2021-05-28T08:30:11
| 277,186,940
| 0
| 0
| null | 2020-08-03T00:26:50
| 2020-07-04T21:20:52
|
Python
|
UTF-8
|
Python
| false
| false
| 589
|
py
|
# https://leetcode.com/problems/merge-two-sorted-lists/
# 6gaksu
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
if l1 is None:
return l2
elif l2 is None:
return l1
elif l1.val < l2.val:
l1.next = self.mergeTwoLists(l1.next, l2)
return l1
else:
l2.next = self.mergeTwoLists(l1, l2.next)
return l2
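# A minimal usage sketch (ListNode must be defined locally when run outside
# LeetCode, matching the constructor in the comment above):
# l1 = ListNode(1, ListNode(3)); l2 = ListNode(2, ListNode(4))
# merged = Solution().mergeTwoLists(l1, l2) # yields 1 -> 2 -> 3 -> 4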
|
[
"analytics20132333@gmail.com"
] |
analytics20132333@gmail.com
|
dcb17f1f1d480bffcbf9ca0fc7438d32433c67d4
|
8d3daff9dc9a6f92fb6d43ab87ad2d03dfa43346
|
/Graph Algo/Articulation_points.py
|
cde4224004a37a73dfed88acb0b41eb019db3163
|
[] |
no_license
|
avikram553/Basics-of-Python
|
3156240aa14a5df11d1345cbd99c11e61085ea6a
|
2cc6bbff1fbb8d291a887a94904da1001066df8a
|
refs/heads/master
| 2021-06-28T23:51:51.400629
| 2020-09-30T19:15:52
| 2020-09-30T19:15:52
| 172,511,602
| 0
| 1
| null | 2020-09-30T19:15:54
| 2019-02-25T13:31:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
def dfs(node,par):
global count
global ti
visited[node]=True
low[node]=ti;f_time[node]=ti
ti+=1
child=0
for i in graph[node]:
if(i==par):
continue
if(visited[i]):
low[node]=min(low[node],f_time[i])
else:
dfs(i,node)
low[node]=min(low[node],low[i])
if(f_time[node]<=low[i] and par!=-1):
articulation[node]=0
child+=1
if(par==-1 and child>=2):
articulation[node]=0
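#a non-root node is an articulation point when some DFS child i cannot reach
#back above it (f_time[node] <= low[i]); the root is an articulation point
#only when it has two or more DFS children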
for _ in range(int(input())):
n,m=map(int,input().split())
graph={}
for i in range(1,n+1): graph[i]=[]
for i in range(m):
a,b=map(int,input().split())
graph[a].append(b)
graph[b].append(a)
visited=[False for i in range(n+1)]
low=[-1 for i in range(n+1)]
f_time=[-1 for i in range(n+1)]
ti=0;count=0
articulation={}
for i in range(1,n+1):
if(not visited[i]):
dfs(i,-1)
print(articulation.keys())
'''
2
5 5
1 2
1 3
3 2
3 4
5 4
7 6
1 2
2 3
2 4
2 5
3 6
3 7
'''
|
[
"akashkbhagat221199@gmail.com"
] |
akashkbhagat221199@gmail.com
|
ed59a73c5deb252f2f02fd1988dbfe8cb83a61b2
|
6f02d7bb0720e4f1ad7d912d09cf38fbc98022e9
|
/quizz/views.py
|
d62256bb28681d92039a3d59977e4f58a73e690a
|
[] |
no_license
|
bhaskarmehta/Quizz_django_app
|
d525338bf2ed62860724ba15e94142c506749b98
|
2e2257c01ab8f1292f96a5265729b182a18ddc51
|
refs/heads/main
| 2023-07-02T18:01:46.467189
| 2021-07-28T20:08:00
| 2021-07-28T20:08:00
| 390,488,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from quizz.models import Quizz # if we had more than one model we could have imported them all with *
# Create your views here.
#def hello(request):
# return render(request,'main_page.html')
#def add(request):
# val1=int(request.POST['num1'])
# val2=int(request.POST['num2'])
# res=(val1)+(val2)
#return render(request,'addition_result.html',{'result':res})
#comment for testing git commit
def home(request):
if request.method == 'POST':
#print(request.POST)
que = Quizz.objects.all()
score=0
total = 0
temp1 = '0'
for q in que:
total += 1
print(q.Question)
print(q.Correct_Answer)
print("Hello")
temp1=str(total)
print(request.POST.get(temp1))
if q.Correct_Answer == request.POST.get(temp1):
score += 1
context ={
'score': score,
'total': total
}
return render(request, 'result.html', context)
else:
que = Quizz.objects.all()
context = {
'que': que
}
return render(request, 'index.html', context)
|
[
"bhaskarmehta422@gmail.com"
] |
bhaskarmehta422@gmail.com
|
15ddbc3e329f615402c3491a9ebc094e13ddadc5
|
c757437c6c432da26e209e36dd64faf8aca18478
|
/audio/models.py
|
688eb413fc07d43185c45a9b2305cf07641df3a2
|
[] |
no_license
|
piyush626/audiotrack_crud_app
|
298283721ea5278b36fce556e2b52b082f0f3989
|
4b97cd65a2e0956d5d6405f8b253af08921cc3e5
|
refs/heads/master
| 2023-04-17T19:46:13.363796
| 2021-05-01T11:18:04
| 2021-05-01T11:18:04
| 363,387,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
# from django.core.exceptions import MultipleObjectsReturned
from django.db import models
# from django.contrib.auth.models import User
from django.db.models.deletion import CASCADE
from django.core.validators import MaxValueValidator
# Create your models here.
# class CustomUser(models.Model):
# user = models.ForeignKey(User,on_delete=CASCADE)
class SongFile(models.Model):
id = models.IntegerField(primary_key=True,null=False,blank=False,unique=True)
song_name = models.CharField(max_length=100,null=False,blank=False)
duration_seconds = models.IntegerField(null=False,blank=False)
upload_time = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.song_name
class PodcastFile(models.Model):
id = models.IntegerField(primary_key=True,null=False,blank=False,unique=True)
podcast_name = models.CharField(max_length=100,null=False,blank=False)
duration_seconds = models.IntegerField(null=False,blank=False)
upload_time = models.DateTimeField(auto_now_add=True)
host = models.CharField(max_length=100,null=False,blank=False)
Number_of_participant = models.PositiveIntegerField(default=0,validators=[MaxValueValidator(10)])
def __str__(self):
return self.podcast_name
@property
def candidates(self):
return self.participants_set.all()
class Participants(models.Model):
podcastfile = models.ForeignKey(PodcastFile,on_delete=models.CASCADE,null=True)
participant_name = models.CharField(max_length=100,blank=True,null=True)
def __str__(self):
return self.participant_name
class AudioBookFile(models.Model):
id = models.IntegerField(primary_key=True,null=False,blank=False,unique=True)
title = models.CharField(max_length=100,null=False,blank=False)
author = models.CharField(max_length=100,null=False,blank=False)
narrator = models.CharField(max_length=100,null=False,blank=False)
duration_seconds = models.IntegerField(null=False,blank=False)
upload_time = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
|
[
"agarwalpiyush626@gmail.com"
] |
agarwalpiyush626@gmail.com
|
4e56cca92d1272e6d0ba1a74d6fce1c09303cf7b
|
e19b649ff2136be1a6ef256d8b96d7e240578615
|
/Desktop/Ecommerce/ecommerce/store/models.py
|
6abdd3c1ae38e74b49655e8e4aacb092ea35375c
|
[] |
no_license
|
rawalhimal/Himal-Store-Using-Python-Django
|
3f15c0a80b319f948d493e4a1bfa982fb10f1518
|
7f331a6d5d4671e6d4eb0fed82139a41516f7d22
|
refs/heads/master
| 2022-12-16T03:59:32.595282
| 2020-09-23T18:16:07
| 2020-09-23T18:16:07
| 298,055,834
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,724
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE,null=True,blank=True)
name=models.CharField(max_length=200,null=True)
email=models.CharField(max_length=200,null=True)
def __str__(self):
return self.name
class Product(models.Model):
name=models.CharField(max_length=200,null=True)
price=models.DecimalField(max_digits=7, decimal_places=2)
digital=models.BooleanField(default=False,null=True,blank=True)
image=models.ImageField(null=True,blank=True)
def __str__(self):
return self.name
@property
def imageURL(self):
try:
url=self.image.url
except:
url=''
return url
class Order(models.Model):
customer=models.ForeignKey(Customer,on_delete=models.SET_NULL,null=True,blank=True)
date_ordered=models.DateTimeField(auto_now_add=True)
complete=models.BooleanField(default=False)
transaction_id=models.CharField(max_length=100,null=True)
def __str__(self):
return str(self.id)
@property
def shipping(self):
shipping=False
orderitems=self.orderitem_set.all()#get all order items
for i in orderitems:#use loop to check whether we need shipping or not
if i.product.digital == False:
shipping=True
return shipping
@property
def get_cart_total(self):
orderitems=self.orderitem_set.all()
total=sum([item.get_total for item in orderitems])
return total
@property
def get_cart_items(self):
orderitems=self.orderitem_set.all()
total=sum([item.quantity for item in orderitems])
return total
class OrderItem(models.Model):
product=models.ForeignKey(Product,on_delete=models.SET_NULL,null=True)
order=models.ForeignKey(Order,on_delete=models.SET_NULL,null=True)
quantity=models.IntegerField(default=0,null=True,blank=True)
date_added=models.DateTimeField(auto_now_add=True)
@property
def get_total(self):
total=self.product.price * self.quantity
return total
class ShippingAddress(models.Model):
customer=models.ForeignKey(Customer,on_delete=models.SET_NULL,null=True)
order=models.ForeignKey(Order,on_delete=models.SET_NULL,null=True)
address=models.CharField(max_length=200,null=False)
city=models.CharField(max_length=200,null=False)
state=models.CharField(max_length=200,null=False)
zipcode=models.CharField(max_length=200,null=False)
date_added=models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.address
|
[
"himalrawal500@gmail.com"
] |
himalrawal500@gmail.com
|
c879f1c643a18b5bda3c3c427dd5e2b82672d1b8
|
9d1b192ea44c0a76ec5b019126ef2a34c6c3cd4a
|
/collection/api/json_request.py
|
c3957643241a207293267e0f4b60d38b5b938ee4
|
[] |
no_license
|
PureAppCrystal/analysis_sbms
|
bdae97a41010b907cd2ab73004d2e10aede35e17
|
4e907a383933a825f267a5bd03d14565abeb49a1
|
refs/heads/master
| 2021-05-07T04:45:42.988091
| 2017-11-18T18:33:52
| 2017-11-18T18:33:52
| 111,229,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
import sys
from datetime import datetime
from urllib.request import Request, urlopen
import json
def json_request(url='', encoding='utf-8', success=None,
error=lambda e: print('%s : %s' % (e, datetime.now()), file=sys.stderr)):
try:
request = Request(url)
resp = urlopen(request)
resp_body = resp.read().decode(encoding)
json_result = json.loads(resp_body)
print('%s : success for request [%s]' % (datetime.now(), url))
# callable -> checks whether the given object can be called
if callable(success) is False:
return json_result
success(json_result)
except Exception as e:
callable(error) and error(e)
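#Usage sketch (the URL below is a placeholder, not a real endpoint):
#result = json_request(url='https://api.example.com/data.json')
#or, with a success callback instead of a return value:
#json_request(url='https://api.example.com/data.json', success=lambda r: print(r))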
|
[
"purecrystar@gmail.com"
] |
purecrystar@gmail.com
|
e99bbdd9923292c0bb7d6901b1f74b8fd866a19c
|
7f9a73533b3678f0e83dc559dee8a37474e2a289
|
/deep-learning-inference/numpy/distutils/command/install_clib.py
|
6a73f7e3308ff0aa6b9f6454bfa43673fdd0e9b1
|
[
"MIT",
"BSD-3-Clause",
"GPL-3.0-or-later",
"BSD-3-Clause-Open-MPI",
"GCC-exception-3.1",
"GPL-3.0-only"
] |
permissive
|
ryfeus/stepfunctions2processing
|
04a5e83ee9b74e029b79a3f19381ba6d9265fc48
|
0b74797402d39f4966cab278d9718bfaec3386c2
|
refs/heads/master
| 2022-10-08T16:20:55.459175
| 2022-09-09T05:54:47
| 2022-09-09T05:54:47
| 147,448,024
| 128
| 34
|
MIT
| 2022-01-04T18:56:47
| 2018-09-05T02:26:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
from __future__ import division, absolute_import, print_function
import os
from distutils.core import Command
from distutils.ccompiler import new_compiler
from numpy.distutils.misc_util import get_cmd
class install_clib(Command):
description = "Command to install installable C libraries"
user_options = []
def initialize_options(self):
self.install_dir = None
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install', ('install_lib', 'install_dir'))
def run (self):
build_clib_cmd = get_cmd("build_clib")
if not build_clib_cmd.build_clib:
# can happen if the user specified `--skip-build`
build_clib_cmd.finalize_options()
build_dir = build_clib_cmd.build_clib
# We need the compiler to get the library name -> filename association
if not build_clib_cmd.compiler:
compiler = new_compiler(compiler=None)
compiler.customize(self.distribution)
else:
compiler = build_clib_cmd.compiler
for l in self.distribution.installed_libraries:
target_dir = os.path.join(self.install_dir, l.target_dir)
name = compiler.library_filename(l.name)
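# e.g. with a Unix compiler, library_filename("npymath") -> "libnpymath.a" (illustrative library name)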
source = os.path.join(build_dir, name)
self.mkpath(target_dir)
self.outfiles.append(self.copy_file(source, target_dir)[0])
def get_outputs(self):
return self.outfiles
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
252c30cf30986e40822a962c80b2b866ac99dcab
|
174675a707809001d7c4985fef22374644904236
|
/textuti/views.py
|
c8d03b4c6a090d561edd376f8dbc50c6b2f029d7
|
[] |
no_license
|
ankurramba91/Text-Utility
|
89d55e6920f98ba0be42e442ad18e15bbcaa6562
|
98e3851438ad6c0a198af671dfdf765af3c1b457
|
refs/heads/master
| 2022-11-14T13:19:57.797123
| 2020-07-12T06:49:02
| 2020-07-12T06:49:02
| 278,846,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,185
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, 'index.html')
def analyze(request):
#get the text
djtext=request.POST.get('text', 'default')
#Check Checkbox values
removepunc=request.POST.get('removepunc', 'off')
fullcaps=request.POST.get('fullcaps', 'off')
newlineremover = request.POST.get('newlineremover', 'off')
extraspaceremover=request.POST.get('extraspaceremover', 'off')
charcount=request.POST.get('charcount', 'off')
if removepunc == "on":
punctuations='''!()-[]{};:'"\,<>./?@#$%^&*_~'''
analyzed=""
for ch in djtext:
if ch not in punctuations:
analyzed=analyzed+ch
params= {'purpose':'Remove Punctuations ','analyzed_text':analyzed}
djtext = analyzed
if(fullcaps=="on"):
analyzed = ""
for ch in djtext:
analyzed=analyzed+ch.upper()
params = {'purpose': 'Changed to Uppercase ', 'analyzed_text': analyzed}
djtext=analyzed
if (newlineremover == "on"):
analyzed = ""
for char in djtext:
if char != "\n" and char!="\r":
analyzed = analyzed + char
params = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
djtext=analyzed
if (extraspaceremover == "on"):
analyzed = ""
for index, ch in enumerate(djtext):
if not (djtext[index]==" " and index+1 < len(djtext) and djtext[index+1] == " ") :
analyzed = analyzed + ch
params = {'purpose': 'Remove Extra Spaces ', 'analyzed_text': analyzed}
djtext=analyzed
if (charcount == "on"):
analyzed = 0
for index, ch in enumerate(djtext):
if "a" <= ch <= "z" or "A" <= ch <= "Z":
analyzed = analyzed + 1
params = {'purpose': 'Count Characters', 'analyzed_text': analyzed}
djtext = analyzed
if (removepunc != "on" and fullcaps!="on" and newlineremover != "on" and extraspaceremover != "on" and charcount != "on") :
return HttpResponse("Please select at least one option")
return render(request, 'analyze.html', params)
|
[
"ankur.python@gmail.com"
] |
ankur.python@gmail.com
|
ca313e5936960dce97f945627223f35b441baac7
|
1198d79a6b7e1c3cf3f8445911aac8e0494bd12e
|
/bore/optimizers/__init__.py
|
d611be71b53c4b814972aa1e42cf77dd8382a792
|
[
"MIT"
] |
permissive
|
ltiao/bore
|
43556de6051f5c17f1edec3b8ff1f504c1224072
|
f260ea0c7f486ce5a6ff927826604f089784b0b9
|
refs/heads/master
| 2023-05-23T20:33:18.756112
| 2022-10-04T10:24:23
| 2022-10-04T10:24:23
| 279,667,486
| 25
| 5
| null | 2022-12-26T21:33:48
| 2020-07-14T18:48:47
|
Python
|
UTF-8
|
Python
| false
| false
| 75
|
py
|
from .base import minimize_multi_start
__all__ = ["minimize_multi_start"]
|
[
"louistiao@gmail.com"
] |
louistiao@gmail.com
|
2ec6bdf58dc23cd87bf5d2984d45d7963b466c12
|
a6ed990fa4326c625a2a02f0c02eedf758ad8c7b
|
/meraki/sdk/python/cloneOrganizationSwitchDevices.py
|
9bc0fac94c2fbd25fd832b7aee869824a1ed6d1f
|
[] |
no_license
|
StevenKitavi/Meraki-Dashboard-API-v1-Documentation
|
cf2352976c6b6c00c17a5f6442cedf0aeed46c22
|
5ed02a7def29a2ce455a3f2cfa185f76f44789f5
|
refs/heads/main
| 2023-03-02T08:49:34.846055
| 2021-02-05T10:31:25
| 2021-02-05T10:31:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
organization_id = '549236'
source_serial = 'Q234-ABCD-5678'
target_serials = ['Q234-ABCD-0001', 'Q234-ABCD-0002', 'Q234-ABCD-0003']
response = dashboard.switch.cloneOrganizationSwitchDevices(
organization_id, source_serial, target_serials
)
print(response)
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
1ab23485830cd12757bc9649155036479fb4c222
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/tailscale/conftest.py
|
12f11a5656da1aeeb425b1e64ee2499255c383bd
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,476
|
py
|
"""Fixtures for Tailscale integration tests."""
from __future__ import annotations
from collections.abc import Generator
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from tailscale.models import Devices
from homeassistant.components.tailscale.const import CONF_TAILNET, DOMAIN
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
@pytest.fixture
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="homeassistant.github",
domain=DOMAIN,
data={CONF_TAILNET: "homeassistant.github", CONF_API_KEY: "tskey-MOCK"},
unique_id="homeassistant.github",
)
@pytest.fixture
def mock_setup_entry() -> Generator[AsyncMock, None, None]:
"""Mock setting up a config entry."""
with patch(
"homeassistant.components.tailscale.async_setup_entry", return_value=True
) as mock_setup:
yield mock_setup
@pytest.fixture
def mock_tailscale_config_flow() -> Generator[None, MagicMock, None]:
"""Return a mocked Tailscale client."""
with patch(
"homeassistant.components.tailscale.config_flow.Tailscale", autospec=True
) as tailscale_mock:
tailscale = tailscale_mock.return_value
tailscale.devices.return_value = Devices.parse_raw(
load_fixture("tailscale/devices.json")
).devices
yield tailscale
@pytest.fixture
def mock_tailscale(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
"""Return a mocked Tailscale client."""
fixture: str = "tailscale/devices.json"
if hasattr(request, "param") and request.param:
fixture = request.param
devices = Devices.parse_raw(load_fixture(fixture)).devices
with patch(
"homeassistant.components.tailscale.coordinator.Tailscale", autospec=True
) as tailscale_mock:
tailscale = tailscale_mock.return_value
tailscale.devices.return_value = devices
yield tailscale
@pytest.fixture
async def init_integration(
hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_tailscale: MagicMock
) -> MockConfigEntry:
"""Set up the Tailscale integration for testing."""
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
return mock_config_entry
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
055dc14af449b86f7a99c2c06bbd6dbe018be089
|
594cb9d7f4c9fc8e4fee7d1c98e235e77f9496ac
|
/cpu/LanguageModeling/BERT/data/bookcorpus/clean_and_merge_text.py
|
0b297b1d4781e5e9a26e758f44a28eebf032855d
|
[
"Apache-2.0"
] |
permissive
|
okteto/demos
|
16618292cf43aaf08685a27bc14074002baa3ba3
|
15f2af3aae4802b03f43ddbead51e493e54ee2af
|
refs/heads/master
| 2020-05-01T19:39:25.205171
| 2019-03-25T19:48:41
| 2019-03-25T19:54:25
| 177,653,952
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
# NVIDIA
import glob
import os
output_file = os.environ['WORKING_DIR'] + '/intermediate_files/bookcorpus.txt'
download_path = os.environ['WORKING_DIR'] + '/download/'
with open(output_file, "w") as ofile:
for filename in glob.glob(download_path + '*.txt', recursive=True):
with open(filename, mode='r', encoding="utf-8-sig") as file:
for line in file:
if line.strip() != "":
ofile.write(line.strip() + " ")
ofile.write("\n\n ")
|
[
"pablo@okteto.com"
] |
pablo@okteto.com
|
47d5cb014de0fd08b147a7ef4d1a9d55e8e97af5
|
676098df2b5b889791b1e8206dad5b91b304b31c
|
/gameslibrary/manage.py
|
e61d633530fd6d1396eec7c15391dcc9b8049c10
|
[] |
no_license
|
ilya1231231/games_library
|
307c38a73c44c52010bb89a538e7a1483b1b4e71
|
44f3650998bfa0e3cea5c7abe8055c713373de6d
|
refs/heads/main
| 2023-07-22T22:58:33.703262
| 2021-09-07T15:00:34
| 2021-09-07T15:00:34
| 396,894,668
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gameslibrary.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"malygin.ilja@yandex.ru"
] |
malygin.ilja@yandex.ru
|
8f399f69c7912b2903c174d6e3379627f24db756
|
9ffab573ee2a6403a2111ea5bd570a59d7e9c02a
|
/entorno/Lib/site-packages/flask/globals.py
|
341320a1534b16836ef893f789ce2e6b8d23c3a3
|
[] |
no_license
|
jesusalbertoariza/ritz
|
3a45b0246c6ecb6323202c4a395058256a1e5fee
|
2912eb84c90124bfe9b43e9a35f60565e3a3ac94
|
refs/heads/main
| 2023-08-23T11:36:27.239313
| 2021-10-30T18:16:06
| 2021-10-30T18:16:06
| 422,944,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
import typing as t
from functools import partial
from werkzeug.local import LocalProxy
from werkzeug.local import LocalStack
if t.TYPE_CHECKING:
from .app import Flask
from .ctx import _AppCtxGlobals
from .sessions import SessionMixin
from .wrappers import Request
_request_ctx_err_msg = """\
Working outside of request context.
This typically means that you attempted to use functionality that needed
an active HTTP request. Consult the documentation on testing for
information about how to avoid this problem.\
"""
_app_ctx_err_msg = """\
Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.\
"""
def _lookup_req_object(name):
top = _request_ctx_stack.top
if top is None:
raise RuntimeError(_request_ctx_err_msg)
return getattr(top, name)
def _lookup_app_object(name):
top = _app_ctx_stack.top
if top is None:
raise RuntimeError(_app_ctx_err_msg)
return getattr(top, name)
def _find_app():
top = _app_ctx_stack.top
if top is None:
raise RuntimeError(_app_ctx_err_msg)
return top.app
# context locals
_request_ctx_stack = LocalStack()
_app_ctx_stack = LocalStack()
current_app: "Flask" = LocalProxy(_find_app) # type: ignore
request: "Request" = LocalProxy(partial(_lookup_req_object, "request")) # type: ignore
session: "SessionMixin" = LocalProxy( # type: ignore
partial(_lookup_req_object, "session")
)
g: "_AppCtxGlobals" = LocalProxy(partial(_lookup_app_object, "g")) # type: ignore
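# A minimal usage sketch (not part of this module): the proxies above only
# resolve inside an active context, e.g.
#
# app = Flask(__name__)
# with app.app_context():
#     print(current_app.name)
# with app.test_request_context("/"):
#     print(request.path)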
|
[
"arizajesus@uninorte.edu.co"
] |
arizajesus@uninorte.edu.co
|
c14d0a54050bc11c3540c29801a46f8f84d3d3fa
|
d506404b414e009369668f29e3fab5cb53499dd3
|
/compoundInterestCalculator.py
|
fbad7d206a52d02990ffe152ffc35dd1249692b7
|
[] |
no_license
|
NeenaU/compound-interest-calculator
|
16b8e75b93ddfd037937c681aeaa40436bc84bda
|
d8a9a53550a50f41f503e7eab9df6aedf2305ba0
|
refs/heads/master
| 2022-11-29T22:50:33.459487
| 2020-08-13T20:30:54
| 2020-08-13T20:30:54
| 280,482,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,367
|
py
|
import tkinter as tk
from tkinter import messagebox
class interestCalculator():
def __init__(self, master):
self.master = master
master.title("Compound Interest Calculator")
#Title
self.titleLabel = tk.Label(text="Compound Interest Calculator", font=("Times",16), width=24).grid(row=0,column=0,sticky='EW',pady=15)
#Option widgets
self.initialAmountLabel = tk.Label(master, text="Initial Amount").grid(sticky='W')
self.initialAmountFrame = tk.Frame(master) #widgets are placed side by side in a frame
self.initialAmountFrame.grid(row=2,sticky='NW',pady=6)
self.poundSign1 = tk.Label(self.initialAmountFrame, text="£")
self.poundSign1.grid(row=0,column=0)
self.initialAmount = tk.IntVar()
self.initialAmount.trace("w", self.initialAmountEntryClick)
self.initialAmountEntry = tk.Entry(self.initialAmountFrame, textvariable=self.initialAmount, width=8)
self.initialAmountEntry.grid(row=0,column=1)
self.initialAmountEntry.bind("<1>", self.initialAmountEntryClick)
self.interestRateLabel = tk.Label(text="Yearly Interest Rate").grid(sticky='W')
self.interestRateFrame = tk.Frame(master)
self.interestRateFrame.grid(row=4,sticky='NW',pady=6)
self.interestRate = tk.IntVar()
self.interestRate.trace("w", self.interestRateEntryClick)
self.interestRateEntry = tk.Entry(self.interestRateFrame, textvariable=self.interestRate, width=3)
self.interestRateEntry.grid(row=0,column=0,padx=4)
self.interestRateEntry.bind("<1>", self.interestRateEntryClick)
self.percentSign1 = tk.Label(self.interestRateFrame, text="%")
self.percentSign1.grid(row=0,column=1)
self.timePeriodLabel = tk.Label(text="Time Period").grid(sticky='W')
self.timePeriodFrame = tk.Frame(master)
self.timePeriodFrame.grid(row=6,sticky='NW',pady=6)
self.timePeriod = tk.IntVar()
self.timePeriod.trace("w", self.timePeriodEntryClick)
self.timePeriodEntry = tk.Entry(self.timePeriodFrame, textvariable=self.timePeriod, width=3)
self.timePeriodEntry.bind("<1>", self.timePeriodEntryClick)
self.timePeriodVar = tk.StringVar(master)
self.timePeriodVar.set("years")
self.timePeriodChoice = tk.OptionMenu(self.timePeriodFrame, self.timePeriodVar, "years", "months")
self.timePeriodChoice.grid(row=0,column=0)
self.timePeriodEntry.grid(row=0,column=0,padx=4)
self.timePeriodChoice.grid(row=0,column=1)
self.compoundIntervalLabel = tk.Label(text="Compound Interval").grid(sticky='W')
self.compoundIntervalVar = tk.StringVar(master)
self.compoundIntervalVar.set("yearly")
self.compoundIntervalChoice = tk.OptionMenu(master, self.compoundIntervalVar, "yearly", "monthly", "weekly", "daily")
self.compoundIntervalChoice.grid(sticky='W',pady=6)
self.regularAmountLabel = tk.Label(text="Regular monthly deposit").grid(sticky='W')
self.regularAmountFrame = tk.Frame(master)
self.regularAmountFrame.grid(row=10,sticky='NW',pady=6)
self.regularAmountVar = tk.StringVar(master)
self.regularAmountVar.set("no")
self.regularAmountChoice = tk.OptionMenu(self.regularAmountFrame, self.regularAmountVar, "yes", "no", command=self.checkRegularAmount) #command called when an option is selected
self.regularAmountChoice.grid(row=0,column=0) #if yes is selected, the entry will become visible
self.amountLabel = tk.Label(self.regularAmountFrame, text="Amount:")
self.poundSign2 = tk.Label(self.regularAmountFrame, text="£")
self.regularAmount = tk.IntVar()
self.regularAmount.trace("w", self.regularAmountEntryClick)
self.regularAmountEntry = tk.Entry(self.regularAmountFrame, textvariable=self.regularAmount, width=8)
self.regularAmountEntry.bind("<1>", self.regularAmountEntryClick)
self.calculateButton = tk.Button(master, text="Calculate", command=self.verifyValues)
self.calculateButton.grid(sticky='NW',pady=6)
self.resultText = tk.Text(master, state='disabled', width=37, height=6)
self.resultText.grid(sticky='NW',pady=6)
self.resetButton= tk.Button(master, text="Reset", command=self.reset)
#checkRegularAmount adds the extra deposit widgets onto the screen
#if the menu option variable is yes
#and removes the extra widgets if not
def checkRegularAmount(self, value):
if value == "no":
self.amountLabel.grid_forget()
self.poundSign2.grid_forget()
self.regularAmountEntry.grid_forget()
else:
self.amountLabel.grid(row=0,column=1)
self.poundSign2.grid(row=0,column=2)
self.regularAmountEntry.grid(row=0,column=3)
#Verifies the values of all entry boxes
#If an error is found, the entry background becomes red
def verifyValues(self):
#Verifying that values are ints
try:
self.initialAmount.get()
except:
messagebox.showerror("Error", "Enter a number for the initial amount")
self.initialAmount.set(0)
self.initialAmountEntry.configure(bg='#D54323')
return
try:
self.interestRate.get()
except:
messagebox.showerror("Error", "Enter a number for the yearly interest rate")
self.interestRate.set(0)
self.interestRateEntry.configure(bg='#D54323')
return
try:
self.timePeriod.get()
except:
messagebox.showerror("Error", "Enter a number for the time period")
self.timePeriod.set(0)
self.timePeriodEntry.configure(bg='#D54323')
return
try:
self.regularAmount.get()
except:
messagebox.showerror("Error", "Enter a number for the regular deposit amount")
self.regularAmount.set(0)
self.regularAmountEntry.configure(bg='#D54323')
return
#Verifying that values are between/higher than a certain number(s)
try:
if self.initialAmount.get() >= 0:
if (self.timePeriodVar.get() == "years" and self.timePeriod.get() > 0) or (self.timePeriodVar.get() == "months" and self.timePeriod.get() > 0):
if self.timePeriodVar.get() == "months" and self.timePeriod.get() > 12:
messagebox.showerror("Error", "Enter a number less than 12 for the number of months")
self.regularAmount.set(0)
self.timePeriodEntry.configure(bg='#D54323')
return
else:
if self.regularAmount.get() >= 0:
self.calculateResult()
else:
messagebox.showerror("Error", "Enter a number greater than 0 for the regular amount")
self.regularAmount.set(0)
self.regularAmountEntry.configure(bg='#D54323')
return
else:
messagebox.showerror("Error", "Enter a number greater than 0 for the time period")
self.timePeriod.set(0)
self.timePeriodEntry.configure(bg='#D54323')
return
else:
messagebox.showerror("Error", "Enter a number greater than or equal to 0 for the initial amount")
self.initialAmount.set(0)
self.initialAmountEntry.configure(bg='#D54323')
return
except:
return
#These functions change the background of an entry back to white when the user clicks on or types in it
def initialAmountEntryClick(self, *args):
if self.initialAmountEntry['bg'] == '#D54323':
self.initialAmountEntry.configure(bg='#FFFFFF')
def interestRateEntryClick(self, *args):
if self.interestRateEntry['bg'] == '#D54323':
self.interestRateEntry.configure(bg='#FFFFFF')
def timePeriodEntryClick(self, *args):
if self.timePeriodEntry['bg'] == '#D54323':
self.timePeriodEntry.configure(bg='#FFFFFF')
def monthsEntryClick(self, *args):
if self.monthsEntry['bg'] == '#D54323':
self.monthsEntry.configure(bg='#FFFFFF')
def regularAmountEntryClick(self, *args):
if self.regularAmountEntry['bg'] == '#D54323':
self.regularAmountEntry.configure(bg='#FFFFFF')
def calculateResult(self):
self.calculateButton.grid_forget()
self.resetButton.grid(sticky='NW',row=11,pady=6)
#Principal balance
p = self.initialAmount.get()
#Interest rate
r = self.interestRate.get() / 100
#n = number of times interest applied per time period
if self.compoundIntervalVar.get() == "yearly":
n = 1
elif self.compoundIntervalVar.get() == "monthly":
n = 12
elif self.compoundIntervalVar.get() == "weekly":
n = 52
elif self.compoundIntervalVar.get() == "daily":
n = 365
#t = time periods elapsed
if self.timePeriodVar.get() == "years":
t = self.timePeriod.get()
else:
t = self.timePeriod.get() / 12
#Calculate compound interest
result = round(p * (1 + (r/n)) ** (n*t), 2)
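        #(e.g. p=1000, r=0.05, n=12, t=2 -> 1000*(1+0.05/12)**24 ≈ 1104.94)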
        #Include monthly deposits (future value of an annuity due) and calculate interest gained
        interestGained = 0
        if self.regularAmountVar.get() == "yes":
            d = self.regularAmount.get() #regular deposit amount
            #Future value of the deposits: d * (((1+r/12)**(12t) - 1) / (r/12)) * (1+r/12)
            #(assumes r != 0; a 0% rate would make the divisor r/12 zero)
            depositsFutureValue = round(d * (((1+(r/12))**(12*t)-1)/(r/12)) * (1+r/12), 2)
            result += depositsFutureValue
            #t is always in years at this point, so convert back to months
            timeInMonths = t * 12
            earningsFromDeposits = d * timeInMonths
            #Interest earned by the deposits alone excludes the deposited principal
            interestFromDeposits = round(depositsFutureValue - earningsFromDeposits, 2)
            interestGained = round(result - p - earningsFromDeposits, 2)
        else:
            interestGained = round(result - p, 2)
if self.timePeriodVar.get() == "years":
textForResultText = "You started with £" + str(p) + "\nYou ended with £" + str(result) + " over " + str(t) + " years\nYou gained £" + str(interestGained) + " in interest"
else:
textForResultText = "You started with £" + str(p) + "\nYou ended with £" + str(result) + " over " + str(int(t*12)) + " months\nYou gained £" + str(interestGained) + " in interest"
self.resultText.configure(state='normal')
self.resultText.insert(tk.INSERT, textForResultText)
if self.regularAmountVar.get() == "yes":
depositsText = "\nYou also deposited £" + str(earningsFromDeposits) + " \nYou earned £" + str(interestFromDeposits) + " in interest from \nyour deposits"
self.resultText.insert(tk.INSERT, depositsText)
self.resultText.configure(state='disabled')
def reset(self):
self.regularAmount.set(0)
self.interestRate.set(0)
self.timePeriod.set(0)
self.initialAmount.set(0)
self.timePeriodVar.set("years")
self.compoundIntervalVar.set("yearly")
self.regularAmountVar.set("no")
self.resetButton.grid_forget()
self.calculateButton.grid(row=11,sticky='NW',pady=6)
self.resultText.configure(state='normal')
self.resultText.delete(1.0, tk.END)
self.resultText.configure(state='disabled')
def main():
window = tk.Tk()
w = 300
h = 500
screen_width = window.winfo_screenwidth()
screen_height = window.winfo_screenheight()
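    #Centre the window: offset = (screen size - window size) / 2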
x = (screen_width/2) - (w/2)
y = (screen_height/2) - (h/2)
window.geometry('%dx%d+%d+%d' % (w, h, x, y))
generator = interestCalculator(window)
window.mainloop()
if __name__ == '__main__':
main()
|
[
"nhu21@bath.ac.uk"
] |
nhu21@bath.ac.uk
|
f2a83c7913432f41e2f93510974a869d19884e9c
|
0461fbeb0f2ef76f977e0ff6000d2eb591b1921c
|
/ZCart/blog/views.py
|
5686c797c5c691fafde190d66ae2cec8ea319b3a
|
[] |
no_license
|
aaditya867/ZCart
|
b37d6b9b7667421a2d3688e6dafa500c06c2fc37
|
971691980ab9fcfd29a1c37bb47427f3cfb95fc6
|
refs/heads/master
| 2021-01-14T01:54:30.577185
| 2020-02-24T08:24:41
| 2020-02-24T08:24:41
| 242,562,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Blogpost
# Create your views here.
def index(req):
myposts = Blogpost.objects.all()
print(myposts)
return render(req,'blog/index.html',{'myposts':myposts})
def blogpost(req, id):
post = Blogpost.objects.filter(post_id=id)[0]
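    # Note: filter(...)[0] raises IndexError when no post matches; a sketch
    # using get_object_or_404 (assuming post_id is unique) would 404 instead:
    #   from django.shortcuts import get_object_or_404
    #   post = get_object_or_404(Blogpost, post_id=id)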
print(post)
return render(req,'blog/blogpost.html',{'post':post})
|
[
"aadshar01@gmail.com"
] |
aadshar01@gmail.com
|
793dfd696fe7a572fc9bc4e5c47092e593a51fae
|
980d9786062a70b08f474e1a7eabca4edd14c528
|
/scripts/plot_ber_bp_vs_ms.py
|
b222cd304708f2d5184c6d362f44a4bb54f38b21
|
[] |
no_license
|
shakhmetov/labrador-ldpc-c
|
15b9a5a58191248aeac4f8f236a0ec3b462a32e4
|
e0196018bac90d8fa1d7f341feb194e2712e073d
|
refs/heads/master
| 2023-03-18T00:26:52.401416
| 2017-05-25T19:30:52
| 2017-05-25T19:30:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
import numpy as np
import matplotlib.pyplot as plt
# Only the (256, 128) results are plotted; ebn0_db is shared by both runs
from results.ber_256_128_minsum import ebn0_db, soft_ber as soft_ber_ms, soft_cer as soft_cer_ms
from results.ber_256_128_bp import soft_ber as soft_ber_bp, soft_cer as soft_cer_bp
plt.figure(figsize=(12, 8))
plt.plot(ebn0_db, np.array(soft_ber_ms), 'g-x', label="Min-Sum (BER)")
plt.plot(ebn0_db, np.array(soft_ber_bp), 'b-x', label="Belief Propagation (BER)")
plt.plot(ebn0_db, np.array(soft_cer_ms), 'g--x', label="Min-Sum (CER)")
plt.plot(ebn0_db, np.array(soft_cer_bp), 'b--x', label="Belief Propagation (CER)")
plt.legend(loc='lower left')
plt.semilogy()
plt.xlabel("Eb/N0 (dB)")
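# y-axis label (wording assumed from the plotted series)
plt.ylabel("Bit / codeword error rate")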
plt.title("LDPC (256, 128) Benchmark")
plt.grid()
plt.ylim(1e-5, 1e0)
plt.savefig("results/ber_bp_vs_ms.pdf")
|
[
"adam@adamgreig.com"
] |
adam@adamgreig.com
|
25a5502d2669bb9cba32e5be6c9185a8bf5c9510
|
1b5f28f56c648960608da9a54b778b2e2805247b
|
/slack_django_webhook/slack_messages/models.py
|
e9276ce8160c6d401f514141dc114e2ca6e73829
|
[] |
no_license
|
davidsonlima/slack-django-webhook
|
87ae404585ce489ece09f1d817c61358e5ed7548
|
f34d4fbbd8a3b403f244210e1ced8807e04f98c9
|
refs/heads/master
| 2021-05-06T11:24:32.801148
| 2017-12-15T18:17:31
| 2017-12-15T18:17:31
| 114,286,943
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
from django.db import models
from django.utils import timezone
# from django_hstore import hstore
class WebhookTransaction(models.Model):
UNPROCESSED = 1
PROCESSED = 2
ERROR = 3
STATUSES = (
(UNPROCESSED, 'Unprocessed'),
(PROCESSED, 'Processed'),
(ERROR, 'Error'),
)
date_event_generated = models.DateTimeField()
date_received = models.DateTimeField(default=timezone.now)
# body = hstore.SerializedDictionaryField()
body = models.TextField()
# request_meta = hstore.SerializedDictionaryField()
request_meta = models.TextField()
    # The STATUSES keys are integers, so an integer field matches the choices and default
    status = models.IntegerField(choices=STATUSES, default=UNPROCESSED)
# objects = hstore.HStoreManager()
def __unicode__(self):
return u'{0}'.format(self.date_event_generated)
class Message(models.Model):
date_processed = models.DateTimeField(default=timezone.now)
    # on_delete is required from Django 2.0 onwards; CASCADE mirrors the old default
    webhook_transaction = models.OneToOneField(WebhookTransaction, on_delete=models.CASCADE)
team_id = models.CharField(max_length=250)
team_domain = models.CharField(max_length=250)
channel_id = models.CharField(max_length=250)
channel_name = models.CharField(max_length=250)
user_id = models.CharField(max_length=250)
user_name = models.CharField(max_length=250)
text = models.TextField()
trigger_word = models.CharField(max_length=250)
def __unicode__(self):
return u'{}'.format(self.user_name)
|
[
"100*pmim"
] |
100*pmim
|
c2150e17babe58221fccdb67af9bf2ed1bb87b6e
|
310314ae30de059c4dd532d763915af44c9a7d53
|
/tests/test_providers_logger_object.py
|
319b057d5c6b80d85ca26a8ed30ab978da5783dd
|
[
"Apache-2.0"
] |
permissive
|
robertboston80/logme
|
8b29661cf153a0abc5641513bb213312f771d193
|
586b720fd5d01dc2deee91b685ee628679990080
|
refs/heads/master
| 2020-03-09T19:32:58.401638
| 2018-03-21T19:26:42
| 2018-03-21T19:26:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,768
|
py
|
import pytest
from pathlib import Path
import logging
from logme.providers import LogmeLogger
from logme.config import get_config_content
from logme.exceptions import InvalidConfig, DuplicatedHandler, InvalidOption
class TestLogmeLogger:
@classmethod
def setup(cls):
cls.config = get_config_content(__file__)
cls.logger = LogmeLogger('test_logger', cls.config)
# ---------------------------------------------------------------------------
# Test overall functionality
# ---------------------------------------------------------------------------
def test_logger(self):
assert self.logger.level == 10
def test_logging(self, caplog):
self.logger.info('my logging message')
captured = caplog.record_tuples[0]
assert captured[0] == 'test_logger'
assert captured[1] == 20
assert captured[2] == 'my logging message'
def test_non_existent_attr(self):
with pytest.raises(AttributeError) as e_info:
self.logger.foo()
assert e_info.value.args[0] == "LogmeLogger object has no attribute 'foo'."
def test_handlers(self):
handlers = self.logger.handlers
assert len(handlers) == 1
assert isinstance(handlers[0], logging.StreamHandler)
def test_set_handlers_twice(self):
self.logger._set_handlers()
assert len(self.logger.handlers) == 1
assert isinstance(self.logger.handlers[0], logging.StreamHandler)
# ---------------------------------------------------------------------------
# Test individual methods
# ---------------------------------------------------------------------------
def test_get_handler_filehandler(self, file_config_content):
logger = LogmeLogger('file_logger', file_config_content)
logger.info('my log message for file handler')
log_path = Path(file_config_content['FileHandler']['filename'])
assert log_path.exists()
with open(log_path) as file:
assert file.readline() == 'file_logger::my log message for file handler\n'
@pytest.mark.parametrize('exception, handler_name',
[pytest.param(ValueError, 'FileHandler',
id='exception raised when file handler filename is None'),
pytest.param(InvalidConfig, 'SocketHandler',
id='exception raised when handler_name passed '
'is not configured in logme.ini file')])
def test_get_handler_raise(self, exception, handler_name):
with pytest.raises(exception):
self.logger._get_handler(handler_name)
def test_set_handlers_handler_level_config(self, tmpdir):
config = get_config_content(__file__, 'my_test_logger')
logger = LogmeLogger('handler_level_conf', config)
handler = logger.handlers[0]
assert handler.level == 20 # INFO
assert handler.formatter._fmt == '{asctime}::{message}'
def test_handler_exist(self):
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('{asctime} - {name} - {levelname} - {module}::{funcName}::{message}')
stream_handler.setFormatter(formatter)
assert self.logger._handler_exist(stream_handler)
def test_add_handler(self, tmpdir):
assert len(self.logger.handlers) == 1
assert self.logger.handlers[0].__class__ == logging.StreamHandler
self.logger.add_handler('FileHandler', formatter='{name}->{message}',
level='debug', filename=str(tmpdir.join('dummy.log')))
assert len(self.logger.handlers) == 2
assert set(map(lambda x: x.__class__, self.logger.handlers)) == {logging.StreamHandler, logging.FileHandler}
def test_add_handlers_raise(self, tmpdir):
self.logger.add_handler('FileHandler', formatter='{name}->{message}',
level='debug', filename=str(tmpdir.join('dummy.log')))
with pytest.raises(DuplicatedHandler):
self.logger.add_handler('FileHandler', formatter='{name}->{message}',
level='debug', filename=str(tmpdir.join('dummy.log')))
def test_add_handler_allow_dup(self):
logger = LogmeLogger('allow_duplicate', self.config)
assert len(logger.handlers) == 1
assert logger.handlers[0].__class__ == logging.StreamHandler
logger.add_handler('StreamHandler', formatter='{asctime} - {name} - {levelname} - {module}::{funcName}::{message}',
level='debug', allow_duplicate=True)
assert len(logger.handlers) == 2
assert logger._get_handler_attr(logger.handlers[0]) == logger._get_handler_attr(logger.handlers[1])
def test_get_handler_attr(self, socket_handler):
attrs = self.logger._get_handler_attr(socket_handler)
expected = {
'formatter': '{asctime} - {name}::{message}',
'level': 10,
'host': '127.0.0.7',
'port': '8080'
}
assert attrs == expected
def test_reset_handlers(self):
logger = LogmeLogger('reset_logger', self.config)
handler_classes = [i.__class__ for i in logger.handlers]
assert handler_classes[0] == logging.StreamHandler
logger.reset_config(config_name='socket_config')
assert logger.handlers[0].__class__ == logging.handlers.SocketHandler
def test_reset_handler_rename(self):
logger = LogmeLogger('rename_logger', self.config)
assert logger.name == 'rename_logger'
config = get_config_content(__file__, name='socket_config')
logger.reset_config(config=config, name='logger_new_name')
assert logger.name == 'logger_new_name'
@pytest.mark.parametrize('args, message',
[pytest.param({'config_name': 'socket_config', 'config': {'formatter': 'hello'}},
"Can only set keyword argument of either "
"'config_name' or 'config', not both.",
id='InvalidOption raised when both config_name and config are set'),
pytest.param({}, "must specify one of 'config_name' or 'config'.",
id="InvalidOption raised when neither config_name nor config are set")])
def test_reset_handlers_raise(self, args, message):
with pytest.raises(InvalidOption) as e_info:
self.logger.reset_config(**args)
assert e_info.value.args[0] == message
|
[
"petitelumiere90@gmail.com"
] |
petitelumiere90@gmail.com
|
b5e5920123fb9b09a51cb16ac17166d77ef26d45
|
34c65da6ed9750a9bb31efe3acfb34b686526276
|
/esxitools/backup.py
|
50cf9a008b7816b5d29187f1c09b3baf2623fb34
|
[
"MIT"
] |
permissive
|
itamaro/esxi-tools
|
45bb5409251c56df9561aee8679ab6cfdbb043c7
|
5fb9ffe33d531a401d965c40df5845b06a9b030b
|
refs/heads/master
| 2021-01-02T23:06:53.907733
| 2013-12-14T21:26:32
| 2013-12-14T21:26:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,135
|
py
|
import os
import datetime
from glob import glob
import re
from tendo import singleton
import paramiko
from scp import SCPClient
from ftplib import FTP
from string import Template
from tempfile import mkstemp
import logging
import io
import utils
log_stream = io.StringIO()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(log_stream)
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter(u'%(asctime)s\t%(levelname)s\t%(message)s'))
logger.addHandler(sh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
try:
import settings
except ImportError:
logger.error(u'No settings.py file found!')
import sys
sys.exit(1)
def is_time_in_window(t, ranges):
for ts, te in ranges:
if ts <= t <= te:
return True
return False
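# e.g. is_time_in_window(datetime.time(2, 30),
#                        [(datetime.time(1, 0), datetime.time(3, 0))]) -> True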
def get_current_time():
import time
now = time.localtime()
return datetime.time(now.tm_hour, now.tm_min, now.tm_sec)
class BackupProfile(object):
_no_such_file_or_dir_re = re.compile(u'No such file or directory')
_backup_archive_re = re.compile(u'(?P<vmname>.+)\-'
'(?P<ts>\d{4}\-\d{2}\-\d{2}\_\d{2}\-\d{2}\-\d{2})\.tar\.gz')
_t = None
_chan = None
@classmethod
def _get_current_time(cls):
return datetime.datetime.now()
@classmethod
def _apply_template(cls, tmpl_file_path, tmpl_params, out_file_path=None):
"""
Applies template-parameters to template-file.
Creates an output file with applied template.
If `out_file_path` not specified, a temp file will be used.
"""
# Read the content of the file as a template string
with open(tmpl_file_path, 'r') as tmpl_file:
tmpl_str = Template(tmpl_file.read())
# Apply the template and save to the output file
out_string = tmpl_str.safe_substitute(tmpl_params)
if not out_file_path:
f, out_file_path = mkstemp(text=True)
os.close(f)
with io.open(out_file_path, 'w', newline='\n') as f:
f.write(out_string)
return out_file_path
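    # e.g. _apply_template('ghettovcb.sh.tmpl', {u'RemoteBackupDir': u'/vmfs/backup'})
    # substitutes $RemoteBackupDir in the template and returns the temp-file path
    # (the template path and value here are illustrative)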
def __init__(self, profile_dict):
self.__dict__.update(profile_dict)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._close_ssh_transport()
def _get_ssh_transport(self):
if self._t:
return self._t
self._t = paramiko.Transport((self.host_ip, self.ssh_port))
self._t.start_client()
self._t.auth_password(self.ssh_user, self.ssh_password)
return self._t
def _close_ssh_transport(self):
self._close_ssh_session()
if self._t:
self._t.close()
self._t = None
def _get_ssh_session(self):
# if self._chan and not self._chan.closed:
# print 'pre', self._chan
# return self._chan
self._chan = self._get_ssh_transport().open_session()
self._chan.set_combine_stderr(True)
return self._chan
def _close_ssh_session(self):
if self._chan:
self._chan.close()
self._chan = None
def _run_ssh_command(self, cmd):
# Open an SSH session and execute the command
chan = self._get_ssh_session()
chan.exec_command('%s ; echo exit_code=$?' % (cmd))
stdout = ''
x = chan.recv(1024)
while x:
stdout += x
x = chan.recv(1024)
output = stdout.strip().split('\n')
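        # The remote command was suffixed with 'echo exit_code=$?', so the
        # last output line carries the exit status, e.g. 'exit_code=0'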
exit_code = re.match('exit_code\=(\-?\d+)', output[-1]).group(1)
if not '0' == exit_code:
logger.debug(u'SSH command "%s" failed with output:\n%s' %
(cmd, '\n'.join(output)))
raise RuntimeWarning(u'Remote command failed with code %s' %
(exit_code))
return '\n'.join(output[:-1])
def _get_vm_config(self, vmname, config):
vm_dict = self.backup_vms[vmname]
if config in vm_dict:
return vm_dict[config]
return self.default_vm_config[config]
def _list_backup_archives(self):
glob_str = os.path.join(self.backups_archive_dir, u'*.tar.gz')
return glob(glob_str)
def _list_backup_archives_for_vm(self, vmname):
glob_str = os.path.join(self.backups_archive_dir,
u'%s-*.tar.gz' % (vmname))
return glob(glob_str)
def get_latest_archives(self):
"""
Returns dictionary of existing archives in `backup_archive_dir`,
with VM names as keys and the latest available backup timestamp
as value.
"""
res = dict()
for archive_path in self._list_backup_archives():
_, archive = os.path.split(archive_path)
            m = self._backup_archive_re.match(archive)
if m:
vmname = m.groupdict()[u'vmname']
ts = datetime.datetime.strptime(m.groupdict()[u'ts'],
'%Y-%m-%d_%H-%M-%S')
if vmname in res:
if ts > res[vmname]:
res[vmname] = ts
else:
res[vmname] = ts
return res
def is_vm_backup_overdue(self, vmname, ts):
"Returns True if `vmname` backup from `ts` is older than period"
time_since_last_backup = self._get_current_time() - ts
if not vmname in self.backup_vms:
logger.warning(u'VM "%s" not in profile, but archive found' %
(vmname))
return False
period = self._get_vm_config(vmname, u'period')
assert type(period) == datetime.timedelta
return time_since_last_backup >= period
    def get_next_vm_to_backup(self):
        """
        Returns the name of the next VM due for backup: first any VM with no
        existing archive, then the overdue VM with the oldest archive, or
        None when nothing is due.
        """
# First priority - VMs with no existing archives
for vmname in self.backup_vms.keys():
if not self._list_backup_archives_for_vm(vmname):
logger.debug(u'VM "%s" is ready next (no existing archives)' %
vmname)
return vmname
# Second priority - the VM with the oldest archive that is overdue
ret_vm = None
ret_vm_last_backup = None
for vmname, ts in self.get_latest_archives().iteritems():
if self.is_vm_backup_overdue(vmname, ts):
logger.debug(u'VM "%s" backup is overdue' % (vmname))
if ret_vm_last_backup:
if ts < ret_vm_last_backup:
ret_vm = vmname
ret_vm_last_backup = ts
else:
ret_vm = vmname
ret_vm_last_backup = ts
return ret_vm
def _upload_file(self, local_source, remote_destination):
scp = SCPClient(self._get_ssh_transport())
scp.put(local_source, remote_destination)
def _set_remote_chmod(self, remote_file):
return self._run_ssh_command(u'chmod +x %s' % (remote_file))
def _remove_remote_file(self, remote_file):
self._run_ssh_command('rm %s' % (remote_file))
def _remove_local_file(self, file):
os.remove(file)
def _parse_ghettovcb_output(self, raw_output):
ret_dict = {u'WARNINGS': list()}
info_prefix = u'\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2} \-\- info\:'
config_matcher = re.compile(
u'%s CONFIG \- (?P<key>\w+) \= (?P<val>.+)' % (info_prefix))
warn_matcher = re.compile(u'%s WARN\: (?P<msg>.+)' % (info_prefix))
duration_matcher = re.compile(
u'%s Backup Duration\: (?P<time>.+)' % (info_prefix))
final_status_matcher = re.compile(
u'%s \#{6} Final status\: (?P<status>.+) \#{6}' % (info_prefix))
for raw_line in raw_output.split(u'\n'):
config = config_matcher.match(raw_line)
if config:
ret_dict[config.groupdict()[u'key']] = \
config.groupdict()[u'val']
continue
warning = warn_matcher.match(raw_line)
if warning:
ret_dict[u'WARNINGS'].append(warning.groupdict()[u'msg'])
continue
duration = duration_matcher.match(raw_line)
if duration:
ret_dict[u'BACKUP_DURATION'] = duration.groupdict()[u'time']
continue
final_status = final_status_matcher.match(raw_line)
if final_status:
status = final_status.groupdict()[u'status']
ret_dict[u'FINAL_STATUS'] = u'All VMs backed up OK!' == status
continue
return ret_dict
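    # Example parsed result (field values depend on the ghettovcb run):
    # {u'WARNINGS': [], u'VM_BACKUP_DIR_NAMING_CONVENTION': u'2013-12-14_21-26-32',
    #  u'BACKUP_DURATION': u'10.5 Minutes', u'FINAL_STATUS': True, ...}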
def _run_remote_backup(self, vmname):
"Run ghettovcb script to backup the specified VM"
# Generate ghettovcb script from template
local_script = self._apply_template(
self.ghettovcb_script_template,
{u'RemoteBackupDir': self.remote_backup_dir}
)
# Upload ghettovcb script to host and make it executable
remote_script = '/'.join((self.remote_workdir, 'ghettovcb.sh'))
self._upload_file(local_script, remote_script)
self._set_remote_chmod(remote_script)
# cleanup local temp
self._remove_local_file(local_script)
# Run ghettovcb script for the requested vm-name
backup_cmd = '%s -m %s' % (remote_script, vmname)
cmd_result = self._run_ssh_command(backup_cmd)
self._remove_remote_file(remote_script)
# Parse the output and return the result
return self._parse_ghettovcb_output(cmd_result)
def _archive_remote_backup(self, vmname, backup_dir):
"Tar's and GZip's the backup dir, returning full path of the archive"
remote_workdir = u'/'.join((self.remote_backup_dir, vmname))
remote_archive = u'%s.tar.gz' % (backup_dir)
tar_cmd = u'cd "%s"; tar -cz -f "%s" "%s"' % \
(remote_workdir, remote_archive, backup_dir)
tar_output = self._run_ssh_command(tar_cmd)
if self._no_such_file_or_dir_re.search(tar_output):
raise RuntimeError(u'Tar command failed:\n%s' % (tar_output))
return '/'.join((remote_workdir, remote_archive))
def _download_archive(self, remote_path):
"""
Downloads a remote file at `remote_path` via FTP to
`self.backups_archive_dir` using same file name,
returning the total time it took (in seconds).
"""
from time import time
ts = time()
_, remote_filename = os.path.split(remote_path)
dest_path = os.path.join(self.backups_archive_dir, remote_filename)
ftp = FTP(self.host_ip)
ftp.login(self.ftp_user, self.ftp_password)
with open(dest_path, 'wb') as dest_file:
ftp.retrbinary(u'RETR %s' % (remote_path), dest_file.write)
return time() - ts
def backup_vm(self, vmname):
ghettovcb_output = self._run_remote_backup(vmname)
logger.info(u'ghettovcb output:\n%s' % (
u'\n'.join(
[u'\t%s: %s' % (k,v)
for k,v in ghettovcb_output.iteritems()])))
if not ghettovcb_output[u'FINAL_STATUS']:
# Something failed
return False
backup_name = ghettovcb_output[u'VM_BACKUP_DIR_NAMING_CONVENTION']
backup_dir = u'%s-%s' % (vmname, backup_name)
remote_archive = self._archive_remote_backup(vmname, backup_dir)
download_time = self._download_archive(remote_archive)
logger.info(u'Backup archive "%s" downloaded to "%s" in %f seconds.' %
(remote_archive, self.backups_archive_dir, download_time))
        self._remove_remote_file(remote_archive)
        logger.info(u'Cleaned up archive from remote host')
        return True
def trim_backup_archives(self):
for vmname in self.backup_vms.keys():
vm_archives = self._list_backup_archives_for_vm(vmname)
rot_count = self._get_vm_config(vmname, u'rotation_count')
for archive_to_delete in sorted(vm_archives)[:-rot_count]:
logger.info(u'Deleting archive "%s"' %
(archive_to_delete))
self._remove_local_file(archive_to_delete)
def backup(**kwargs):
# Avoid multiple instances of backup program
me = singleton.SingleInstance(flavor_id=u'esxi-backup')
# Obtain profile configuration
if not u'profile_name' in kwargs:
raise RuntimeError(u'Missing profile_name argument')
profile_name = kwargs[u'profile_name']
if not profile_name in settings.ESXI_BACKUP_PROFILES:
raise RuntimeError(u'No such profile "%s"' % profile_name)
profile = settings.ESXI_BACKUP_PROFILES[profile_name]
logger.info(u'Running backup profile "%s"' % (profile_name))
# Check if profile is currently active
t = get_current_time()
if not is_time_in_window(t, profile['backup_times']):
logger.debug(u'Out of time range. Skipping backup run for profile.')
return True
with BackupProfile(profile) as bp:
next_vm = bp.get_next_vm_to_backup()
if next_vm:
logger.info(u'Running backup for VM "%s"' % (next_vm))
bp.backup_vm(next_vm)
bp.trim_backup_archives()
if bp.email_report:
utils.send_email(
bp.gmail_user, bp.gmail_pwd, bp.from_field, bp.recipients,
u'BACKUP OK %s' % (next_vm), log_stream.getvalue())
else:
logger.info(u'No next VM to backup - Nothing to do.')
return True
|
[
"itamarost@gmail.com"
] |
itamarost@gmail.com
|